1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * bcache setup/teardown code, and some metadata io - read a superblock and 4 * figure out what to do with it. 5 * 6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 7 * Copyright 2012 Google, Inc. 8 */ 9 10 #include "bcache.h" 11 #include "btree.h" 12 #include "debug.h" 13 #include "extents.h" 14 #include "request.h" 15 #include "writeback.h" 16 17 #include <linux/blkdev.h> 18 #include <linux/buffer_head.h> 19 #include <linux/debugfs.h> 20 #include <linux/genhd.h> 21 #include <linux/idr.h> 22 #include <linux/kthread.h> 23 #include <linux/module.h> 24 #include <linux/random.h> 25 #include <linux/reboot.h> 26 #include <linux/sysfs.h> 27 28 unsigned int bch_cutoff_writeback; 29 unsigned int bch_cutoff_writeback_sync; 30 31 static const char bcache_magic[] = { 32 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, 33 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 34 }; 35 36 static const char invalid_uuid[] = { 37 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, 38 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 39 }; 40 41 static struct kobject *bcache_kobj; 42 struct mutex bch_register_lock; 43 LIST_HEAD(bch_cache_sets); 44 static LIST_HEAD(uncached_devices); 45 46 static int bcache_major; 47 static DEFINE_IDA(bcache_device_idx); 48 static wait_queue_head_t unregister_wait; 49 struct workqueue_struct *bcache_wq; 50 struct workqueue_struct *bch_journal_wq; 51 52 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 53 /* limitation of partitions number on single bcache device */ 54 #define BCACHE_MINORS 128 55 /* limitation of bcache devices number on single system */ 56 #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS) 57 58 /* Superblock */ 59 60 static const char *read_super(struct cache_sb *sb, struct block_device *bdev, 61 struct page **res) 62 { 63 const char *err; 64 struct cache_sb *s; 65 struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); 66 unsigned int i; 67 68 if (!bh) 69 return "IO error"; 70 71 s = (struct cache_sb *) bh->b_data; 72 73 sb->offset = le64_to_cpu(s->offset); 74 sb->version = le64_to_cpu(s->version); 75 76 memcpy(sb->magic, s->magic, 16); 77 memcpy(sb->uuid, s->uuid, 16); 78 memcpy(sb->set_uuid, s->set_uuid, 16); 79 memcpy(sb->label, s->label, SB_LABEL_SIZE); 80 81 sb->flags = le64_to_cpu(s->flags); 82 sb->seq = le64_to_cpu(s->seq); 83 sb->last_mount = le32_to_cpu(s->last_mount); 84 sb->first_bucket = le16_to_cpu(s->first_bucket); 85 sb->keys = le16_to_cpu(s->keys); 86 87 for (i = 0; i < SB_JOURNAL_BUCKETS; i++) 88 sb->d[i] = le64_to_cpu(s->d[i]); 89 90 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u", 91 sb->version, sb->flags, sb->seq, sb->keys); 92 93 err = "Not a bcache superblock"; 94 if (sb->offset != SB_SECTOR) 95 goto err; 96 97 if (memcmp(sb->magic, bcache_magic, 16)) 98 goto err; 99 100 err = "Too many journal buckets"; 101 if (sb->keys > SB_JOURNAL_BUCKETS) 102 goto err; 103 104 err = "Bad checksum"; 105 if (s->csum != csum_set(s)) 106 goto err; 107 108 err = "Bad UUID"; 109 if (bch_is_zero(sb->uuid, 16)) 110 goto err; 111 112 sb->block_size = le16_to_cpu(s->block_size); 113 114 err = "Superblock block size smaller than device block size"; 115 if (sb->block_size << 9 < bdev_logical_block_size(bdev)) 116 goto err; 117 118 switch (sb->version) { 119 case BCACHE_SB_VERSION_BDEV: 120 sb->data_offset = BDEV_DATA_START_DEFAULT; 121 break; 122 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: 123 sb->data_offset = le64_to_cpu(s->data_offset); 124 125 err = "Bad data offset"; 126 if 
(sb->data_offset < BDEV_DATA_START_DEFAULT) 127 goto err; 128 129 break; 130 case BCACHE_SB_VERSION_CDEV: 131 case BCACHE_SB_VERSION_CDEV_WITH_UUID: 132 sb->nbuckets = le64_to_cpu(s->nbuckets); 133 sb->bucket_size = le16_to_cpu(s->bucket_size); 134 135 sb->nr_in_set = le16_to_cpu(s->nr_in_set); 136 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); 137 138 err = "Too many buckets"; 139 if (sb->nbuckets > LONG_MAX) 140 goto err; 141 142 err = "Not enough buckets"; 143 if (sb->nbuckets < 1 << 7) 144 goto err; 145 146 err = "Bad block/bucket size"; 147 if (!is_power_of_2(sb->block_size) || 148 sb->block_size > PAGE_SECTORS || 149 !is_power_of_2(sb->bucket_size) || 150 sb->bucket_size < PAGE_SECTORS) 151 goto err; 152 153 err = "Invalid superblock: device too small"; 154 if (get_capacity(bdev->bd_disk) < 155 sb->bucket_size * sb->nbuckets) 156 goto err; 157 158 err = "Bad UUID"; 159 if (bch_is_zero(sb->set_uuid, 16)) 160 goto err; 161 162 err = "Bad cache device number in set"; 163 if (!sb->nr_in_set || 164 sb->nr_in_set <= sb->nr_this_dev || 165 sb->nr_in_set > MAX_CACHES_PER_SET) 166 goto err; 167 168 err = "Journal buckets not sequential"; 169 for (i = 0; i < sb->keys; i++) 170 if (sb->d[i] != sb->first_bucket + i) 171 goto err; 172 173 err = "Too many journal buckets"; 174 if (sb->first_bucket + sb->keys > sb->nbuckets) 175 goto err; 176 177 err = "Invalid superblock: first bucket comes before end of super"; 178 if (sb->first_bucket * sb->bucket_size < 16) 179 goto err; 180 181 break; 182 default: 183 err = "Unsupported superblock version"; 184 goto err; 185 } 186 187 sb->last_mount = (u32)ktime_get_real_seconds(); 188 err = NULL; 189 190 get_page(bh->b_page); 191 *res = bh->b_page; 192 err: 193 put_bh(bh); 194 return err; 195 } 196 197 static void write_bdev_super_endio(struct bio *bio) 198 { 199 struct cached_dev *dc = bio->bi_private; 200 /* XXX: error checking */ 201 202 closure_put(&dc->sb_write); 203 } 204 205 static void __write_super(struct cache_sb *sb, struct bio *bio) 206 { 207 struct cache_sb *out = page_address(bio_first_page_all(bio)); 208 unsigned int i; 209 210 bio->bi_iter.bi_sector = SB_SECTOR; 211 bio->bi_iter.bi_size = SB_SIZE; 212 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); 213 bch_bio_map(bio, NULL); 214 215 out->offset = cpu_to_le64(sb->offset); 216 out->version = cpu_to_le64(sb->version); 217 218 memcpy(out->uuid, sb->uuid, 16); 219 memcpy(out->set_uuid, sb->set_uuid, 16); 220 memcpy(out->label, sb->label, SB_LABEL_SIZE); 221 222 out->flags = cpu_to_le64(sb->flags); 223 out->seq = cpu_to_le64(sb->seq); 224 225 out->last_mount = cpu_to_le32(sb->last_mount); 226 out->first_bucket = cpu_to_le16(sb->first_bucket); 227 out->keys = cpu_to_le16(sb->keys); 228 229 for (i = 0; i < sb->keys; i++) 230 out->d[i] = cpu_to_le64(sb->d[i]); 231 232 out->csum = csum_set(out); 233 234 pr_debug("ver %llu, flags %llu, seq %llu", 235 sb->version, sb->flags, sb->seq); 236 237 submit_bio(bio); 238 } 239 240 static void bch_write_bdev_super_unlock(struct closure *cl) 241 { 242 struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); 243 244 up(&dc->sb_write_mutex); 245 } 246 247 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) 248 { 249 struct closure *cl = &dc->sb_write; 250 struct bio *bio = &dc->sb_bio; 251 252 down(&dc->sb_write_mutex); 253 closure_init(cl, parent); 254 255 bio_reset(bio); 256 bio_set_dev(bio, dc->bdev); 257 bio->bi_end_io = write_bdev_super_endio; 258 bio->bi_private = dc; 259 260 closure_get(cl); 261 /* I/O request sent 
to backing device */ 262 __write_super(&dc->sb, bio); 263 264 closure_return_with_destructor(cl, bch_write_bdev_super_unlock); 265 } 266 267 static void write_super_endio(struct bio *bio) 268 { 269 struct cache *ca = bio->bi_private; 270 271 /* is_read = 0 */ 272 bch_count_io_errors(ca, bio->bi_status, 0, 273 "writing superblock"); 274 closure_put(&ca->set->sb_write); 275 } 276 277 static void bcache_write_super_unlock(struct closure *cl) 278 { 279 struct cache_set *c = container_of(cl, struct cache_set, sb_write); 280 281 up(&c->sb_write_mutex); 282 } 283 284 void bcache_write_super(struct cache_set *c) 285 { 286 struct closure *cl = &c->sb_write; 287 struct cache *ca; 288 unsigned int i; 289 290 down(&c->sb_write_mutex); 291 closure_init(cl, &c->cl); 292 293 c->sb.seq++; 294 295 for_each_cache(ca, c, i) { 296 struct bio *bio = &ca->sb_bio; 297 298 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; 299 ca->sb.seq = c->sb.seq; 300 ca->sb.last_mount = c->sb.last_mount; 301 302 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 303 304 bio_reset(bio); 305 bio_set_dev(bio, ca->bdev); 306 bio->bi_end_io = write_super_endio; 307 bio->bi_private = ca; 308 309 closure_get(cl); 310 __write_super(&ca->sb, bio); 311 } 312 313 closure_return_with_destructor(cl, bcache_write_super_unlock); 314 } 315 316 /* UUID io */ 317 318 static void uuid_endio(struct bio *bio) 319 { 320 struct closure *cl = bio->bi_private; 321 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 322 323 cache_set_err_on(bio->bi_status, c, "accessing uuids"); 324 bch_bbio_free(bio, c); 325 closure_put(cl); 326 } 327 328 static void uuid_io_unlock(struct closure *cl) 329 { 330 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 331 332 up(&c->uuid_write_mutex); 333 } 334 335 static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, 336 struct bkey *k, struct closure *parent) 337 { 338 struct closure *cl = &c->uuid_write; 339 struct uuid_entry *u; 340 unsigned int i; 341 char buf[80]; 342 343 BUG_ON(!parent); 344 down(&c->uuid_write_mutex); 345 closure_init(cl, parent); 346 347 for (i = 0; i < KEY_PTRS(k); i++) { 348 struct bio *bio = bch_bbio_alloc(c); 349 350 bio->bi_opf = REQ_SYNC | REQ_META | op_flags; 351 bio->bi_iter.bi_size = KEY_SIZE(k) << 9; 352 353 bio->bi_end_io = uuid_endio; 354 bio->bi_private = cl; 355 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 356 bch_bio_map(bio, c->uuids); 357 358 bch_submit_bbio(bio, c, k, i); 359 360 if (op != REQ_OP_WRITE) 361 break; 362 } 363 364 bch_extent_to_text(buf, sizeof(buf), k); 365 pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? 
"wrote" : "read", buf); 366 367 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 368 if (!bch_is_zero(u->uuid, 16)) 369 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u", 370 u - c->uuids, u->uuid, u->label, 371 u->first_reg, u->last_reg, u->invalidated); 372 373 closure_return_with_destructor(cl, uuid_io_unlock); 374 } 375 376 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) 377 { 378 struct bkey *k = &j->uuid_bucket; 379 380 if (__bch_btree_ptr_invalid(c, k)) 381 return "bad uuid pointer"; 382 383 bkey_copy(&c->uuid_bucket, k); 384 uuid_io(c, REQ_OP_READ, 0, k, cl); 385 386 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 387 struct uuid_entry_v0 *u0 = (void *) c->uuids; 388 struct uuid_entry *u1 = (void *) c->uuids; 389 int i; 390 391 closure_sync(cl); 392 393 /* 394 * Since the new uuid entry is bigger than the old, we have to 395 * convert starting at the highest memory address and work down 396 * in order to do it in place 397 */ 398 399 for (i = c->nr_uuids - 1; 400 i >= 0; 401 --i) { 402 memcpy(u1[i].uuid, u0[i].uuid, 16); 403 memcpy(u1[i].label, u0[i].label, 32); 404 405 u1[i].first_reg = u0[i].first_reg; 406 u1[i].last_reg = u0[i].last_reg; 407 u1[i].invalidated = u0[i].invalidated; 408 409 u1[i].flags = 0; 410 u1[i].sectors = 0; 411 } 412 } 413 414 return NULL; 415 } 416 417 static int __uuid_write(struct cache_set *c) 418 { 419 BKEY_PADDED(key) k; 420 struct closure cl; 421 struct cache *ca; 422 423 closure_init_stack(&cl); 424 lockdep_assert_held(&bch_register_lock); 425 426 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) 427 return 1; 428 429 SET_KEY_SIZE(&k.key, c->sb.bucket_size); 430 uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); 431 closure_sync(&cl); 432 433 /* Only one bucket used for uuid write */ 434 ca = PTR_CACHE(c, &k.key, 0); 435 atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written); 436 437 bkey_copy(&c->uuid_bucket, &k.key); 438 bkey_put(c, &k.key); 439 return 0; 440 } 441 442 int bch_uuid_write(struct cache_set *c) 443 { 444 int ret = __uuid_write(c); 445 446 if (!ret) 447 bch_journal_meta(c, NULL); 448 449 return ret; 450 } 451 452 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) 453 { 454 struct uuid_entry *u; 455 456 for (u = c->uuids; 457 u < c->uuids + c->nr_uuids; u++) 458 if (!memcmp(u->uuid, uuid, 16)) 459 return u; 460 461 return NULL; 462 } 463 464 static struct uuid_entry *uuid_find_empty(struct cache_set *c) 465 { 466 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; 467 468 return uuid_find(c, zero_uuid); 469 } 470 471 /* 472 * Bucket priorities/gens: 473 * 474 * For each bucket, we store on disk its 475 * 8 bit gen 476 * 16 bit priority 477 * 478 * See alloc.c for an explanation of the gen. The priority is used to implement 479 * lru (and in the future other) cache replacement policies; for most purposes 480 * it's just an opaque integer. 481 * 482 * The gens and the priorities don't have a whole lot to do with each other, and 483 * it's actually the gens that must be written out at specific times - it's no 484 * big deal if the priorities don't get written, if we lose them we just reuse 485 * buckets in suboptimal order. 486 * 487 * On disk they're stored in a packed array, and in as many buckets are required 488 * to fit them all. The buckets we use to store them form a list; the journal 489 * header points to the first bucket, the first bucket points to the second 490 * bucket, et cetera. 
491 * 492 * This code is used by the allocation code; periodically (whenever it runs out 493 * of buckets to allocate from) the allocation code will invalidate some 494 * buckets, but it can't use those buckets until their new gens are safely on 495 * disk. 496 */ 497 498 static void prio_endio(struct bio *bio) 499 { 500 struct cache *ca = bio->bi_private; 501 502 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); 503 bch_bbio_free(bio, ca->set); 504 closure_put(&ca->prio); 505 } 506 507 static void prio_io(struct cache *ca, uint64_t bucket, int op, 508 unsigned long op_flags) 509 { 510 struct closure *cl = &ca->prio; 511 struct bio *bio = bch_bbio_alloc(ca->set); 512 513 closure_init_stack(cl); 514 515 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; 516 bio_set_dev(bio, ca->bdev); 517 bio->bi_iter.bi_size = bucket_bytes(ca); 518 519 bio->bi_end_io = prio_endio; 520 bio->bi_private = ca; 521 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 522 bch_bio_map(bio, ca->disk_buckets); 523 524 closure_bio_submit(ca->set, bio, &ca->prio); 525 closure_sync(cl); 526 } 527 528 void bch_prio_write(struct cache *ca) 529 { 530 int i; 531 struct bucket *b; 532 struct closure cl; 533 534 closure_init_stack(&cl); 535 536 lockdep_assert_held(&ca->set->bucket_lock); 537 538 ca->disk_buckets->seq++; 539 540 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), 541 &ca->meta_sectors_written); 542 543 //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), 544 // fifo_used(&ca->free_inc), fifo_used(&ca->unused)); 545 546 for (i = prio_buckets(ca) - 1; i >= 0; --i) { 547 long bucket; 548 struct prio_set *p = ca->disk_buckets; 549 struct bucket_disk *d = p->data; 550 struct bucket_disk *end = d + prios_per_bucket(ca); 551 552 for (b = ca->buckets + i * prios_per_bucket(ca); 553 b < ca->buckets + ca->sb.nbuckets && d < end; 554 b++, d++) { 555 d->prio = cpu_to_le16(b->prio); 556 d->gen = b->gen; 557 } 558 559 p->next_bucket = ca->prio_buckets[i + 1]; 560 p->magic = pset_magic(&ca->sb); 561 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); 562 563 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); 564 BUG_ON(bucket == -1); 565 566 mutex_unlock(&ca->set->bucket_lock); 567 prio_io(ca, bucket, REQ_OP_WRITE, 0); 568 mutex_lock(&ca->set->bucket_lock); 569 570 ca->prio_buckets[i] = bucket; 571 atomic_dec_bug(&ca->buckets[bucket].pin); 572 } 573 574 mutex_unlock(&ca->set->bucket_lock); 575 576 bch_journal_meta(ca->set, &cl); 577 closure_sync(&cl); 578 579 mutex_lock(&ca->set->bucket_lock); 580 581 /* 582 * Don't want the old priorities to get garbage collected until after we 583 * finish writing the new ones, and they're journalled 584 */ 585 for (i = 0; i < prio_buckets(ca); i++) { 586 if (ca->prio_last_buckets[i]) 587 __bch_bucket_free(ca, 588 &ca->buckets[ca->prio_last_buckets[i]]); 589 590 ca->prio_last_buckets[i] = ca->prio_buckets[i]; 591 } 592 } 593 594 static void prio_read(struct cache *ca, uint64_t bucket) 595 { 596 struct prio_set *p = ca->disk_buckets; 597 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; 598 struct bucket *b; 599 unsigned int bucket_nr = 0; 600 601 for (b = ca->buckets; 602 b < ca->buckets + ca->sb.nbuckets; 603 b++, d++) { 604 if (d == end) { 605 ca->prio_buckets[bucket_nr] = bucket; 606 ca->prio_last_buckets[bucket_nr] = bucket; 607 bucket_nr++; 608 609 prio_io(ca, bucket, REQ_OP_READ, 0); 610 611 if (p->csum != 612 bch_crc64(&p->magic, bucket_bytes(ca) - 8)) 613 pr_warn("bad csum reading priorities"); 614 615 if (p->magic != 
pset_magic(&ca->sb)) 616 pr_warn("bad magic reading priorities"); 617 618 bucket = p->next_bucket; 619 d = p->data; 620 } 621 622 b->prio = le16_to_cpu(d->prio); 623 b->gen = b->last_gc = d->gen; 624 } 625 } 626 627 /* Bcache device */ 628 629 static int open_dev(struct block_device *b, fmode_t mode) 630 { 631 struct bcache_device *d = b->bd_disk->private_data; 632 633 if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) 634 return -ENXIO; 635 636 closure_get(&d->cl); 637 return 0; 638 } 639 640 static void release_dev(struct gendisk *b, fmode_t mode) 641 { 642 struct bcache_device *d = b->private_data; 643 644 closure_put(&d->cl); 645 } 646 647 static int ioctl_dev(struct block_device *b, fmode_t mode, 648 unsigned int cmd, unsigned long arg) 649 { 650 struct bcache_device *d = b->bd_disk->private_data; 651 652 return d->ioctl(d, mode, cmd, arg); 653 } 654 655 static const struct block_device_operations bcache_ops = { 656 .open = open_dev, 657 .release = release_dev, 658 .ioctl = ioctl_dev, 659 .owner = THIS_MODULE, 660 }; 661 662 void bcache_device_stop(struct bcache_device *d) 663 { 664 if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) 665 closure_queue(&d->cl); 666 } 667 668 static void bcache_device_unlink(struct bcache_device *d) 669 { 670 lockdep_assert_held(&bch_register_lock); 671 672 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { 673 unsigned int i; 674 struct cache *ca; 675 676 sysfs_remove_link(&d->c->kobj, d->name); 677 sysfs_remove_link(&d->kobj, "cache"); 678 679 for_each_cache(ca, d->c, i) 680 bd_unlink_disk_holder(ca->bdev, d->disk); 681 } 682 } 683 684 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, 685 const char *name) 686 { 687 unsigned int i; 688 struct cache *ca; 689 690 for_each_cache(ca, d->c, i) 691 bd_link_disk_holder(ca->bdev, d->disk); 692 693 snprintf(d->name, BCACHEDEVNAME_SIZE, 694 "%s%u", name, d->id); 695 696 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || 697 sysfs_create_link(&c->kobj, &d->kobj, d->name), 698 "Couldn't create device <-> cache set symlinks"); 699 700 clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags); 701 } 702 703 static void bcache_device_detach(struct bcache_device *d) 704 { 705 lockdep_assert_held(&bch_register_lock); 706 707 atomic_dec(&d->c->attached_dev_nr); 708 709 if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { 710 struct uuid_entry *u = d->c->uuids + d->id; 711 712 SET_UUID_FLASH_ONLY(u, 0); 713 memcpy(u->uuid, invalid_uuid, 16); 714 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); 715 bch_uuid_write(d->c); 716 } 717 718 bcache_device_unlink(d); 719 720 d->c->devices[d->id] = NULL; 721 closure_put(&d->c->caching); 722 d->c = NULL; 723 } 724 725 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, 726 unsigned int id) 727 { 728 d->id = id; 729 d->c = c; 730 c->devices[id] = d; 731 732 if (id >= c->devices_max_used) 733 c->devices_max_used = id + 1; 734 735 closure_get(&c->caching); 736 } 737 738 static inline int first_minor_to_idx(int first_minor) 739 { 740 return (first_minor/BCACHE_MINORS); 741 } 742 743 static inline int idx_to_first_minor(int idx) 744 { 745 return (idx * BCACHE_MINORS); 746 } 747 748 static void bcache_device_free(struct bcache_device *d) 749 { 750 lockdep_assert_held(&bch_register_lock); 751 752 pr_info("%s stopped", d->disk->disk_name); 753 754 if (d->c) 755 bcache_device_detach(d); 756 if (d->disk && d->disk->flags & GENHD_FL_UP) 757 del_gendisk(d->disk); 758 if (d->disk && d->disk->queue) 759 
blk_cleanup_queue(d->disk->queue); 760 if (d->disk) { 761 ida_simple_remove(&bcache_device_idx, 762 first_minor_to_idx(d->disk->first_minor)); 763 put_disk(d->disk); 764 } 765 766 bioset_exit(&d->bio_split); 767 kvfree(d->full_dirty_stripes); 768 kvfree(d->stripe_sectors_dirty); 769 770 closure_debug_destroy(&d->cl); 771 } 772 773 static int bcache_device_init(struct bcache_device *d, unsigned int block_size, 774 sector_t sectors) 775 { 776 struct request_queue *q; 777 const size_t max_stripes = min_t(size_t, INT_MAX, 778 SIZE_MAX / sizeof(atomic_t)); 779 size_t n; 780 int idx; 781 782 if (!d->stripe_size) 783 d->stripe_size = 1 << 31; 784 785 d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); 786 787 if (!d->nr_stripes || d->nr_stripes > max_stripes) { 788 pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", 789 (unsigned int)d->nr_stripes); 790 return -ENOMEM; 791 } 792 793 n = d->nr_stripes * sizeof(atomic_t); 794 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); 795 if (!d->stripe_sectors_dirty) 796 return -ENOMEM; 797 798 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); 799 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL); 800 if (!d->full_dirty_stripes) 801 return -ENOMEM; 802 803 idx = ida_simple_get(&bcache_device_idx, 0, 804 BCACHE_DEVICE_IDX_MAX, GFP_KERNEL); 805 if (idx < 0) 806 return idx; 807 808 if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio), 809 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) 810 goto err; 811 812 d->disk = alloc_disk(BCACHE_MINORS); 813 if (!d->disk) 814 goto err; 815 816 set_capacity(d->disk, sectors); 817 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx); 818 819 d->disk->major = bcache_major; 820 d->disk->first_minor = idx_to_first_minor(idx); 821 d->disk->fops = &bcache_ops; 822 d->disk->private_data = d; 823 824 q = blk_alloc_queue(GFP_KERNEL); 825 if (!q) 826 return -ENOMEM; 827 828 blk_queue_make_request(q, NULL); 829 d->disk->queue = q; 830 q->queuedata = d; 831 q->backing_dev_info->congested_data = d; 832 q->limits.max_hw_sectors = UINT_MAX; 833 q->limits.max_sectors = UINT_MAX; 834 q->limits.max_segment_size = UINT_MAX; 835 q->limits.max_segments = BIO_MAX_PAGES; 836 blk_queue_max_discard_sectors(q, UINT_MAX); 837 q->limits.discard_granularity = 512; 838 q->limits.io_min = block_size; 839 q->limits.logical_block_size = block_size; 840 q->limits.physical_block_size = block_size; 841 blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); 842 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue); 843 blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue); 844 845 blk_queue_write_cache(q, true, true); 846 847 return 0; 848 849 err: 850 ida_simple_remove(&bcache_device_idx, idx); 851 return -ENOMEM; 852 853 } 854 855 /* Cached device */ 856 857 static void calc_cached_dev_sectors(struct cache_set *c) 858 { 859 uint64_t sectors = 0; 860 struct cached_dev *dc; 861 862 list_for_each_entry(dc, &c->cached_devs, list) 863 sectors += bdev_sectors(dc->bdev); 864 865 c->cached_dev_sectors = sectors; 866 } 867 868 #define BACKING_DEV_OFFLINE_TIMEOUT 5 869 static int cached_dev_status_update(void *arg) 870 { 871 struct cached_dev *dc = arg; 872 struct request_queue *q; 873 874 /* 875 * If this delayed worker is stopping outside, directly quit here. 876 * dc->io_disable might be set via sysfs interface, so check it 877 * here too. 
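 *
 * The loop below samples the backing device's request queue once per
 * second; once the queue has been dying for BACKING_DEV_OFFLINE_TIMEOUT
 * consecutive seconds, dc->io_disable is set and the bcache device is
 * stopped.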
878 */ 879 while (!kthread_should_stop() && !dc->io_disable) { 880 q = bdev_get_queue(dc->bdev); 881 if (blk_queue_dying(q)) 882 dc->offline_seconds++; 883 else 884 dc->offline_seconds = 0; 885 886 if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) { 887 pr_err("%s: device offline for %d seconds", 888 dc->backing_dev_name, 889 BACKING_DEV_OFFLINE_TIMEOUT); 890 pr_err("%s: disable I/O request due to backing " 891 "device offline", dc->disk.name); 892 dc->io_disable = true; 893 /* let others know earlier that io_disable is true */ 894 smp_mb(); 895 bcache_device_stop(&dc->disk); 896 break; 897 } 898 schedule_timeout_interruptible(HZ); 899 } 900 901 wait_for_kthread_stop(); 902 return 0; 903 } 904 905 906 void bch_cached_dev_run(struct cached_dev *dc) 907 { 908 struct bcache_device *d = &dc->disk; 909 char buf[SB_LABEL_SIZE + 1]; 910 char *env[] = { 911 "DRIVER=bcache", 912 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), 913 NULL, 914 NULL, 915 }; 916 917 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); 918 buf[SB_LABEL_SIZE] = '\0'; 919 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); 920 921 if (atomic_xchg(&dc->running, 1)) { 922 kfree(env[1]); 923 kfree(env[2]); 924 return; 925 } 926 927 if (!d->c && 928 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { 929 struct closure cl; 930 931 closure_init_stack(&cl); 932 933 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); 934 bch_write_bdev_super(dc, &cl); 935 closure_sync(&cl); 936 } 937 938 add_disk(d->disk); 939 bd_link_disk_holder(dc->bdev, dc->disk.disk); 940 /* 941 * won't show up in the uevent file, use udevadm monitor -e instead 942 * only class / kset properties are persistent 943 */ 944 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); 945 kfree(env[1]); 946 kfree(env[2]); 947 948 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || 949 sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) 950 pr_debug("error creating sysfs link"); 951 952 dc->status_update_thread = kthread_run(cached_dev_status_update, 953 dc, "bcache_status_update"); 954 if (IS_ERR(dc->status_update_thread)) { 955 pr_warn("failed to create bcache_status_update kthread, " 956 "continue to run without monitoring backing " 957 "device status"); 958 } 959 } 960 961 /* 962 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed 963 * work dc->writeback_rate_update is running. Wait until the routine 964 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to 965 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out 966 * seconds, give up waiting here and continue to cancel it too. 
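 *
 * Note that time_out below counts one-jiffy polling steps, so the total
 * wait is WRITEBACK_RATE_UPDATE_SECS_MAX * HZ jiffies, i.e.
 * WRITEBACK_RATE_UPDATE_SECS_MAX seconds.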
967 */ 968 static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) 969 { 970 int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ; 971 972 do { 973 if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING, 974 &dc->disk.flags)) 975 break; 976 time_out--; 977 schedule_timeout_interruptible(1); 978 } while (time_out > 0); 979 980 if (time_out == 0) 981 pr_warn("give up waiting for dc->writeback_write_update to quit"); 982 983 cancel_delayed_work_sync(&dc->writeback_rate_update); 984 } 985 986 static void cached_dev_detach_finish(struct work_struct *w) 987 { 988 struct cached_dev *dc = container_of(w, struct cached_dev, detach); 989 struct closure cl; 990 991 closure_init_stack(&cl); 992 993 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); 994 BUG_ON(refcount_read(&dc->count)); 995 996 mutex_lock(&bch_register_lock); 997 998 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 999 cancel_writeback_rate_update_dwork(dc); 1000 1001 if (!IS_ERR_OR_NULL(dc->writeback_thread)) { 1002 kthread_stop(dc->writeback_thread); 1003 dc->writeback_thread = NULL; 1004 } 1005 1006 memset(&dc->sb.set_uuid, 0, 16); 1007 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); 1008 1009 bch_write_bdev_super(dc, &cl); 1010 closure_sync(&cl); 1011 1012 calc_cached_dev_sectors(dc->disk.c); 1013 bcache_device_detach(&dc->disk); 1014 list_move(&dc->list, &uncached_devices); 1015 1016 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); 1017 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); 1018 1019 mutex_unlock(&bch_register_lock); 1020 1021 pr_info("Caching disabled for %s", dc->backing_dev_name); 1022 1023 /* Drop ref we took in cached_dev_detach() */ 1024 closure_put(&dc->disk.cl); 1025 } 1026 1027 void bch_cached_dev_detach(struct cached_dev *dc) 1028 { 1029 lockdep_assert_held(&bch_register_lock); 1030 1031 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 1032 return; 1033 1034 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) 1035 return; 1036 1037 /* 1038 * Block the device from being closed and freed until we're finished 1039 * detaching 1040 */ 1041 closure_get(&dc->disk.cl); 1042 1043 bch_writeback_queue(dc); 1044 1045 cached_dev_put(dc); 1046 } 1047 1048 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, 1049 uint8_t *set_uuid) 1050 { 1051 uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds()); 1052 struct uuid_entry *u; 1053 struct cached_dev *exist_dc, *t; 1054 1055 if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || 1056 (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) 1057 return -ENOENT; 1058 1059 if (dc->disk.c) { 1060 pr_err("Can't attach %s: already attached", 1061 dc->backing_dev_name); 1062 return -EINVAL; 1063 } 1064 1065 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { 1066 pr_err("Can't attach %s: shutting down", 1067 dc->backing_dev_name); 1068 return -EINVAL; 1069 } 1070 1071 if (dc->sb.block_size < c->sb.block_size) { 1072 /* Will die */ 1073 pr_err("Couldn't attach %s: block size less than set's block size", 1074 dc->backing_dev_name); 1075 return -EINVAL; 1076 } 1077 1078 /* Check whether already attached */ 1079 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { 1080 if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { 1081 pr_err("Tried to attach %s but duplicate UUID already attached", 1082 dc->backing_dev_name); 1083 1084 return -EINVAL; 1085 } 1086 } 1087 1088 u = uuid_find(c, dc->sb.uuid); 1089 1090 if (u && 1091 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || 1092 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { 1093 
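		/*
		 * The backing device is stale or was cleanly unregistered,
		 * so don't reuse its old uuid entry: invalidate it here and
		 * fall through to allocating a fresh slot below.
		 */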
memcpy(u->uuid, invalid_uuid, 16); 1094 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); 1095 u = NULL; 1096 } 1097 1098 if (!u) { 1099 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1100 pr_err("Couldn't find uuid for %s in set", 1101 dc->backing_dev_name); 1102 return -ENOENT; 1103 } 1104 1105 u = uuid_find_empty(c); 1106 if (!u) { 1107 pr_err("Not caching %s, no room for UUID", 1108 dc->backing_dev_name); 1109 return -EINVAL; 1110 } 1111 } 1112 1113 /* 1114 * Deadlocks since we're called via sysfs... 1115 * sysfs_remove_file(&dc->kobj, &sysfs_attach); 1116 */ 1117 1118 if (bch_is_zero(u->uuid, 16)) { 1119 struct closure cl; 1120 1121 closure_init_stack(&cl); 1122 1123 memcpy(u->uuid, dc->sb.uuid, 16); 1124 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); 1125 u->first_reg = u->last_reg = rtime; 1126 bch_uuid_write(c); 1127 1128 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); 1129 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); 1130 1131 bch_write_bdev_super(dc, &cl); 1132 closure_sync(&cl); 1133 } else { 1134 u->last_reg = rtime; 1135 bch_uuid_write(c); 1136 } 1137 1138 bcache_device_attach(&dc->disk, c, u - c->uuids); 1139 list_move(&dc->list, &c->cached_devs); 1140 calc_cached_dev_sectors(c); 1141 1142 /* 1143 * dc->c must be set before dc->count != 0 - paired with the mb in 1144 * cached_dev_get() 1145 */ 1146 smp_wmb(); 1147 refcount_set(&dc->count, 1); 1148 1149 /* Block writeback thread, but spawn it */ 1150 down_write(&dc->writeback_lock); 1151 if (bch_cached_dev_writeback_start(dc)) { 1152 up_write(&dc->writeback_lock); 1153 return -ENOMEM; 1154 } 1155 1156 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1157 atomic_set(&dc->has_dirty, 1); 1158 bch_writeback_queue(dc); 1159 } 1160 1161 bch_sectors_dirty_init(&dc->disk); 1162 1163 bch_cached_dev_run(dc); 1164 bcache_device_link(&dc->disk, c, "bdev"); 1165 atomic_inc(&c->attached_dev_nr); 1166 1167 /* Allow the writeback thread to proceed */ 1168 up_write(&dc->writeback_lock); 1169 1170 pr_info("Caching %s as %s on set %pU", 1171 dc->backing_dev_name, 1172 dc->disk.disk->disk_name, 1173 dc->disk.c->sb.set_uuid); 1174 return 0; 1175 } 1176 1177 void bch_cached_dev_release(struct kobject *kobj) 1178 { 1179 struct cached_dev *dc = container_of(kobj, struct cached_dev, 1180 disk.kobj); 1181 kfree(dc); 1182 module_put(THIS_MODULE); 1183 } 1184 1185 static void cached_dev_free(struct closure *cl) 1186 { 1187 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1188 1189 mutex_lock(&bch_register_lock); 1190 1191 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 1192 cancel_writeback_rate_update_dwork(dc); 1193 1194 if (!IS_ERR_OR_NULL(dc->writeback_thread)) 1195 kthread_stop(dc->writeback_thread); 1196 if (dc->writeback_write_wq) 1197 destroy_workqueue(dc->writeback_write_wq); 1198 if (!IS_ERR_OR_NULL(dc->status_update_thread)) 1199 kthread_stop(dc->status_update_thread); 1200 1201 if (atomic_read(&dc->running)) 1202 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 1203 bcache_device_free(&dc->disk); 1204 list_del(&dc->list); 1205 1206 mutex_unlock(&bch_register_lock); 1207 1208 if (!IS_ERR_OR_NULL(dc->bdev)) 1209 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1210 1211 wake_up(&unregister_wait); 1212 1213 kobject_put(&dc->disk.kobj); 1214 } 1215 1216 static void cached_dev_flush(struct closure *cl) 1217 { 1218 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1219 struct bcache_device *d = &dc->disk; 1220 1221 mutex_lock(&bch_register_lock); 1222 bcache_device_unlink(d); 1223 
mutex_unlock(&bch_register_lock); 1224 1225 bch_cache_accounting_destroy(&dc->accounting); 1226 kobject_del(&d->kobj); 1227 1228 continue_at(cl, cached_dev_free, system_wq); 1229 } 1230 1231 static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) 1232 { 1233 int ret; 1234 struct io *io; 1235 struct request_queue *q = bdev_get_queue(dc->bdev); 1236 1237 __module_get(THIS_MODULE); 1238 INIT_LIST_HEAD(&dc->list); 1239 closure_init(&dc->disk.cl, NULL); 1240 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); 1241 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1242 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1243 sema_init(&dc->sb_write_mutex, 1); 1244 INIT_LIST_HEAD(&dc->io_lru); 1245 spin_lock_init(&dc->io_lock); 1246 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); 1247 1248 dc->sequential_cutoff = 4 << 20; 1249 1250 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1251 list_add(&io->lru, &dc->io_lru); 1252 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1253 } 1254 1255 dc->disk.stripe_size = q->limits.io_opt >> 9; 1256 1257 if (dc->disk.stripe_size) 1258 dc->partial_stripes_expensive = 1259 q->limits.raid_partial_stripes_expensive; 1260 1261 ret = bcache_device_init(&dc->disk, block_size, 1262 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1263 if (ret) 1264 return ret; 1265 1266 dc->disk.disk->queue->backing_dev_info->ra_pages = 1267 max(dc->disk.disk->queue->backing_dev_info->ra_pages, 1268 q->backing_dev_info->ra_pages); 1269 1270 atomic_set(&dc->io_errors, 0); 1271 dc->io_disable = false; 1272 dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT; 1273 /* default to auto */ 1274 dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; 1275 1276 bch_cached_dev_request_init(dc); 1277 bch_cached_dev_writeback_init(dc); 1278 return 0; 1279 } 1280 1281 /* Cached device - bcache superblock */ 1282 1283 static void register_bdev(struct cache_sb *sb, struct page *sb_page, 1284 struct block_device *bdev, 1285 struct cached_dev *dc) 1286 { 1287 const char *err = "cannot allocate memory"; 1288 struct cache_set *c; 1289 1290 bdevname(bdev, dc->backing_dev_name); 1291 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1292 dc->bdev = bdev; 1293 dc->bdev->bd_holder = dc; 1294 1295 bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1); 1296 bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page; 1297 get_page(sb_page); 1298 1299 1300 if (cached_dev_init(dc, sb->block_size << 9)) 1301 goto err; 1302 1303 err = "error creating kobject"; 1304 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1305 "bcache")) 1306 goto err; 1307 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1308 goto err; 1309 1310 pr_info("registered backing device %s", dc->backing_dev_name); 1311 1312 list_add(&dc->list, &uncached_devices); 1313 /* attach to a matched cache set if it exists */ 1314 list_for_each_entry(c, &bch_cache_sets, list) 1315 bch_cached_dev_attach(dc, c, NULL); 1316 1317 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || 1318 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) 1319 bch_cached_dev_run(dc); 1320 1321 return; 1322 err: 1323 pr_notice("error %s: %s", dc->backing_dev_name, err); 1324 bcache_device_stop(&dc->disk); 1325 } 1326 1327 /* Flash only volumes */ 1328 1329 void bch_flash_dev_release(struct kobject *kobj) 1330 { 1331 struct bcache_device *d = container_of(kobj, struct bcache_device, 1332 kobj); 1333 kfree(d); 1334 } 1335 1336 static void flash_dev_free(struct closure *cl) 1337 { 1338 struct bcache_device *d = container_of(cl, 
struct bcache_device, cl); 1339 1340 mutex_lock(&bch_register_lock); 1341 atomic_long_sub(bcache_dev_sectors_dirty(d), 1342 &d->c->flash_dev_dirty_sectors); 1343 bcache_device_free(d); 1344 mutex_unlock(&bch_register_lock); 1345 kobject_put(&d->kobj); 1346 } 1347 1348 static void flash_dev_flush(struct closure *cl) 1349 { 1350 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1351 1352 mutex_lock(&bch_register_lock); 1353 bcache_device_unlink(d); 1354 mutex_unlock(&bch_register_lock); 1355 kobject_del(&d->kobj); 1356 continue_at(cl, flash_dev_free, system_wq); 1357 } 1358 1359 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) 1360 { 1361 struct bcache_device *d = kzalloc(sizeof(struct bcache_device), 1362 GFP_KERNEL); 1363 if (!d) 1364 return -ENOMEM; 1365 1366 closure_init(&d->cl, NULL); 1367 set_closure_fn(&d->cl, flash_dev_flush, system_wq); 1368 1369 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1370 1371 if (bcache_device_init(d, block_bytes(c), u->sectors)) 1372 goto err; 1373 1374 bcache_device_attach(d, c, u - c->uuids); 1375 bch_sectors_dirty_init(d); 1376 bch_flash_dev_request_init(d); 1377 add_disk(d->disk); 1378 1379 if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) 1380 goto err; 1381 1382 bcache_device_link(d, c, "volume"); 1383 1384 return 0; 1385 err: 1386 kobject_put(&d->kobj); 1387 return -ENOMEM; 1388 } 1389 1390 static int flash_devs_run(struct cache_set *c) 1391 { 1392 int ret = 0; 1393 struct uuid_entry *u; 1394 1395 for (u = c->uuids; 1396 u < c->uuids + c->nr_uuids && !ret; 1397 u++) 1398 if (UUID_FLASH_ONLY(u)) 1399 ret = flash_dev_run(c, u); 1400 1401 return ret; 1402 } 1403 1404 int bch_flash_dev_create(struct cache_set *c, uint64_t size) 1405 { 1406 struct uuid_entry *u; 1407 1408 if (test_bit(CACHE_SET_STOPPING, &c->flags)) 1409 return -EINTR; 1410 1411 if (!test_bit(CACHE_SET_RUNNING, &c->flags)) 1412 return -EPERM; 1413 1414 u = uuid_find_empty(c); 1415 if (!u) { 1416 pr_err("Can't create volume, no room for UUID"); 1417 return -EINVAL; 1418 } 1419 1420 get_random_bytes(u->uuid, 16); 1421 memset(u->label, 0, 32); 1422 u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds()); 1423 1424 SET_UUID_FLASH_ONLY(u, 1); 1425 u->sectors = size >> 9; 1426 1427 bch_uuid_write(c); 1428 1429 return flash_dev_run(c, u); 1430 } 1431 1432 bool bch_cached_dev_error(struct cached_dev *dc) 1433 { 1434 struct cache_set *c; 1435 1436 if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 1437 return false; 1438 1439 dc->io_disable = true; 1440 /* make others know io_disable is true earlier */ 1441 smp_mb(); 1442 1443 pr_err("stop %s: too many IO errors on backing device %s\n", 1444 dc->disk.disk->disk_name, dc->backing_dev_name); 1445 1446 /* 1447 * If the cached device is still attached to a cache set, 1448 * even dc->io_disable is true and no more I/O requests 1449 * accepted, cache device internal I/O (writeback scan or 1450 * garbage collection) may still prevent bcache device from 1451 * being stopped. So here CACHE_SET_IO_DISABLE should be 1452 * set to c->flags too, to make the internal I/O to cache 1453 * device rejected and stopped immediately. 1454 * If c is NULL, that means the bcache device is not attached 1455 * to any cache set, then no CACHE_SET_IO_DISABLE bit to set. 
1456 */ 1457 c = dc->disk.c; 1458 if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1459 pr_info("CACHE_SET_IO_DISABLE already set"); 1460 1461 bcache_device_stop(&dc->disk); 1462 return true; 1463 } 1464 1465 /* Cache set */ 1466 1467 __printf(2, 3) 1468 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 1469 { 1470 va_list args; 1471 1472 if (c->on_error != ON_ERROR_PANIC && 1473 test_bit(CACHE_SET_STOPPING, &c->flags)) 1474 return false; 1475 1476 if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1477 pr_info("CACHE_SET_IO_DISABLE already set"); 1478 1479 /* 1480 * XXX: we can be called from atomic context 1481 * acquire_console_sem(); 1482 */ 1483 1484 pr_err("bcache: error on %pU: ", c->sb.set_uuid); 1485 1486 va_start(args, fmt); 1487 vprintk(fmt, args); 1488 va_end(args); 1489 1490 pr_err(", disabling caching\n"); 1491 1492 if (c->on_error == ON_ERROR_PANIC) 1493 panic("panic forced after error\n"); 1494 1495 bch_cache_set_unregister(c); 1496 return true; 1497 } 1498 1499 void bch_cache_set_release(struct kobject *kobj) 1500 { 1501 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 1502 1503 kfree(c); 1504 module_put(THIS_MODULE); 1505 } 1506 1507 static void cache_set_free(struct closure *cl) 1508 { 1509 struct cache_set *c = container_of(cl, struct cache_set, cl); 1510 struct cache *ca; 1511 unsigned int i; 1512 1513 debugfs_remove(c->debug); 1514 1515 bch_open_buckets_free(c); 1516 bch_btree_cache_free(c); 1517 bch_journal_free(c); 1518 1519 for_each_cache(ca, c, i) 1520 if (ca) { 1521 ca->set = NULL; 1522 c->cache[ca->sb.nr_this_dev] = NULL; 1523 kobject_put(&ca->kobj); 1524 } 1525 1526 bch_bset_sort_state_free(&c->sort); 1527 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); 1528 1529 if (c->moving_gc_wq) 1530 destroy_workqueue(c->moving_gc_wq); 1531 bioset_exit(&c->bio_split); 1532 mempool_exit(&c->fill_iter); 1533 mempool_exit(&c->bio_meta); 1534 mempool_exit(&c->search); 1535 kfree(c->devices); 1536 1537 mutex_lock(&bch_register_lock); 1538 list_del(&c->list); 1539 mutex_unlock(&bch_register_lock); 1540 1541 pr_info("Cache set %pU unregistered", c->sb.set_uuid); 1542 wake_up(&unregister_wait); 1543 1544 closure_debug_destroy(&c->cl); 1545 kobject_put(&c->kobj); 1546 } 1547 1548 static void cache_set_flush(struct closure *cl) 1549 { 1550 struct cache_set *c = container_of(cl, struct cache_set, caching); 1551 struct cache *ca; 1552 struct btree *b; 1553 unsigned int i; 1554 1555 bch_cache_accounting_destroy(&c->accounting); 1556 1557 kobject_put(&c->internal); 1558 kobject_del(&c->kobj); 1559 1560 if (c->gc_thread) 1561 kthread_stop(c->gc_thread); 1562 1563 if (!IS_ERR_OR_NULL(c->root)) 1564 list_add(&c->root->list, &c->btree_cache); 1565 1566 /* Should skip this if we're unregistering because of an error */ 1567 list_for_each_entry(b, &c->btree_cache, list) { 1568 mutex_lock(&b->write_lock); 1569 if (btree_node_dirty(b)) 1570 __bch_btree_node_write(b, NULL); 1571 mutex_unlock(&b->write_lock); 1572 } 1573 1574 for_each_cache(ca, c, i) 1575 if (ca->alloc_thread) 1576 kthread_stop(ca->alloc_thread); 1577 1578 if (c->journal.cur) { 1579 cancel_delayed_work_sync(&c->journal.work); 1580 /* flush last journal entry if needed */ 1581 c->journal.work.work.func(&c->journal.work.work); 1582 } 1583 1584 closure_return(cl); 1585 } 1586 1587 /* 1588 * This function is only called when CACHE_SET_IO_DISABLE is set, which means 1589 * cache set is unregistering due to too many I/O errors. 
In this condition,
 * the bcache device might be stopped; it depends on the
 * stop_when_cache_set_failed value and on whether the broken cache has
 * dirty data:
 *
 *	dc->stop_when_cache_set_failed	dc->has_dirty	stop bcache device
 *	BCH_CACHED_STOP_AUTO		0		NO
 *	BCH_CACHED_STOP_AUTO		1		YES
 *	BCH_CACHED_DEV_STOP_ALWAYS	0		YES
 *	BCH_CACHED_DEV_STOP_ALWAYS	1		YES
 *
 * The expected behavior is: if stop_when_cache_set_failed is configured to
 * "auto" via the sysfs interface, the bcache device will not be stopped if
 * the backing device is clean on the broken cache device.
 */
static void conditional_stop_bcache_device(struct cache_set *c,
					   struct bcache_device *d,
					   struct cached_dev *dc)
{
	if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
		pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.",
			d->disk->disk_name, c->sb.set_uuid);
		bcache_device_stop(d);
	} else if (atomic_read(&dc->has_dirty)) {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
		 * and dc->has_dirty == 1
		 */
		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
			d->disk->disk_name);
		/*
		 * There might be a small time gap in which the cache set is
		 * released but the bcache device is not. Inside this gap,
		 * regular I/O requests go directly to the backing device,
		 * as no cache set is attached. In writeback mode with a
		 * dirty cache this may leave the data inconsistent.
		 * Therefore, before calling bcache_device_stop() for a
		 * broken cache device, dc->io_disable should be explicitly
		 * set to true.
		 */
		dc->io_disable = true;
		/* make others know io_disable is true earlier */
		smp_mb();
		bcache_device_stop(d);
	} else {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
		 * and dc->has_dirty == 0
		 */
		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.",
			d->disk->disk_name);
	}
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc;
	struct bcache_device *d;
	size_t i;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++) {
		d = c->devices[i];
		if (!d)
			continue;

		if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
		    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
			dc = container_of(d, struct cached_dev, disk);
			bch_cached_dev_detach(dc);
			if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
				conditional_stop_bcache_device(c, d, dc);
		} else {
			bcache_device_stop(d);
		}
	}

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);

	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here?
*/ 1704 closure_set_stopped(&c->cl); 1705 closure_put(&c->cl); 1706 1707 kobject_init(&c->kobj, &bch_cache_set_ktype); 1708 kobject_init(&c->internal, &bch_cache_set_internal_ktype); 1709 1710 bch_cache_accounting_init(&c->accounting, &c->cl); 1711 1712 memcpy(c->sb.set_uuid, sb->set_uuid, 16); 1713 c->sb.block_size = sb->block_size; 1714 c->sb.bucket_size = sb->bucket_size; 1715 c->sb.nr_in_set = sb->nr_in_set; 1716 c->sb.last_mount = sb->last_mount; 1717 c->bucket_bits = ilog2(sb->bucket_size); 1718 c->block_bits = ilog2(sb->block_size); 1719 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); 1720 c->devices_max_used = 0; 1721 atomic_set(&c->attached_dev_nr, 0); 1722 c->btree_pages = bucket_pages(c); 1723 if (c->btree_pages > BTREE_MAX_PAGES) 1724 c->btree_pages = max_t(int, c->btree_pages / 4, 1725 BTREE_MAX_PAGES); 1726 1727 sema_init(&c->sb_write_mutex, 1); 1728 mutex_init(&c->bucket_lock); 1729 init_waitqueue_head(&c->btree_cache_wait); 1730 init_waitqueue_head(&c->bucket_wait); 1731 init_waitqueue_head(&c->gc_wait); 1732 sema_init(&c->uuid_write_mutex, 1); 1733 1734 spin_lock_init(&c->btree_gc_time.lock); 1735 spin_lock_init(&c->btree_split_time.lock); 1736 spin_lock_init(&c->btree_read_time.lock); 1737 1738 bch_moving_init_cache_set(c); 1739 1740 INIT_LIST_HEAD(&c->list); 1741 INIT_LIST_HEAD(&c->cached_devs); 1742 INIT_LIST_HEAD(&c->btree_cache); 1743 INIT_LIST_HEAD(&c->btree_cache_freeable); 1744 INIT_LIST_HEAD(&c->btree_cache_freed); 1745 INIT_LIST_HEAD(&c->data_buckets); 1746 1747 iter_size = (sb->bucket_size / sb->block_size + 1) * 1748 sizeof(struct btree_iter_set); 1749 1750 if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) || 1751 mempool_init_slab_pool(&c->search, 32, bch_search_cache) || 1752 mempool_init_kmalloc_pool(&c->bio_meta, 2, 1753 sizeof(struct bbio) + sizeof(struct bio_vec) * 1754 bucket_pages(c)) || 1755 mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) || 1756 bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), 1757 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) || 1758 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || 1759 !(c->moving_gc_wq = alloc_workqueue("bcache_gc", 1760 WQ_MEM_RECLAIM, 0)) || 1761 bch_journal_alloc(c) || 1762 bch_btree_cache_alloc(c) || 1763 bch_open_buckets_alloc(c) || 1764 bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) 1765 goto err; 1766 1767 c->congested_read_threshold_us = 2000; 1768 c->congested_write_threshold_us = 20000; 1769 c->error_limit = DEFAULT_IO_ERROR_LIMIT; 1770 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); 1771 1772 return c; 1773 err: 1774 bch_cache_set_unregister(c); 1775 return NULL; 1776 } 1777 1778 static void run_cache_set(struct cache_set *c) 1779 { 1780 const char *err = "cannot allocate memory"; 1781 struct cached_dev *dc, *t; 1782 struct cache *ca; 1783 struct closure cl; 1784 unsigned int i; 1785 1786 closure_init_stack(&cl); 1787 1788 for_each_cache(ca, c, i) 1789 c->nbuckets += ca->sb.nbuckets; 1790 set_gc_sectors(c); 1791 1792 if (CACHE_SYNC(&c->sb)) { 1793 LIST_HEAD(journal); 1794 struct bkey *k; 1795 struct jset *j; 1796 1797 err = "cannot allocate memory for journal"; 1798 if (bch_journal_read(c, &journal)) 1799 goto err; 1800 1801 pr_debug("btree_journal_read() done"); 1802 1803 err = "no journal entries found"; 1804 if (list_empty(&journal)) 1805 goto err; 1806 1807 j = &list_entry(journal.prev, struct journal_replay, list)->j; 1808 1809 err = "IO error reading priorities"; 1810 for_each_cache(ca, c, i) 1811 prio_read(ca, 
j->prio_bucket[ca->sb.nr_this_dev]); 1812 1813 /* 1814 * If prio_read() fails it'll call cache_set_error and we'll 1815 * tear everything down right away, but if we perhaps checked 1816 * sooner we could avoid journal replay. 1817 */ 1818 1819 k = &j->btree_root; 1820 1821 err = "bad btree root"; 1822 if (__bch_btree_ptr_invalid(c, k)) 1823 goto err; 1824 1825 err = "error reading btree root"; 1826 c->root = bch_btree_node_get(c, NULL, k, 1827 j->btree_level, 1828 true, NULL); 1829 if (IS_ERR_OR_NULL(c->root)) 1830 goto err; 1831 1832 list_del_init(&c->root->list); 1833 rw_unlock(true, c->root); 1834 1835 err = uuid_read(c, j, &cl); 1836 if (err) 1837 goto err; 1838 1839 err = "error in recovery"; 1840 if (bch_btree_check(c)) 1841 goto err; 1842 1843 bch_journal_mark(c, &journal); 1844 bch_initial_gc_finish(c); 1845 pr_debug("btree_check() done"); 1846 1847 /* 1848 * bcache_journal_next() can't happen sooner, or 1849 * btree_gc_finish() will give spurious errors about last_gc > 1850 * gc_gen - this is a hack but oh well. 1851 */ 1852 bch_journal_next(&c->journal); 1853 1854 err = "error starting allocator thread"; 1855 for_each_cache(ca, c, i) 1856 if (bch_cache_allocator_start(ca)) 1857 goto err; 1858 1859 /* 1860 * First place it's safe to allocate: btree_check() and 1861 * btree_gc_finish() have to run before we have buckets to 1862 * allocate, and bch_bucket_alloc_set() might cause a journal 1863 * entry to be written so bcache_journal_next() has to be called 1864 * first. 1865 * 1866 * If the uuids were in the old format we have to rewrite them 1867 * before the next journal entry is written: 1868 */ 1869 if (j->version < BCACHE_JSET_VERSION_UUID) 1870 __uuid_write(c); 1871 1872 bch_journal_replay(c, &journal); 1873 } else { 1874 pr_notice("invalidating existing data"); 1875 1876 for_each_cache(ca, c, i) { 1877 unsigned int j; 1878 1879 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 1880 2, SB_JOURNAL_BUCKETS); 1881 1882 for (j = 0; j < ca->sb.keys; j++) 1883 ca->sb.d[j] = ca->sb.first_bucket + j; 1884 } 1885 1886 bch_initial_gc_finish(c); 1887 1888 err = "error starting allocator thread"; 1889 for_each_cache(ca, c, i) 1890 if (bch_cache_allocator_start(ca)) 1891 goto err; 1892 1893 mutex_lock(&c->bucket_lock); 1894 for_each_cache(ca, c, i) 1895 bch_prio_write(ca); 1896 mutex_unlock(&c->bucket_lock); 1897 1898 err = "cannot allocate new UUID bucket"; 1899 if (__uuid_write(c)) 1900 goto err; 1901 1902 err = "cannot allocate new btree root"; 1903 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); 1904 if (IS_ERR_OR_NULL(c->root)) 1905 goto err; 1906 1907 mutex_lock(&c->root->write_lock); 1908 bkey_copy_key(&c->root->key, &MAX_KEY); 1909 bch_btree_node_write(c->root, &cl); 1910 mutex_unlock(&c->root->write_lock); 1911 1912 bch_btree_set_root(c->root); 1913 rw_unlock(true, c->root); 1914 1915 /* 1916 * We don't want to write the first journal entry until 1917 * everything is set up - fortunately journal entries won't be 1918 * written until the SET_CACHE_SYNC() here: 1919 */ 1920 SET_CACHE_SYNC(&c->sb, true); 1921 1922 bch_journal_next(&c->journal); 1923 bch_journal_meta(c, &cl); 1924 } 1925 1926 err = "error starting gc thread"; 1927 if (bch_gc_thread_start(c)) 1928 goto err; 1929 1930 closure_sync(&cl); 1931 c->sb.last_mount = (u32)ktime_get_real_seconds(); 1932 bcache_write_super(c); 1933 1934 list_for_each_entry_safe(dc, t, &uncached_devices, list) 1935 bch_cached_dev_attach(dc, c, NULL); 1936 1937 flash_devs_run(c); 1938 1939 set_bit(CACHE_SET_RUNNING, &c->flags); 1940 
return; 1941 err: 1942 closure_sync(&cl); 1943 /* XXX: test this, it's broken */ 1944 bch_cache_set_error(c, "%s", err); 1945 } 1946 1947 static bool can_attach_cache(struct cache *ca, struct cache_set *c) 1948 { 1949 return ca->sb.block_size == c->sb.block_size && 1950 ca->sb.bucket_size == c->sb.bucket_size && 1951 ca->sb.nr_in_set == c->sb.nr_in_set; 1952 } 1953 1954 static const char *register_cache_set(struct cache *ca) 1955 { 1956 char buf[12]; 1957 const char *err = "cannot allocate memory"; 1958 struct cache_set *c; 1959 1960 list_for_each_entry(c, &bch_cache_sets, list) 1961 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { 1962 if (c->cache[ca->sb.nr_this_dev]) 1963 return "duplicate cache set member"; 1964 1965 if (!can_attach_cache(ca, c)) 1966 return "cache sb does not match set"; 1967 1968 if (!CACHE_SYNC(&ca->sb)) 1969 SET_CACHE_SYNC(&c->sb, false); 1970 1971 goto found; 1972 } 1973 1974 c = bch_cache_set_alloc(&ca->sb); 1975 if (!c) 1976 return err; 1977 1978 err = "error creating kobject"; 1979 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || 1980 kobject_add(&c->internal, &c->kobj, "internal")) 1981 goto err; 1982 1983 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) 1984 goto err; 1985 1986 bch_debug_init_cache_set(c); 1987 1988 list_add(&c->list, &bch_cache_sets); 1989 found: 1990 sprintf(buf, "cache%i", ca->sb.nr_this_dev); 1991 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || 1992 sysfs_create_link(&c->kobj, &ca->kobj, buf)) 1993 goto err; 1994 1995 if (ca->sb.seq > c->sb.seq) { 1996 c->sb.version = ca->sb.version; 1997 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); 1998 c->sb.flags = ca->sb.flags; 1999 c->sb.seq = ca->sb.seq; 2000 pr_debug("set version = %llu", c->sb.version); 2001 } 2002 2003 kobject_get(&ca->kobj); 2004 ca->set = c; 2005 ca->set->cache[ca->sb.nr_this_dev] = ca; 2006 c->cache_by_alloc[c->caches_loaded++] = ca; 2007 2008 if (c->caches_loaded == c->sb.nr_in_set) 2009 run_cache_set(c); 2010 2011 return NULL; 2012 err: 2013 bch_cache_set_unregister(c); 2014 return err; 2015 } 2016 2017 /* Cache device */ 2018 2019 void bch_cache_release(struct kobject *kobj) 2020 { 2021 struct cache *ca = container_of(kobj, struct cache, kobj); 2022 unsigned int i; 2023 2024 if (ca->set) { 2025 BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); 2026 ca->set->cache[ca->sb.nr_this_dev] = NULL; 2027 } 2028 2029 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); 2030 kfree(ca->prio_buckets); 2031 vfree(ca->buckets); 2032 2033 free_heap(&ca->heap); 2034 free_fifo(&ca->free_inc); 2035 2036 for (i = 0; i < RESERVE_NR; i++) 2037 free_fifo(&ca->free[i]); 2038 2039 if (ca->sb_bio.bi_inline_vecs[0].bv_page) 2040 put_page(bio_first_page_all(&ca->sb_bio)); 2041 2042 if (!IS_ERR_OR_NULL(ca->bdev)) 2043 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2044 2045 kfree(ca); 2046 module_put(THIS_MODULE); 2047 } 2048 2049 static int cache_alloc(struct cache *ca) 2050 { 2051 size_t free; 2052 size_t btree_buckets; 2053 struct bucket *b; 2054 int ret = -ENOMEM; 2055 const char *err = NULL; 2056 2057 __module_get(THIS_MODULE); 2058 kobject_init(&ca->kobj, &bch_cache_ktype); 2059 2060 bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); 2061 2062 /* 2063 * when ca->sb.njournal_buckets is not zero, journal exists, 2064 * and in bch_journal_replay(), tree node may split, 2065 * so bucket of RESERVE_BTREE type is needed, 2066 * the worst situation is all journal buckets are valid journal, 2067 * and all the keys need to 
	 * replayed, so the number of RESERVE_BTREE buckets should be at
	 * least the number of journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
	if (!free) {
		ret = -EPERM;
		err = "ca->sb.nbuckets is too small";
		goto err_free;
	}

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
						GFP_KERNEL)) {
		err = "ca->free[RESERVE_BTREE] alloc failed";
		goto err_btree_alloc;
	}

	if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
							GFP_KERNEL)) {
		err = "ca->free[RESERVE_PRIO] alloc failed";
		goto err_prio_alloc;
	}

	if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
		err = "ca->free[RESERVE_MOVINGGC] alloc failed";
		goto err_movinggc_alloc;
	}

	if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
		err = "ca->free[RESERVE_NONE] alloc failed";
		goto err_none_alloc;
	}

	if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
		err = "ca->free_inc alloc failed";
		goto err_free_inc_alloc;
	}

	if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
		err = "ca->heap alloc failed";
		goto err_heap_alloc;
	}

	ca->buckets = vzalloc(array_size(sizeof(struct bucket),
					 ca->sb.nbuckets));
	if (!ca->buckets) {
		err = "ca->buckets alloc failed";
		goto err_buckets_alloc;
	}

	ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
					       prio_buckets(ca), 2),
				   GFP_KERNEL);
	if (!ca->prio_buckets) {
		err = "ca->prio_buckets alloc failed";
		goto err_prio_buckets_alloc;
	}

	ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca);
	if (!ca->disk_buckets) {
		err = "ca->disk_buckets alloc failed";
		goto err_disk_buckets_alloc;
	}

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);
	return 0;

err_disk_buckets_alloc:
	kfree(ca->prio_buckets);
err_prio_buckets_alloc:
	vfree(ca->buckets);
err_buckets_alloc:
	free_heap(&ca->heap);
err_heap_alloc:
	free_fifo(&ca->free_inc);
err_free_inc_alloc:
	free_fifo(&ca->free[RESERVE_NONE]);
err_none_alloc:
	free_fifo(&ca->free[RESERVE_MOVINGGC]);
err_movinggc_alloc:
	free_fifo(&ca->free[RESERVE_PRIO]);
err_prio_alloc:
	free_fifo(&ca->free[RESERVE_BTREE]);
err_btree_alloc:
err_free:
	module_put(THIS_MODULE);
	if (err)
		pr_notice("error %s: %s", ca->cache_dev_name, err);
	return ret;
}

static int register_cache(struct cache_sb *sb, struct page *sb_page,
				struct block_device *bdev, struct cache *ca)
{
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, ca->cache_dev_name);
	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else if (ret == -EPERM)
			err = "cache_alloc(): cache device is too small";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj,
			&part_to_dev(bdev->bd_part)->kobj,
			"bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", ca->cache_dev_name);

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error %s: %s", ca->cache_dev_name, err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size);

kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);

static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned int i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	path = kstrndup(buffer, size, GFP_KERNEL);
	if (!path)
		goto err;

	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
	if (!sb)
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto out;
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	err = "failed to register device";
	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		if (!dc)
			goto err_close;

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

		if (!ca)
			goto err_close;
		if (register_cache(sb, sb_page, bdev, ca) != 0)
			goto err;
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	pr_info("error %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bch_journal_wq)
		destroy_workqueue(bch_journal_wq);

	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

/* Check and fixup module parameters */
static void check_module_parameters(void)
{
	if (bch_cutoff_writeback_sync == 0)
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
	else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
		pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u",
			bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
	}

	if (bch_cutoff_writeback == 0)
		bch_cutoff_writeback = CUTOFF_WRITEBACK;
	else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
		pr_warn("set bch_cutoff_writeback (%u) to max value %u",
			bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
		bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
	}

	if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
		pr_warn("set bch_cutoff_writeback (%u) to %u",
			bch_cutoff_writeback, bch_cutoff_writeback_sync);
		bch_cutoff_writeback = bch_cutoff_writeback_sync;
	}
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	check_module_parameters();

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
	if (!bcache_wq)
		goto err;

	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
	if (!bch_journal_wq)
		goto err;

	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
	if (!bcache_kobj)
		goto err;

	if (bch_request_init() ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	bch_debug_init();
	closure_debug_init();

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

/*
 * Module hooks
 */
module_exit(bcache_exit);
module_init(bcache_init);

module_param(bch_cutoff_writeback, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");

module_param(bch_cutoff_writeback_sync, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");

MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_LICENSE("GPL");
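
/*
 * Typical usage from userspace: format a device with the make-bcache tool,
 * then register it by writing its path to the sysfs file created above, e.g.
 *
 *	echo /dev/sdb > /sys/fs/bcache/register
 *
 * register_quiet behaves the same but suppresses the error message when the
 * device is busy or already registered.
 */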