1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * bcache setup/teardown code, and some metadata io - read a superblock and 4 * figure out what to do with it. 5 * 6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 7 * Copyright 2012 Google, Inc. 8 */ 9 10 #include "bcache.h" 11 #include "btree.h" 12 #include "debug.h" 13 #include "extents.h" 14 #include "request.h" 15 #include "writeback.h" 16 17 #include <linux/blkdev.h> 18 #include <linux/debugfs.h> 19 #include <linux/genhd.h> 20 #include <linux/idr.h> 21 #include <linux/kthread.h> 22 #include <linux/workqueue.h> 23 #include <linux/module.h> 24 #include <linux/random.h> 25 #include <linux/reboot.h> 26 #include <linux/sysfs.h> 27 28 unsigned int bch_cutoff_writeback; 29 unsigned int bch_cutoff_writeback_sync; 30 31 static const char bcache_magic[] = { 32 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, 33 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 34 }; 35 36 static const char invalid_uuid[] = { 37 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, 38 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 39 }; 40 41 static struct kobject *bcache_kobj; 42 struct mutex bch_register_lock; 43 bool bcache_is_reboot; 44 LIST_HEAD(bch_cache_sets); 45 static LIST_HEAD(uncached_devices); 46 47 static int bcache_major; 48 static DEFINE_IDA(bcache_device_idx); 49 static wait_queue_head_t unregister_wait; 50 struct workqueue_struct *bcache_wq; 51 struct workqueue_struct *bch_journal_wq; 52 53 54 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 55 /* limitation of partitions number on single bcache device */ 56 #define BCACHE_MINORS 128 57 /* limitation of bcache devices number on single system */ 58 #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS) 59 60 /* Superblock */ 61 62 static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev, 63 struct cache_sb_disk *s) 64 { 65 const char *err; 66 unsigned int i; 67 68 sb->nbuckets = le64_to_cpu(s->nbuckets); 69 sb->bucket_size = le16_to_cpu(s->bucket_size); 70 71 sb->nr_in_set = le16_to_cpu(s->nr_in_set); 72 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); 73 74 err = "Too many buckets"; 75 if (sb->nbuckets > LONG_MAX) 76 goto err; 77 78 err = "Not enough buckets"; 79 if (sb->nbuckets < 1 << 7) 80 goto err; 81 82 err = "Bad block size (not power of 2)"; 83 if (!is_power_of_2(sb->block_size)) 84 goto err; 85 86 err = "Bad block size (larger than page size)"; 87 if (sb->block_size > PAGE_SECTORS) 88 goto err; 89 90 err = "Bad bucket size (not power of 2)"; 91 if (!is_power_of_2(sb->bucket_size)) 92 goto err; 93 94 err = "Bad bucket size (smaller than page size)"; 95 if (sb->bucket_size < PAGE_SECTORS) 96 goto err; 97 98 err = "Invalid superblock: device too small"; 99 if (get_capacity(bdev->bd_disk) < 100 sb->bucket_size * sb->nbuckets) 101 goto err; 102 103 err = "Bad UUID"; 104 if (bch_is_zero(sb->set_uuid, 16)) 105 goto err; 106 107 err = "Bad cache device number in set"; 108 if (!sb->nr_in_set || 109 sb->nr_in_set <= sb->nr_this_dev || 110 sb->nr_in_set > MAX_CACHES_PER_SET) 111 goto err; 112 113 err = "Journal buckets not sequential"; 114 for (i = 0; i < sb->keys; i++) 115 if (sb->d[i] != sb->first_bucket + i) 116 goto err; 117 118 err = "Too many journal buckets"; 119 if (sb->first_bucket + sb->keys > sb->nbuckets) 120 goto err; 121 122 err = "Invalid superblock: first bucket comes before end of super"; 123 if (sb->first_bucket * sb->bucket_size < 16) 124 goto err; 125 126 err = NULL; 127 err: 128 return err; 129 } 130 131 132 static const char 
*read_super(struct cache_sb *sb, struct block_device *bdev, 133 struct cache_sb_disk **res) 134 { 135 const char *err; 136 struct cache_sb_disk *s; 137 struct page *page; 138 unsigned int i; 139 140 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, 141 SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL); 142 if (IS_ERR(page)) 143 return "IO error"; 144 s = page_address(page) + offset_in_page(SB_OFFSET); 145 146 sb->offset = le64_to_cpu(s->offset); 147 sb->version = le64_to_cpu(s->version); 148 149 memcpy(sb->magic, s->magic, 16); 150 memcpy(sb->uuid, s->uuid, 16); 151 memcpy(sb->set_uuid, s->set_uuid, 16); 152 memcpy(sb->label, s->label, SB_LABEL_SIZE); 153 154 sb->flags = le64_to_cpu(s->flags); 155 sb->seq = le64_to_cpu(s->seq); 156 sb->last_mount = le32_to_cpu(s->last_mount); 157 sb->first_bucket = le16_to_cpu(s->first_bucket); 158 sb->keys = le16_to_cpu(s->keys); 159 160 for (i = 0; i < SB_JOURNAL_BUCKETS; i++) 161 sb->d[i] = le64_to_cpu(s->d[i]); 162 163 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n", 164 sb->version, sb->flags, sb->seq, sb->keys); 165 166 err = "Not a bcache superblock (bad offset)"; 167 if (sb->offset != SB_SECTOR) 168 goto err; 169 170 err = "Not a bcache superblock (bad magic)"; 171 if (memcmp(sb->magic, bcache_magic, 16)) 172 goto err; 173 174 err = "Too many journal buckets"; 175 if (sb->keys > SB_JOURNAL_BUCKETS) 176 goto err; 177 178 err = "Bad checksum"; 179 if (s->csum != csum_set(s)) 180 goto err; 181 182 err = "Bad UUID"; 183 if (bch_is_zero(sb->uuid, 16)) 184 goto err; 185 186 sb->block_size = le16_to_cpu(s->block_size); 187 188 err = "Superblock block size smaller than device block size"; 189 if (sb->block_size << 9 < bdev_logical_block_size(bdev)) 190 goto err; 191 192 switch (sb->version) { 193 case BCACHE_SB_VERSION_BDEV: 194 sb->data_offset = BDEV_DATA_START_DEFAULT; 195 break; 196 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: 197 sb->data_offset = le64_to_cpu(s->data_offset); 198 199 err = "Bad data offset"; 200 if (sb->data_offset < BDEV_DATA_START_DEFAULT) 201 goto err; 202 203 break; 204 case BCACHE_SB_VERSION_CDEV: 205 case BCACHE_SB_VERSION_CDEV_WITH_UUID: 206 err = read_super_common(sb, bdev, s); 207 if (err) 208 goto err; 209 break; 210 default: 211 err = "Unsupported superblock version"; 212 goto err; 213 } 214 215 sb->last_mount = (u32)ktime_get_real_seconds(); 216 *res = s; 217 return NULL; 218 err: 219 put_page(page); 220 return err; 221 } 222 223 static void write_bdev_super_endio(struct bio *bio) 224 { 225 struct cached_dev *dc = bio->bi_private; 226 227 if (bio->bi_status) 228 bch_count_backing_io_errors(dc, bio); 229 230 closure_put(&dc->sb_write); 231 } 232 233 static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, 234 struct bio *bio) 235 { 236 unsigned int i; 237 238 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META; 239 bio->bi_iter.bi_sector = SB_SECTOR; 240 __bio_add_page(bio, virt_to_page(out), SB_SIZE, 241 offset_in_page(out)); 242 243 out->offset = cpu_to_le64(sb->offset); 244 out->version = cpu_to_le64(sb->version); 245 246 memcpy(out->uuid, sb->uuid, 16); 247 memcpy(out->set_uuid, sb->set_uuid, 16); 248 memcpy(out->label, sb->label, SB_LABEL_SIZE); 249 250 out->flags = cpu_to_le64(sb->flags); 251 out->seq = cpu_to_le64(sb->seq); 252 253 out->last_mount = cpu_to_le32(sb->last_mount); 254 out->first_bucket = cpu_to_le16(sb->first_bucket); 255 out->keys = cpu_to_le16(sb->keys); 256 257 for (i = 0; i < sb->keys; i++) 258 out->d[i] = cpu_to_le64(sb->d[i]); 259 260 out->csum = csum_set(out); 261 262 
pr_debug("ver %llu, flags %llu, seq %llu\n", 263 sb->version, sb->flags, sb->seq); 264 265 submit_bio(bio); 266 } 267 268 static void bch_write_bdev_super_unlock(struct closure *cl) 269 { 270 struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); 271 272 up(&dc->sb_write_mutex); 273 } 274 275 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) 276 { 277 struct closure *cl = &dc->sb_write; 278 struct bio *bio = &dc->sb_bio; 279 280 down(&dc->sb_write_mutex); 281 closure_init(cl, parent); 282 283 bio_init(bio, dc->sb_bv, 1); 284 bio_set_dev(bio, dc->bdev); 285 bio->bi_end_io = write_bdev_super_endio; 286 bio->bi_private = dc; 287 288 closure_get(cl); 289 /* I/O request sent to backing device */ 290 __write_super(&dc->sb, dc->sb_disk, bio); 291 292 closure_return_with_destructor(cl, bch_write_bdev_super_unlock); 293 } 294 295 static void write_super_endio(struct bio *bio) 296 { 297 struct cache *ca = bio->bi_private; 298 299 /* is_read = 0 */ 300 bch_count_io_errors(ca, bio->bi_status, 0, 301 "writing superblock"); 302 closure_put(&ca->set->sb_write); 303 } 304 305 static void bcache_write_super_unlock(struct closure *cl) 306 { 307 struct cache_set *c = container_of(cl, struct cache_set, sb_write); 308 309 up(&c->sb_write_mutex); 310 } 311 312 void bcache_write_super(struct cache_set *c) 313 { 314 struct closure *cl = &c->sb_write; 315 struct cache *ca; 316 unsigned int i; 317 318 down(&c->sb_write_mutex); 319 closure_init(cl, &c->cl); 320 321 c->sb.seq++; 322 323 for_each_cache(ca, c, i) { 324 struct bio *bio = &ca->sb_bio; 325 326 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; 327 ca->sb.seq = c->sb.seq; 328 ca->sb.last_mount = c->sb.last_mount; 329 330 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 331 332 bio_init(bio, ca->sb_bv, 1); 333 bio_set_dev(bio, ca->bdev); 334 bio->bi_end_io = write_super_endio; 335 bio->bi_private = ca; 336 337 closure_get(cl); 338 __write_super(&ca->sb, ca->sb_disk, bio); 339 } 340 341 closure_return_with_destructor(cl, bcache_write_super_unlock); 342 } 343 344 /* UUID io */ 345 346 static void uuid_endio(struct bio *bio) 347 { 348 struct closure *cl = bio->bi_private; 349 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 350 351 cache_set_err_on(bio->bi_status, c, "accessing uuids"); 352 bch_bbio_free(bio, c); 353 closure_put(cl); 354 } 355 356 static void uuid_io_unlock(struct closure *cl) 357 { 358 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 359 360 up(&c->uuid_write_mutex); 361 } 362 363 static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, 364 struct bkey *k, struct closure *parent) 365 { 366 struct closure *cl = &c->uuid_write; 367 struct uuid_entry *u; 368 unsigned int i; 369 char buf[80]; 370 371 BUG_ON(!parent); 372 down(&c->uuid_write_mutex); 373 closure_init(cl, parent); 374 375 for (i = 0; i < KEY_PTRS(k); i++) { 376 struct bio *bio = bch_bbio_alloc(c); 377 378 bio->bi_opf = REQ_SYNC | REQ_META | op_flags; 379 bio->bi_iter.bi_size = KEY_SIZE(k) << 9; 380 381 bio->bi_end_io = uuid_endio; 382 bio->bi_private = cl; 383 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 384 bch_bio_map(bio, c->uuids); 385 386 bch_submit_bbio(bio, c, k, i); 387 388 if (op != REQ_OP_WRITE) 389 break; 390 } 391 392 bch_extent_to_text(buf, sizeof(buf), k); 393 pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? 
"wrote" : "read", buf); 394 395 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 396 if (!bch_is_zero(u->uuid, 16)) 397 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n", 398 u - c->uuids, u->uuid, u->label, 399 u->first_reg, u->last_reg, u->invalidated); 400 401 closure_return_with_destructor(cl, uuid_io_unlock); 402 } 403 404 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) 405 { 406 struct bkey *k = &j->uuid_bucket; 407 408 if (__bch_btree_ptr_invalid(c, k)) 409 return "bad uuid pointer"; 410 411 bkey_copy(&c->uuid_bucket, k); 412 uuid_io(c, REQ_OP_READ, 0, k, cl); 413 414 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 415 struct uuid_entry_v0 *u0 = (void *) c->uuids; 416 struct uuid_entry *u1 = (void *) c->uuids; 417 int i; 418 419 closure_sync(cl); 420 421 /* 422 * Since the new uuid entry is bigger than the old, we have to 423 * convert starting at the highest memory address and work down 424 * in order to do it in place 425 */ 426 427 for (i = c->nr_uuids - 1; 428 i >= 0; 429 --i) { 430 memcpy(u1[i].uuid, u0[i].uuid, 16); 431 memcpy(u1[i].label, u0[i].label, 32); 432 433 u1[i].first_reg = u0[i].first_reg; 434 u1[i].last_reg = u0[i].last_reg; 435 u1[i].invalidated = u0[i].invalidated; 436 437 u1[i].flags = 0; 438 u1[i].sectors = 0; 439 } 440 } 441 442 return NULL; 443 } 444 445 static int __uuid_write(struct cache_set *c) 446 { 447 BKEY_PADDED(key) k; 448 struct closure cl; 449 struct cache *ca; 450 451 closure_init_stack(&cl); 452 lockdep_assert_held(&bch_register_lock); 453 454 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) 455 return 1; 456 457 SET_KEY_SIZE(&k.key, c->sb.bucket_size); 458 uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); 459 closure_sync(&cl); 460 461 /* Only one bucket used for uuid write */ 462 ca = PTR_CACHE(c, &k.key, 0); 463 atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written); 464 465 bkey_copy(&c->uuid_bucket, &k.key); 466 bkey_put(c, &k.key); 467 return 0; 468 } 469 470 int bch_uuid_write(struct cache_set *c) 471 { 472 int ret = __uuid_write(c); 473 474 if (!ret) 475 bch_journal_meta(c, NULL); 476 477 return ret; 478 } 479 480 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) 481 { 482 struct uuid_entry *u; 483 484 for (u = c->uuids; 485 u < c->uuids + c->nr_uuids; u++) 486 if (!memcmp(u->uuid, uuid, 16)) 487 return u; 488 489 return NULL; 490 } 491 492 static struct uuid_entry *uuid_find_empty(struct cache_set *c) 493 { 494 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; 495 496 return uuid_find(c, zero_uuid); 497 } 498 499 /* 500 * Bucket priorities/gens: 501 * 502 * For each bucket, we store on disk its 503 * 8 bit gen 504 * 16 bit priority 505 * 506 * See alloc.c for an explanation of the gen. The priority is used to implement 507 * lru (and in the future other) cache replacement policies; for most purposes 508 * it's just an opaque integer. 509 * 510 * The gens and the priorities don't have a whole lot to do with each other, and 511 * it's actually the gens that must be written out at specific times - it's no 512 * big deal if the priorities don't get written, if we lose them we just reuse 513 * buckets in suboptimal order. 514 * 515 * On disk they're stored in a packed array, and in as many buckets are required 516 * to fit them all. The buckets we use to store them form a list; the journal 517 * header points to the first bucket, the first bucket points to the second 518 * bucket, et cetera. 
519 * 520 * This code is used by the allocation code; periodically (whenever it runs out 521 * of buckets to allocate from) the allocation code will invalidate some 522 * buckets, but it can't use those buckets until their new gens are safely on 523 * disk. 524 */ 525 526 static void prio_endio(struct bio *bio) 527 { 528 struct cache *ca = bio->bi_private; 529 530 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); 531 bch_bbio_free(bio, ca->set); 532 closure_put(&ca->prio); 533 } 534 535 static void prio_io(struct cache *ca, uint64_t bucket, int op, 536 unsigned long op_flags) 537 { 538 struct closure *cl = &ca->prio; 539 struct bio *bio = bch_bbio_alloc(ca->set); 540 541 closure_init_stack(cl); 542 543 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; 544 bio_set_dev(bio, ca->bdev); 545 bio->bi_iter.bi_size = bucket_bytes(ca); 546 547 bio->bi_end_io = prio_endio; 548 bio->bi_private = ca; 549 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 550 bch_bio_map(bio, ca->disk_buckets); 551 552 closure_bio_submit(ca->set, bio, &ca->prio); 553 closure_sync(cl); 554 } 555 556 int bch_prio_write(struct cache *ca, bool wait) 557 { 558 int i; 559 struct bucket *b; 560 struct closure cl; 561 562 pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n", 563 fifo_used(&ca->free[RESERVE_PRIO]), 564 fifo_used(&ca->free[RESERVE_NONE]), 565 fifo_used(&ca->free_inc)); 566 567 /* 568 * Pre-check if there are enough free buckets. In the non-blocking 569 * scenario it's better to fail early rather than starting to allocate 570 * buckets and do a cleanup later in case of failure. 571 */ 572 if (!wait) { 573 size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) + 574 fifo_used(&ca->free[RESERVE_NONE]); 575 if (prio_buckets(ca) > avail) 576 return -ENOMEM; 577 } 578 579 closure_init_stack(&cl); 580 581 lockdep_assert_held(&ca->set->bucket_lock); 582 583 ca->disk_buckets->seq++; 584 585 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), 586 &ca->meta_sectors_written); 587 588 for (i = prio_buckets(ca) - 1; i >= 0; --i) { 589 long bucket; 590 struct prio_set *p = ca->disk_buckets; 591 struct bucket_disk *d = p->data; 592 struct bucket_disk *end = d + prios_per_bucket(ca); 593 594 for (b = ca->buckets + i * prios_per_bucket(ca); 595 b < ca->buckets + ca->sb.nbuckets && d < end; 596 b++, d++) { 597 d->prio = cpu_to_le16(b->prio); 598 d->gen = b->gen; 599 } 600 601 p->next_bucket = ca->prio_buckets[i + 1]; 602 p->magic = pset_magic(&ca->sb); 603 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); 604 605 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait); 606 BUG_ON(bucket == -1); 607 608 mutex_unlock(&ca->set->bucket_lock); 609 prio_io(ca, bucket, REQ_OP_WRITE, 0); 610 mutex_lock(&ca->set->bucket_lock); 611 612 ca->prio_buckets[i] = bucket; 613 atomic_dec_bug(&ca->buckets[bucket].pin); 614 } 615 616 mutex_unlock(&ca->set->bucket_lock); 617 618 bch_journal_meta(ca->set, &cl); 619 closure_sync(&cl); 620 621 mutex_lock(&ca->set->bucket_lock); 622 623 /* 624 * Don't want the old priorities to get garbage collected until after we 625 * finish writing the new ones, and they're journalled 626 */ 627 for (i = 0; i < prio_buckets(ca); i++) { 628 if (ca->prio_last_buckets[i]) 629 __bch_bucket_free(ca, 630 &ca->buckets[ca->prio_last_buckets[i]]); 631 632 ca->prio_last_buckets[i] = ca->prio_buckets[i]; 633 } 634 return 0; 635 } 636 637 static int prio_read(struct cache *ca, uint64_t bucket) 638 { 639 struct prio_set *p = ca->disk_buckets; 640 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = 
d; 641 struct bucket *b; 642 unsigned int bucket_nr = 0; 643 int ret = -EIO; 644 645 for (b = ca->buckets; 646 b < ca->buckets + ca->sb.nbuckets; 647 b++, d++) { 648 if (d == end) { 649 ca->prio_buckets[bucket_nr] = bucket; 650 ca->prio_last_buckets[bucket_nr] = bucket; 651 bucket_nr++; 652 653 prio_io(ca, bucket, REQ_OP_READ, 0); 654 655 if (p->csum != 656 bch_crc64(&p->magic, bucket_bytes(ca) - 8)) { 657 pr_warn("bad csum reading priorities\n"); 658 goto out; 659 } 660 661 if (p->magic != pset_magic(&ca->sb)) { 662 pr_warn("bad magic reading priorities\n"); 663 goto out; 664 } 665 666 bucket = p->next_bucket; 667 d = p->data; 668 } 669 670 b->prio = le16_to_cpu(d->prio); 671 b->gen = b->last_gc = d->gen; 672 } 673 674 ret = 0; 675 out: 676 return ret; 677 } 678 679 /* Bcache device */ 680 681 static int open_dev(struct block_device *b, fmode_t mode) 682 { 683 struct bcache_device *d = b->bd_disk->private_data; 684 685 if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) 686 return -ENXIO; 687 688 closure_get(&d->cl); 689 return 0; 690 } 691 692 static void release_dev(struct gendisk *b, fmode_t mode) 693 { 694 struct bcache_device *d = b->private_data; 695 696 closure_put(&d->cl); 697 } 698 699 static int ioctl_dev(struct block_device *b, fmode_t mode, 700 unsigned int cmd, unsigned long arg) 701 { 702 struct bcache_device *d = b->bd_disk->private_data; 703 704 return d->ioctl(d, mode, cmd, arg); 705 } 706 707 static const struct block_device_operations bcache_cached_ops = { 708 .submit_bio = cached_dev_submit_bio, 709 .open = open_dev, 710 .release = release_dev, 711 .ioctl = ioctl_dev, 712 .owner = THIS_MODULE, 713 }; 714 715 static const struct block_device_operations bcache_flash_ops = { 716 .submit_bio = flash_dev_submit_bio, 717 .open = open_dev, 718 .release = release_dev, 719 .ioctl = ioctl_dev, 720 .owner = THIS_MODULE, 721 }; 722 723 void bcache_device_stop(struct bcache_device *d) 724 { 725 if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) 726 /* 727 * closure_fn set to 728 * - cached device: cached_dev_flush() 729 * - flash dev: flash_dev_flush() 730 */ 731 closure_queue(&d->cl); 732 } 733 734 static void bcache_device_unlink(struct bcache_device *d) 735 { 736 lockdep_assert_held(&bch_register_lock); 737 738 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { 739 unsigned int i; 740 struct cache *ca; 741 742 sysfs_remove_link(&d->c->kobj, d->name); 743 sysfs_remove_link(&d->kobj, "cache"); 744 745 for_each_cache(ca, d->c, i) 746 bd_unlink_disk_holder(ca->bdev, d->disk); 747 } 748 } 749 750 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, 751 const char *name) 752 { 753 unsigned int i; 754 struct cache *ca; 755 int ret; 756 757 for_each_cache(ca, d->c, i) 758 bd_link_disk_holder(ca->bdev, d->disk); 759 760 snprintf(d->name, BCACHEDEVNAME_SIZE, 761 "%s%u", name, d->id); 762 763 ret = sysfs_create_link(&d->kobj, &c->kobj, "cache"); 764 if (ret < 0) 765 pr_err("Couldn't create device -> cache set symlink\n"); 766 767 ret = sysfs_create_link(&c->kobj, &d->kobj, d->name); 768 if (ret < 0) 769 pr_err("Couldn't create cache set -> device symlink\n"); 770 771 clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags); 772 } 773 774 static void bcache_device_detach(struct bcache_device *d) 775 { 776 lockdep_assert_held(&bch_register_lock); 777 778 atomic_dec(&d->c->attached_dev_nr); 779 780 if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { 781 struct uuid_entry *u = d->c->uuids + d->id; 782 783 SET_UUID_FLASH_ONLY(u, 0); 784 memcpy(u->uuid, invalid_uuid, 16); 
785 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); 786 bch_uuid_write(d->c); 787 } 788 789 bcache_device_unlink(d); 790 791 d->c->devices[d->id] = NULL; 792 closure_put(&d->c->caching); 793 d->c = NULL; 794 } 795 796 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, 797 unsigned int id) 798 { 799 d->id = id; 800 d->c = c; 801 c->devices[id] = d; 802 803 if (id >= c->devices_max_used) 804 c->devices_max_used = id + 1; 805 806 closure_get(&c->caching); 807 } 808 809 static inline int first_minor_to_idx(int first_minor) 810 { 811 return (first_minor/BCACHE_MINORS); 812 } 813 814 static inline int idx_to_first_minor(int idx) 815 { 816 return (idx * BCACHE_MINORS); 817 } 818 819 static void bcache_device_free(struct bcache_device *d) 820 { 821 struct gendisk *disk = d->disk; 822 823 lockdep_assert_held(&bch_register_lock); 824 825 if (disk) 826 pr_info("%s stopped\n", disk->disk_name); 827 else 828 pr_err("bcache device (NULL gendisk) stopped\n"); 829 830 if (d->c) 831 bcache_device_detach(d); 832 833 if (disk) { 834 bool disk_added = (disk->flags & GENHD_FL_UP) != 0; 835 836 if (disk_added) 837 del_gendisk(disk); 838 839 if (disk->queue) 840 blk_cleanup_queue(disk->queue); 841 842 ida_simple_remove(&bcache_device_idx, 843 first_minor_to_idx(disk->first_minor)); 844 if (disk_added) 845 put_disk(disk); 846 } 847 848 bioset_exit(&d->bio_split); 849 kvfree(d->full_dirty_stripes); 850 kvfree(d->stripe_sectors_dirty); 851 852 closure_debug_destroy(&d->cl); 853 } 854 855 static int bcache_device_init(struct bcache_device *d, unsigned int block_size, 856 sector_t sectors, struct block_device *cached_bdev, 857 const struct block_device_operations *ops) 858 { 859 struct request_queue *q; 860 const size_t max_stripes = min_t(size_t, INT_MAX, 861 SIZE_MAX / sizeof(atomic_t)); 862 uint64_t n; 863 int idx; 864 865 if (!d->stripe_size) 866 d->stripe_size = 1 << 31; 867 868 n = DIV_ROUND_UP_ULL(sectors, d->stripe_size); 869 if (!n || n > max_stripes) { 870 pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n", 871 n); 872 return -ENOMEM; 873 } 874 d->nr_stripes = n; 875 876 n = d->nr_stripes * sizeof(atomic_t); 877 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); 878 if (!d->stripe_sectors_dirty) 879 return -ENOMEM; 880 881 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); 882 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL); 883 if (!d->full_dirty_stripes) 884 return -ENOMEM; 885 886 idx = ida_simple_get(&bcache_device_idx, 0, 887 BCACHE_DEVICE_IDX_MAX, GFP_KERNEL); 888 if (idx < 0) 889 return idx; 890 891 if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio), 892 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) 893 goto err; 894 895 d->disk = alloc_disk(BCACHE_MINORS); 896 if (!d->disk) 897 goto err; 898 899 set_capacity(d->disk, sectors); 900 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx); 901 902 d->disk->major = bcache_major; 903 d->disk->first_minor = idx_to_first_minor(idx); 904 d->disk->fops = ops; 905 d->disk->private_data = d; 906 907 q = blk_alloc_queue(NUMA_NO_NODE); 908 if (!q) 909 return -ENOMEM; 910 911 d->disk->queue = q; 912 q->backing_dev_info->congested_data = d; 913 q->limits.max_hw_sectors = UINT_MAX; 914 q->limits.max_sectors = UINT_MAX; 915 q->limits.max_segment_size = UINT_MAX; 916 q->limits.max_segments = BIO_MAX_PAGES; 917 blk_queue_max_discard_sectors(q, UINT_MAX); 918 q->limits.discard_granularity = 512; 919 q->limits.io_min = block_size; 920 q->limits.logical_block_size = block_size; 921 
q->limits.physical_block_size = block_size; 922 923 if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) { 924 /* 925 * This should only happen with BCACHE_SB_VERSION_BDEV. 926 * Block/page size is checked for BCACHE_SB_VERSION_CDEV. 927 */ 928 pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n", 929 d->disk->disk_name, q->limits.logical_block_size, 930 PAGE_SIZE, bdev_logical_block_size(cached_bdev)); 931 932 /* This also adjusts physical block size/min io size if needed */ 933 blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev)); 934 } 935 936 blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); 937 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue); 938 blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue); 939 940 blk_queue_write_cache(q, true, true); 941 942 return 0; 943 944 err: 945 ida_simple_remove(&bcache_device_idx, idx); 946 return -ENOMEM; 947 948 } 949 950 /* Cached device */ 951 952 static void calc_cached_dev_sectors(struct cache_set *c) 953 { 954 uint64_t sectors = 0; 955 struct cached_dev *dc; 956 957 list_for_each_entry(dc, &c->cached_devs, list) 958 sectors += bdev_sectors(dc->bdev); 959 960 c->cached_dev_sectors = sectors; 961 } 962 963 #define BACKING_DEV_OFFLINE_TIMEOUT 5 964 static int cached_dev_status_update(void *arg) 965 { 966 struct cached_dev *dc = arg; 967 struct request_queue *q; 968 969 /* 970 * If this delayed worker is stopping outside, directly quit here. 971 * dc->io_disable might be set via sysfs interface, so check it 972 * here too. 973 */ 974 while (!kthread_should_stop() && !dc->io_disable) { 975 q = bdev_get_queue(dc->bdev); 976 if (blk_queue_dying(q)) 977 dc->offline_seconds++; 978 else 979 dc->offline_seconds = 0; 980 981 if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) { 982 pr_err("%s: device offline for %d seconds\n", 983 dc->backing_dev_name, 984 BACKING_DEV_OFFLINE_TIMEOUT); 985 pr_err("%s: disable I/O request due to backing device offline\n", 986 dc->disk.name); 987 dc->io_disable = true; 988 /* let others know earlier that io_disable is true */ 989 smp_mb(); 990 bcache_device_stop(&dc->disk); 991 break; 992 } 993 schedule_timeout_interruptible(HZ); 994 } 995 996 wait_for_kthread_stop(); 997 return 0; 998 } 999 1000 1001 int bch_cached_dev_run(struct cached_dev *dc) 1002 { 1003 struct bcache_device *d = &dc->disk; 1004 char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL); 1005 char *env[] = { 1006 "DRIVER=bcache", 1007 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), 1008 kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? 
: ""), 1009 NULL, 1010 }; 1011 1012 if (dc->io_disable) { 1013 pr_err("I/O disabled on cached dev %s\n", 1014 dc->backing_dev_name); 1015 kfree(env[1]); 1016 kfree(env[2]); 1017 kfree(buf); 1018 return -EIO; 1019 } 1020 1021 if (atomic_xchg(&dc->running, 1)) { 1022 kfree(env[1]); 1023 kfree(env[2]); 1024 kfree(buf); 1025 pr_info("cached dev %s is running already\n", 1026 dc->backing_dev_name); 1027 return -EBUSY; 1028 } 1029 1030 if (!d->c && 1031 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { 1032 struct closure cl; 1033 1034 closure_init_stack(&cl); 1035 1036 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); 1037 bch_write_bdev_super(dc, &cl); 1038 closure_sync(&cl); 1039 } 1040 1041 add_disk(d->disk); 1042 bd_link_disk_holder(dc->bdev, dc->disk.disk); 1043 /* 1044 * won't show up in the uevent file, use udevadm monitor -e instead 1045 * only class / kset properties are persistent 1046 */ 1047 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); 1048 kfree(env[1]); 1049 kfree(env[2]); 1050 kfree(buf); 1051 1052 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || 1053 sysfs_create_link(&disk_to_dev(d->disk)->kobj, 1054 &d->kobj, "bcache")) { 1055 pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n"); 1056 return -ENOMEM; 1057 } 1058 1059 dc->status_update_thread = kthread_run(cached_dev_status_update, 1060 dc, "bcache_status_update"); 1061 if (IS_ERR(dc->status_update_thread)) { 1062 pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n"); 1063 } 1064 1065 return 0; 1066 } 1067 1068 /* 1069 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed 1070 * work dc->writeback_rate_update is running. Wait until the routine 1071 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to 1072 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out 1073 * seconds, give up waiting here and continue to cancel it too. 
1074 */ 1075 static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) 1076 { 1077 int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ; 1078 1079 do { 1080 if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING, 1081 &dc->disk.flags)) 1082 break; 1083 time_out--; 1084 schedule_timeout_interruptible(1); 1085 } while (time_out > 0); 1086 1087 if (time_out == 0) 1088 pr_warn("give up waiting for dc->writeback_write_update to quit\n"); 1089 1090 cancel_delayed_work_sync(&dc->writeback_rate_update); 1091 } 1092 1093 static void cached_dev_detach_finish(struct work_struct *w) 1094 { 1095 struct cached_dev *dc = container_of(w, struct cached_dev, detach); 1096 struct closure cl; 1097 1098 closure_init_stack(&cl); 1099 1100 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); 1101 BUG_ON(refcount_read(&dc->count)); 1102 1103 1104 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 1105 cancel_writeback_rate_update_dwork(dc); 1106 1107 if (!IS_ERR_OR_NULL(dc->writeback_thread)) { 1108 kthread_stop(dc->writeback_thread); 1109 dc->writeback_thread = NULL; 1110 } 1111 1112 memset(&dc->sb.set_uuid, 0, 16); 1113 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); 1114 1115 bch_write_bdev_super(dc, &cl); 1116 closure_sync(&cl); 1117 1118 mutex_lock(&bch_register_lock); 1119 1120 calc_cached_dev_sectors(dc->disk.c); 1121 bcache_device_detach(&dc->disk); 1122 list_move(&dc->list, &uncached_devices); 1123 1124 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); 1125 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); 1126 1127 mutex_unlock(&bch_register_lock); 1128 1129 pr_info("Caching disabled for %s\n", dc->backing_dev_name); 1130 1131 /* Drop ref we took in cached_dev_detach() */ 1132 closure_put(&dc->disk.cl); 1133 } 1134 1135 void bch_cached_dev_detach(struct cached_dev *dc) 1136 { 1137 lockdep_assert_held(&bch_register_lock); 1138 1139 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 1140 return; 1141 1142 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) 1143 return; 1144 1145 /* 1146 * Block the device from being closed and freed until we're finished 1147 * detaching 1148 */ 1149 closure_get(&dc->disk.cl); 1150 1151 bch_writeback_queue(dc); 1152 1153 cached_dev_put(dc); 1154 } 1155 1156 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, 1157 uint8_t *set_uuid) 1158 { 1159 uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds()); 1160 struct uuid_entry *u; 1161 struct cached_dev *exist_dc, *t; 1162 int ret = 0; 1163 1164 if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || 1165 (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) 1166 return -ENOENT; 1167 1168 if (dc->disk.c) { 1169 pr_err("Can't attach %s: already attached\n", 1170 dc->backing_dev_name); 1171 return -EINVAL; 1172 } 1173 1174 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { 1175 pr_err("Can't attach %s: shutting down\n", 1176 dc->backing_dev_name); 1177 return -EINVAL; 1178 } 1179 1180 if (dc->sb.block_size < c->sb.block_size) { 1181 /* Will die */ 1182 pr_err("Couldn't attach %s: block size less than set's block size\n", 1183 dc->backing_dev_name); 1184 return -EINVAL; 1185 } 1186 1187 /* Check whether already attached */ 1188 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { 1189 if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { 1190 pr_err("Tried to attach %s but duplicate UUID already attached\n", 1191 dc->backing_dev_name); 1192 1193 return -EINVAL; 1194 } 1195 } 1196 1197 u = uuid_find(c, dc->sb.uuid); 1198 1199 if (u && 1200 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE 
|| 1201 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { 1202 memcpy(u->uuid, invalid_uuid, 16); 1203 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); 1204 u = NULL; 1205 } 1206 1207 if (!u) { 1208 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1209 pr_err("Couldn't find uuid for %s in set\n", 1210 dc->backing_dev_name); 1211 return -ENOENT; 1212 } 1213 1214 u = uuid_find_empty(c); 1215 if (!u) { 1216 pr_err("Not caching %s, no room for UUID\n", 1217 dc->backing_dev_name); 1218 return -EINVAL; 1219 } 1220 } 1221 1222 /* 1223 * Deadlocks since we're called via sysfs... 1224 * sysfs_remove_file(&dc->kobj, &sysfs_attach); 1225 */ 1226 1227 if (bch_is_zero(u->uuid, 16)) { 1228 struct closure cl; 1229 1230 closure_init_stack(&cl); 1231 1232 memcpy(u->uuid, dc->sb.uuid, 16); 1233 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); 1234 u->first_reg = u->last_reg = rtime; 1235 bch_uuid_write(c); 1236 1237 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); 1238 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); 1239 1240 bch_write_bdev_super(dc, &cl); 1241 closure_sync(&cl); 1242 } else { 1243 u->last_reg = rtime; 1244 bch_uuid_write(c); 1245 } 1246 1247 bcache_device_attach(&dc->disk, c, u - c->uuids); 1248 list_move(&dc->list, &c->cached_devs); 1249 calc_cached_dev_sectors(c); 1250 1251 /* 1252 * dc->c must be set before dc->count != 0 - paired with the mb in 1253 * cached_dev_get() 1254 */ 1255 smp_wmb(); 1256 refcount_set(&dc->count, 1); 1257 1258 /* Block writeback thread, but spawn it */ 1259 down_write(&dc->writeback_lock); 1260 if (bch_cached_dev_writeback_start(dc)) { 1261 up_write(&dc->writeback_lock); 1262 pr_err("Couldn't start writeback facilities for %s\n", 1263 dc->disk.disk->disk_name); 1264 return -ENOMEM; 1265 } 1266 1267 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1268 atomic_set(&dc->has_dirty, 1); 1269 bch_writeback_queue(dc); 1270 } 1271 1272 bch_sectors_dirty_init(&dc->disk); 1273 1274 ret = bch_cached_dev_run(dc); 1275 if (ret && (ret != -EBUSY)) { 1276 up_write(&dc->writeback_lock); 1277 /* 1278 * bch_register_lock is held, bcache_device_stop() is not 1279 * able to be directly called. The kthread and kworker 1280 * created previously in bch_cached_dev_writeback_start() 1281 * have to be stopped manually here. 
1282 */ 1283 kthread_stop(dc->writeback_thread); 1284 cancel_writeback_rate_update_dwork(dc); 1285 pr_err("Couldn't run cached device %s\n", 1286 dc->backing_dev_name); 1287 return ret; 1288 } 1289 1290 bcache_device_link(&dc->disk, c, "bdev"); 1291 atomic_inc(&c->attached_dev_nr); 1292 1293 /* Allow the writeback thread to proceed */ 1294 up_write(&dc->writeback_lock); 1295 1296 pr_info("Caching %s as %s on set %pU\n", 1297 dc->backing_dev_name, 1298 dc->disk.disk->disk_name, 1299 dc->disk.c->sb.set_uuid); 1300 return 0; 1301 } 1302 1303 /* when dc->disk.kobj released */ 1304 void bch_cached_dev_release(struct kobject *kobj) 1305 { 1306 struct cached_dev *dc = container_of(kobj, struct cached_dev, 1307 disk.kobj); 1308 kfree(dc); 1309 module_put(THIS_MODULE); 1310 } 1311 1312 static void cached_dev_free(struct closure *cl) 1313 { 1314 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1315 1316 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 1317 cancel_writeback_rate_update_dwork(dc); 1318 1319 if (!IS_ERR_OR_NULL(dc->writeback_thread)) 1320 kthread_stop(dc->writeback_thread); 1321 if (!IS_ERR_OR_NULL(dc->status_update_thread)) 1322 kthread_stop(dc->status_update_thread); 1323 1324 mutex_lock(&bch_register_lock); 1325 1326 if (atomic_read(&dc->running)) 1327 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 1328 bcache_device_free(&dc->disk); 1329 list_del(&dc->list); 1330 1331 mutex_unlock(&bch_register_lock); 1332 1333 if (dc->sb_disk) 1334 put_page(virt_to_page(dc->sb_disk)); 1335 1336 if (!IS_ERR_OR_NULL(dc->bdev)) 1337 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1338 1339 wake_up(&unregister_wait); 1340 1341 kobject_put(&dc->disk.kobj); 1342 } 1343 1344 static void cached_dev_flush(struct closure *cl) 1345 { 1346 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1347 struct bcache_device *d = &dc->disk; 1348 1349 mutex_lock(&bch_register_lock); 1350 bcache_device_unlink(d); 1351 mutex_unlock(&bch_register_lock); 1352 1353 bch_cache_accounting_destroy(&dc->accounting); 1354 kobject_del(&d->kobj); 1355 1356 continue_at(cl, cached_dev_free, system_wq); 1357 } 1358 1359 static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) 1360 { 1361 int ret; 1362 struct io *io; 1363 struct request_queue *q = bdev_get_queue(dc->bdev); 1364 1365 __module_get(THIS_MODULE); 1366 INIT_LIST_HEAD(&dc->list); 1367 closure_init(&dc->disk.cl, NULL); 1368 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); 1369 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1370 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1371 sema_init(&dc->sb_write_mutex, 1); 1372 INIT_LIST_HEAD(&dc->io_lru); 1373 spin_lock_init(&dc->io_lock); 1374 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); 1375 1376 dc->sequential_cutoff = 4 << 20; 1377 1378 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1379 list_add(&io->lru, &dc->io_lru); 1380 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1381 } 1382 1383 dc->disk.stripe_size = q->limits.io_opt >> 9; 1384 1385 if (dc->disk.stripe_size) 1386 dc->partial_stripes_expensive = 1387 q->limits.raid_partial_stripes_expensive; 1388 1389 ret = bcache_device_init(&dc->disk, block_size, 1390 dc->bdev->bd_part->nr_sects - dc->sb.data_offset, 1391 dc->bdev, &bcache_cached_ops); 1392 if (ret) 1393 return ret; 1394 1395 dc->disk.disk->queue->backing_dev_info->ra_pages = 1396 max(dc->disk.disk->queue->backing_dev_info->ra_pages, 1397 q->backing_dev_info->ra_pages); 1398 1399 
atomic_set(&dc->io_errors, 0); 1400 dc->io_disable = false; 1401 dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT; 1402 /* default to auto */ 1403 dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; 1404 1405 bch_cached_dev_request_init(dc); 1406 bch_cached_dev_writeback_init(dc); 1407 return 0; 1408 } 1409 1410 /* Cached device - bcache superblock */ 1411 1412 static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk, 1413 struct block_device *bdev, 1414 struct cached_dev *dc) 1415 { 1416 const char *err = "cannot allocate memory"; 1417 struct cache_set *c; 1418 int ret = -ENOMEM; 1419 1420 bdevname(bdev, dc->backing_dev_name); 1421 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1422 dc->bdev = bdev; 1423 dc->bdev->bd_holder = dc; 1424 dc->sb_disk = sb_disk; 1425 1426 if (cached_dev_init(dc, sb->block_size << 9)) 1427 goto err; 1428 1429 err = "error creating kobject"; 1430 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1431 "bcache")) 1432 goto err; 1433 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1434 goto err; 1435 1436 pr_info("registered backing device %s\n", dc->backing_dev_name); 1437 1438 list_add(&dc->list, &uncached_devices); 1439 /* attach to a matched cache set if it exists */ 1440 list_for_each_entry(c, &bch_cache_sets, list) 1441 bch_cached_dev_attach(dc, c, NULL); 1442 1443 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || 1444 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) { 1445 err = "failed to run cached device"; 1446 ret = bch_cached_dev_run(dc); 1447 if (ret) 1448 goto err; 1449 } 1450 1451 return 0; 1452 err: 1453 pr_notice("error %s: %s\n", dc->backing_dev_name, err); 1454 bcache_device_stop(&dc->disk); 1455 return ret; 1456 } 1457 1458 /* Flash only volumes */ 1459 1460 /* When d->kobj released */ 1461 void bch_flash_dev_release(struct kobject *kobj) 1462 { 1463 struct bcache_device *d = container_of(kobj, struct bcache_device, 1464 kobj); 1465 kfree(d); 1466 } 1467 1468 static void flash_dev_free(struct closure *cl) 1469 { 1470 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1471 1472 mutex_lock(&bch_register_lock); 1473 atomic_long_sub(bcache_dev_sectors_dirty(d), 1474 &d->c->flash_dev_dirty_sectors); 1475 bcache_device_free(d); 1476 mutex_unlock(&bch_register_lock); 1477 kobject_put(&d->kobj); 1478 } 1479 1480 static void flash_dev_flush(struct closure *cl) 1481 { 1482 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1483 1484 mutex_lock(&bch_register_lock); 1485 bcache_device_unlink(d); 1486 mutex_unlock(&bch_register_lock); 1487 kobject_del(&d->kobj); 1488 continue_at(cl, flash_dev_free, system_wq); 1489 } 1490 1491 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) 1492 { 1493 struct bcache_device *d = kzalloc(sizeof(struct bcache_device), 1494 GFP_KERNEL); 1495 if (!d) 1496 return -ENOMEM; 1497 1498 closure_init(&d->cl, NULL); 1499 set_closure_fn(&d->cl, flash_dev_flush, system_wq); 1500 1501 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1502 1503 if (bcache_device_init(d, block_bytes(c), u->sectors, 1504 NULL, &bcache_flash_ops)) 1505 goto err; 1506 1507 bcache_device_attach(d, c, u - c->uuids); 1508 bch_sectors_dirty_init(d); 1509 bch_flash_dev_request_init(d); 1510 add_disk(d->disk); 1511 1512 if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) 1513 goto err; 1514 1515 bcache_device_link(d, c, "volume"); 1516 1517 return 0; 1518 err: 1519 kobject_put(&d->kobj); 1520 return -ENOMEM; 1521 } 1522 1523 static int 
flash_devs_run(struct cache_set *c) 1524 { 1525 int ret = 0; 1526 struct uuid_entry *u; 1527 1528 for (u = c->uuids; 1529 u < c->uuids + c->nr_uuids && !ret; 1530 u++) 1531 if (UUID_FLASH_ONLY(u)) 1532 ret = flash_dev_run(c, u); 1533 1534 return ret; 1535 } 1536 1537 int bch_flash_dev_create(struct cache_set *c, uint64_t size) 1538 { 1539 struct uuid_entry *u; 1540 1541 if (test_bit(CACHE_SET_STOPPING, &c->flags)) 1542 return -EINTR; 1543 1544 if (!test_bit(CACHE_SET_RUNNING, &c->flags)) 1545 return -EPERM; 1546 1547 u = uuid_find_empty(c); 1548 if (!u) { 1549 pr_err("Can't create volume, no room for UUID\n"); 1550 return -EINVAL; 1551 } 1552 1553 get_random_bytes(u->uuid, 16); 1554 memset(u->label, 0, 32); 1555 u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds()); 1556 1557 SET_UUID_FLASH_ONLY(u, 1); 1558 u->sectors = size >> 9; 1559 1560 bch_uuid_write(c); 1561 1562 return flash_dev_run(c, u); 1563 } 1564 1565 bool bch_cached_dev_error(struct cached_dev *dc) 1566 { 1567 if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 1568 return false; 1569 1570 dc->io_disable = true; 1571 /* make others know io_disable is true earlier */ 1572 smp_mb(); 1573 1574 pr_err("stop %s: too many IO errors on backing device %s\n", 1575 dc->disk.disk->disk_name, dc->backing_dev_name); 1576 1577 bcache_device_stop(&dc->disk); 1578 return true; 1579 } 1580 1581 /* Cache set */ 1582 1583 __printf(2, 3) 1584 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 1585 { 1586 struct va_format vaf; 1587 va_list args; 1588 1589 if (c->on_error != ON_ERROR_PANIC && 1590 test_bit(CACHE_SET_STOPPING, &c->flags)) 1591 return false; 1592 1593 if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1594 pr_info("CACHE_SET_IO_DISABLE already set\n"); 1595 1596 /* 1597 * XXX: we can be called from atomic context 1598 * acquire_console_sem(); 1599 */ 1600 1601 va_start(args, fmt); 1602 1603 vaf.fmt = fmt; 1604 vaf.va = &args; 1605 1606 pr_err("error on %pU: %pV, disabling caching\n", 1607 c->sb.set_uuid, &vaf); 1608 1609 va_end(args); 1610 1611 if (c->on_error == ON_ERROR_PANIC) 1612 panic("panic forced after error\n"); 1613 1614 bch_cache_set_unregister(c); 1615 return true; 1616 } 1617 1618 /* When c->kobj released */ 1619 void bch_cache_set_release(struct kobject *kobj) 1620 { 1621 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 1622 1623 kfree(c); 1624 module_put(THIS_MODULE); 1625 } 1626 1627 static void cache_set_free(struct closure *cl) 1628 { 1629 struct cache_set *c = container_of(cl, struct cache_set, cl); 1630 struct cache *ca; 1631 unsigned int i; 1632 1633 debugfs_remove(c->debug); 1634 1635 bch_open_buckets_free(c); 1636 bch_btree_cache_free(c); 1637 bch_journal_free(c); 1638 1639 mutex_lock(&bch_register_lock); 1640 for_each_cache(ca, c, i) 1641 if (ca) { 1642 ca->set = NULL; 1643 c->cache[ca->sb.nr_this_dev] = NULL; 1644 kobject_put(&ca->kobj); 1645 } 1646 1647 bch_bset_sort_state_free(&c->sort); 1648 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); 1649 1650 if (c->moving_gc_wq) 1651 destroy_workqueue(c->moving_gc_wq); 1652 bioset_exit(&c->bio_split); 1653 mempool_exit(&c->fill_iter); 1654 mempool_exit(&c->bio_meta); 1655 mempool_exit(&c->search); 1656 kfree(c->devices); 1657 1658 list_del(&c->list); 1659 mutex_unlock(&bch_register_lock); 1660 1661 pr_info("Cache set %pU unregistered\n", c->sb.set_uuid); 1662 wake_up(&unregister_wait); 1663 1664 closure_debug_destroy(&c->cl); 1665 kobject_put(&c->kobj); 1666 } 1667 1668 static void 
cache_set_flush(struct closure *cl) 1669 { 1670 struct cache_set *c = container_of(cl, struct cache_set, caching); 1671 struct cache *ca; 1672 struct btree *b; 1673 unsigned int i; 1674 1675 bch_cache_accounting_destroy(&c->accounting); 1676 1677 kobject_put(&c->internal); 1678 kobject_del(&c->kobj); 1679 1680 if (!IS_ERR_OR_NULL(c->gc_thread)) 1681 kthread_stop(c->gc_thread); 1682 1683 if (!IS_ERR_OR_NULL(c->root)) 1684 list_add(&c->root->list, &c->btree_cache); 1685 1686 /* 1687 * Avoid flushing cached nodes if cache set is retiring 1688 * due to too many I/O errors detected. 1689 */ 1690 if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1691 list_for_each_entry(b, &c->btree_cache, list) { 1692 mutex_lock(&b->write_lock); 1693 if (btree_node_dirty(b)) 1694 __bch_btree_node_write(b, NULL); 1695 mutex_unlock(&b->write_lock); 1696 } 1697 1698 for_each_cache(ca, c, i) 1699 if (ca->alloc_thread) 1700 kthread_stop(ca->alloc_thread); 1701 1702 if (c->journal.cur) { 1703 cancel_delayed_work_sync(&c->journal.work); 1704 /* flush last journal entry if needed */ 1705 c->journal.work.work.func(&c->journal.work.work); 1706 } 1707 1708 closure_return(cl); 1709 } 1710 1711 /* 1712 * This function is only called when CACHE_SET_IO_DISABLE is set, which means 1713 * cache set is unregistering due to too many I/O errors. In this condition, 1714 * the bcache device might be stopped, it depends on stop_when_cache_set_failed 1715 * value and whether the broken cache has dirty data: 1716 * 1717 * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device 1718 * BCH_CACHED_STOP_AUTO 0 NO 1719 * BCH_CACHED_STOP_AUTO 1 YES 1720 * BCH_CACHED_DEV_STOP_ALWAYS 0 YES 1721 * BCH_CACHED_DEV_STOP_ALWAYS 1 YES 1722 * 1723 * The expected behavior is, if stop_when_cache_set_failed is configured to 1724 * "auto" via sysfs interface, the bcache device will not be stopped if the 1725 * backing device is clean on the broken cache device. 1726 */ 1727 static void conditional_stop_bcache_device(struct cache_set *c, 1728 struct bcache_device *d, 1729 struct cached_dev *dc) 1730 { 1731 if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) { 1732 pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n", 1733 d->disk->disk_name, c->sb.set_uuid); 1734 bcache_device_stop(d); 1735 } else if (atomic_read(&dc->has_dirty)) { 1736 /* 1737 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO 1738 * and dc->has_dirty == 1 1739 */ 1740 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n", 1741 d->disk->disk_name); 1742 /* 1743 * There might be a small time gap that cache set is 1744 * released but bcache device is not. Inside this time 1745 * gap, regular I/O requests will directly go into 1746 * backing device as no cache set attached to. This 1747 * behavior may also introduce potential inconsistence 1748 * data in writeback mode while cache is dirty. 1749 * Therefore before calling bcache_device_stop() due 1750 * to a broken cache device, dc->io_disable should be 1751 * explicitly set to true. 
1752 */ 1753 dc->io_disable = true; 1754 /* make others know io_disable is true earlier */ 1755 smp_mb(); 1756 bcache_device_stop(d); 1757 } else { 1758 /* 1759 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO 1760 * and dc->has_dirty == 0 1761 */ 1762 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n", 1763 d->disk->disk_name); 1764 } 1765 } 1766 1767 static void __cache_set_unregister(struct closure *cl) 1768 { 1769 struct cache_set *c = container_of(cl, struct cache_set, caching); 1770 struct cached_dev *dc; 1771 struct bcache_device *d; 1772 size_t i; 1773 1774 mutex_lock(&bch_register_lock); 1775 1776 for (i = 0; i < c->devices_max_used; i++) { 1777 d = c->devices[i]; 1778 if (!d) 1779 continue; 1780 1781 if (!UUID_FLASH_ONLY(&c->uuids[i]) && 1782 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { 1783 dc = container_of(d, struct cached_dev, disk); 1784 bch_cached_dev_detach(dc); 1785 if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1786 conditional_stop_bcache_device(c, d, dc); 1787 } else { 1788 bcache_device_stop(d); 1789 } 1790 } 1791 1792 mutex_unlock(&bch_register_lock); 1793 1794 continue_at(cl, cache_set_flush, system_wq); 1795 } 1796 1797 void bch_cache_set_stop(struct cache_set *c) 1798 { 1799 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) 1800 /* closure_fn set to __cache_set_unregister() */ 1801 closure_queue(&c->caching); 1802 } 1803 1804 void bch_cache_set_unregister(struct cache_set *c) 1805 { 1806 set_bit(CACHE_SET_UNREGISTERING, &c->flags); 1807 bch_cache_set_stop(c); 1808 } 1809 1810 #define alloc_bucket_pages(gfp, c) \ 1811 ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c)))) 1812 1813 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) 1814 { 1815 int iter_size; 1816 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); 1817 1818 if (!c) 1819 return NULL; 1820 1821 __module_get(THIS_MODULE); 1822 closure_init(&c->cl, NULL); 1823 set_closure_fn(&c->cl, cache_set_free, system_wq); 1824 1825 closure_init(&c->caching, &c->cl); 1826 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); 1827 1828 /* Maybe create continue_at_noreturn() and use it here? 
*/ 1829 closure_set_stopped(&c->cl); 1830 closure_put(&c->cl); 1831 1832 kobject_init(&c->kobj, &bch_cache_set_ktype); 1833 kobject_init(&c->internal, &bch_cache_set_internal_ktype); 1834 1835 bch_cache_accounting_init(&c->accounting, &c->cl); 1836 1837 memcpy(c->sb.set_uuid, sb->set_uuid, 16); 1838 c->sb.block_size = sb->block_size; 1839 c->sb.bucket_size = sb->bucket_size; 1840 c->sb.nr_in_set = sb->nr_in_set; 1841 c->sb.last_mount = sb->last_mount; 1842 c->bucket_bits = ilog2(sb->bucket_size); 1843 c->block_bits = ilog2(sb->block_size); 1844 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); 1845 c->devices_max_used = 0; 1846 atomic_set(&c->attached_dev_nr, 0); 1847 c->btree_pages = bucket_pages(c); 1848 if (c->btree_pages > BTREE_MAX_PAGES) 1849 c->btree_pages = max_t(int, c->btree_pages / 4, 1850 BTREE_MAX_PAGES); 1851 1852 sema_init(&c->sb_write_mutex, 1); 1853 mutex_init(&c->bucket_lock); 1854 init_waitqueue_head(&c->btree_cache_wait); 1855 spin_lock_init(&c->btree_cannibalize_lock); 1856 init_waitqueue_head(&c->bucket_wait); 1857 init_waitqueue_head(&c->gc_wait); 1858 sema_init(&c->uuid_write_mutex, 1); 1859 1860 spin_lock_init(&c->btree_gc_time.lock); 1861 spin_lock_init(&c->btree_split_time.lock); 1862 spin_lock_init(&c->btree_read_time.lock); 1863 1864 bch_moving_init_cache_set(c); 1865 1866 INIT_LIST_HEAD(&c->list); 1867 INIT_LIST_HEAD(&c->cached_devs); 1868 INIT_LIST_HEAD(&c->btree_cache); 1869 INIT_LIST_HEAD(&c->btree_cache_freeable); 1870 INIT_LIST_HEAD(&c->btree_cache_freed); 1871 INIT_LIST_HEAD(&c->data_buckets); 1872 1873 iter_size = (sb->bucket_size / sb->block_size + 1) * 1874 sizeof(struct btree_iter_set); 1875 1876 c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL); 1877 if (!c->devices) 1878 goto err; 1879 1880 if (mempool_init_slab_pool(&c->search, 32, bch_search_cache)) 1881 goto err; 1882 1883 if (mempool_init_kmalloc_pool(&c->bio_meta, 2, 1884 sizeof(struct bbio) + 1885 sizeof(struct bio_vec) * bucket_pages(c))) 1886 goto err; 1887 1888 if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size)) 1889 goto err; 1890 1891 if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), 1892 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) 1893 goto err; 1894 1895 c->uuids = alloc_bucket_pages(GFP_KERNEL, c); 1896 if (!c->uuids) 1897 goto err; 1898 1899 c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0); 1900 if (!c->moving_gc_wq) 1901 goto err; 1902 1903 if (bch_journal_alloc(c)) 1904 goto err; 1905 1906 if (bch_btree_cache_alloc(c)) 1907 goto err; 1908 1909 if (bch_open_buckets_alloc(c)) 1910 goto err; 1911 1912 if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) 1913 goto err; 1914 1915 c->congested_read_threshold_us = 2000; 1916 c->congested_write_threshold_us = 20000; 1917 c->error_limit = DEFAULT_IO_ERROR_LIMIT; 1918 c->idle_max_writeback_rate_enabled = 1; 1919 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); 1920 1921 return c; 1922 err: 1923 bch_cache_set_unregister(c); 1924 return NULL; 1925 } 1926 1927 static int run_cache_set(struct cache_set *c) 1928 { 1929 const char *err = "cannot allocate memory"; 1930 struct cached_dev *dc, *t; 1931 struct cache *ca; 1932 struct closure cl; 1933 unsigned int i; 1934 LIST_HEAD(journal); 1935 struct journal_replay *l; 1936 1937 closure_init_stack(&cl); 1938 1939 for_each_cache(ca, c, i) 1940 c->nbuckets += ca->sb.nbuckets; 1941 set_gc_sectors(c); 1942 1943 if (CACHE_SYNC(&c->sb)) { 1944 struct bkey *k; 1945 struct jset *j; 1946 1947 err = "cannot allocate memory for 
journal"; 1948 if (bch_journal_read(c, &journal)) 1949 goto err; 1950 1951 pr_debug("btree_journal_read() done\n"); 1952 1953 err = "no journal entries found"; 1954 if (list_empty(&journal)) 1955 goto err; 1956 1957 j = &list_entry(journal.prev, struct journal_replay, list)->j; 1958 1959 err = "IO error reading priorities"; 1960 for_each_cache(ca, c, i) { 1961 if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev])) 1962 goto err; 1963 } 1964 1965 /* 1966 * If prio_read() fails it'll call cache_set_error and we'll 1967 * tear everything down right away, but if we perhaps checked 1968 * sooner we could avoid journal replay. 1969 */ 1970 1971 k = &j->btree_root; 1972 1973 err = "bad btree root"; 1974 if (__bch_btree_ptr_invalid(c, k)) 1975 goto err; 1976 1977 err = "error reading btree root"; 1978 c->root = bch_btree_node_get(c, NULL, k, 1979 j->btree_level, 1980 true, NULL); 1981 if (IS_ERR_OR_NULL(c->root)) 1982 goto err; 1983 1984 list_del_init(&c->root->list); 1985 rw_unlock(true, c->root); 1986 1987 err = uuid_read(c, j, &cl); 1988 if (err) 1989 goto err; 1990 1991 err = "error in recovery"; 1992 if (bch_btree_check(c)) 1993 goto err; 1994 1995 bch_journal_mark(c, &journal); 1996 bch_initial_gc_finish(c); 1997 pr_debug("btree_check() done\n"); 1998 1999 /* 2000 * bcache_journal_next() can't happen sooner, or 2001 * btree_gc_finish() will give spurious errors about last_gc > 2002 * gc_gen - this is a hack but oh well. 2003 */ 2004 bch_journal_next(&c->journal); 2005 2006 err = "error starting allocator thread"; 2007 for_each_cache(ca, c, i) 2008 if (bch_cache_allocator_start(ca)) 2009 goto err; 2010 2011 /* 2012 * First place it's safe to allocate: btree_check() and 2013 * btree_gc_finish() have to run before we have buckets to 2014 * allocate, and bch_bucket_alloc_set() might cause a journal 2015 * entry to be written so bcache_journal_next() has to be called 2016 * first. 
2017 * 2018 * If the uuids were in the old format we have to rewrite them 2019 * before the next journal entry is written: 2020 */ 2021 if (j->version < BCACHE_JSET_VERSION_UUID) 2022 __uuid_write(c); 2023 2024 err = "bcache: replay journal failed"; 2025 if (bch_journal_replay(c, &journal)) 2026 goto err; 2027 } else { 2028 pr_notice("invalidating existing data\n"); 2029 2030 for_each_cache(ca, c, i) { 2031 unsigned int j; 2032 2033 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 2034 2, SB_JOURNAL_BUCKETS); 2035 2036 for (j = 0; j < ca->sb.keys; j++) 2037 ca->sb.d[j] = ca->sb.first_bucket + j; 2038 } 2039 2040 bch_initial_gc_finish(c); 2041 2042 err = "error starting allocator thread"; 2043 for_each_cache(ca, c, i) 2044 if (bch_cache_allocator_start(ca)) 2045 goto err; 2046 2047 mutex_lock(&c->bucket_lock); 2048 for_each_cache(ca, c, i) 2049 bch_prio_write(ca, true); 2050 mutex_unlock(&c->bucket_lock); 2051 2052 err = "cannot allocate new UUID bucket"; 2053 if (__uuid_write(c)) 2054 goto err; 2055 2056 err = "cannot allocate new btree root"; 2057 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); 2058 if (IS_ERR_OR_NULL(c->root)) 2059 goto err; 2060 2061 mutex_lock(&c->root->write_lock); 2062 bkey_copy_key(&c->root->key, &MAX_KEY); 2063 bch_btree_node_write(c->root, &cl); 2064 mutex_unlock(&c->root->write_lock); 2065 2066 bch_btree_set_root(c->root); 2067 rw_unlock(true, c->root); 2068 2069 /* 2070 * We don't want to write the first journal entry until 2071 * everything is set up - fortunately journal entries won't be 2072 * written until the SET_CACHE_SYNC() here: 2073 */ 2074 SET_CACHE_SYNC(&c->sb, true); 2075 2076 bch_journal_next(&c->journal); 2077 bch_journal_meta(c, &cl); 2078 } 2079 2080 err = "error starting gc thread"; 2081 if (bch_gc_thread_start(c)) 2082 goto err; 2083 2084 closure_sync(&cl); 2085 c->sb.last_mount = (u32)ktime_get_real_seconds(); 2086 bcache_write_super(c); 2087 2088 list_for_each_entry_safe(dc, t, &uncached_devices, list) 2089 bch_cached_dev_attach(dc, c, NULL); 2090 2091 flash_devs_run(c); 2092 2093 set_bit(CACHE_SET_RUNNING, &c->flags); 2094 return 0; 2095 err: 2096 while (!list_empty(&journal)) { 2097 l = list_first_entry(&journal, struct journal_replay, list); 2098 list_del(&l->list); 2099 kfree(l); 2100 } 2101 2102 closure_sync(&cl); 2103 2104 bch_cache_set_error(c, "%s", err); 2105 2106 return -EIO; 2107 } 2108 2109 static bool can_attach_cache(struct cache *ca, struct cache_set *c) 2110 { 2111 return ca->sb.block_size == c->sb.block_size && 2112 ca->sb.bucket_size == c->sb.bucket_size && 2113 ca->sb.nr_in_set == c->sb.nr_in_set; 2114 } 2115 2116 static const char *register_cache_set(struct cache *ca) 2117 { 2118 char buf[12]; 2119 const char *err = "cannot allocate memory"; 2120 struct cache_set *c; 2121 2122 list_for_each_entry(c, &bch_cache_sets, list) 2123 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { 2124 if (c->cache[ca->sb.nr_this_dev]) 2125 return "duplicate cache set member"; 2126 2127 if (!can_attach_cache(ca, c)) 2128 return "cache sb does not match set"; 2129 2130 if (!CACHE_SYNC(&ca->sb)) 2131 SET_CACHE_SYNC(&c->sb, false); 2132 2133 goto found; 2134 } 2135 2136 c = bch_cache_set_alloc(&ca->sb); 2137 if (!c) 2138 return err; 2139 2140 err = "error creating kobject"; 2141 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || 2142 kobject_add(&c->internal, &c->kobj, "internal")) 2143 goto err; 2144 2145 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) 2146 goto err; 2147 2148 bch_debug_init_cache_set(c); 

static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size == c->sb.block_size &&
		ca->sb.bucket_size == c->sb.bucket_size &&
		ca->sb.nr_in_set == c->sb.nr_in_set;
}

static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version = ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags = ca->sb.flags;
		c->sb.seq = ca->sb.seq;
		pr_debug("set version = %llu\n", c->sb.version);
	}

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set) {
		err = "failed to run cache set";
		if (run_cache_set(c) < 0)
			goto err;
	}

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

/* When ca->kobj released */
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned int i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_disk)
		put_page(virt_to_page(ca->sb_disk));

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}

static int cache_alloc(struct cache *ca)
{
	size_t free;
	size_t btree_buckets;
	struct bucket *b;
	int ret = -ENOMEM;
	const char *err = NULL;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	/*
	 * When ca->sb.njournal_buckets is not zero a journal exists, and
	 * bch_journal_replay() may split btree nodes, so buckets of the
	 * RESERVE_BTREE type are needed.  In the worst case every journal
	 * bucket holds valid journal entries and all of the keys need to be
	 * replayed, so reserve as many RESERVE_BTREE buckets as there are
	 * journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
	if (!free) {
		ret = -EPERM;
		err = "ca->sb.nbuckets is too small";
		goto err_free;
	}

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
		       GFP_KERNEL)) {
		err = "ca->free[RESERVE_BTREE] alloc failed";
		goto err_btree_alloc;
	}

	if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
			     GFP_KERNEL)) {
		err = "ca->free[RESERVE_PRIO] alloc failed";
		goto err_prio_alloc;
	}

	if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
		err = "ca->free[RESERVE_MOVINGGC] alloc failed";
		goto err_movinggc_alloc;
	}

	if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
		err = "ca->free[RESERVE_NONE] alloc failed";
		goto err_none_alloc;
	}

	if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
		err = "ca->free_inc alloc failed";
		goto err_free_inc_alloc;
	}

	if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
		err = "ca->heap alloc failed";
		goto err_heap_alloc;
	}
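
	/*
	 * Illustrative sizing example for the reserves above (hypothetical
	 * numbers, not taken from a real device): with ca->sb.nbuckets = 2^20
	 * and ca->sb.njournal_buckets = 128, the code above sets up
	 *
	 *	btree_buckets          = 128 (one per journal bucket)
	 *	free                   = 2^20 >> 10 = 1024
	 *	free[RESERVE_BTREE]    = 128 entries
	 *	free[RESERVE_PRIO]     = prio_buckets(ca) entries (exact-sized)
	 *	free[RESERVE_MOVINGGC] = 1024 entries
	 *	free[RESERVE_NONE]     = 1024 entries
	 *	free_inc               = 4096 entries (free << 2)
	 *	heap                   = 8192 entries (free << 3)
	 */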
alloc failed"; 2274 goto err_heap_alloc; 2275 } 2276 2277 ca->buckets = vzalloc(array_size(sizeof(struct bucket), 2278 ca->sb.nbuckets)); 2279 if (!ca->buckets) { 2280 err = "ca->buckets alloc failed"; 2281 goto err_buckets_alloc; 2282 } 2283 2284 ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t), 2285 prio_buckets(ca), 2), 2286 GFP_KERNEL); 2287 if (!ca->prio_buckets) { 2288 err = "ca->prio_buckets alloc failed"; 2289 goto err_prio_buckets_alloc; 2290 } 2291 2292 ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca); 2293 if (!ca->disk_buckets) { 2294 err = "ca->disk_buckets alloc failed"; 2295 goto err_disk_buckets_alloc; 2296 } 2297 2298 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); 2299 2300 for_each_bucket(b, ca) 2301 atomic_set(&b->pin, 0); 2302 return 0; 2303 2304 err_disk_buckets_alloc: 2305 kfree(ca->prio_buckets); 2306 err_prio_buckets_alloc: 2307 vfree(ca->buckets); 2308 err_buckets_alloc: 2309 free_heap(&ca->heap); 2310 err_heap_alloc: 2311 free_fifo(&ca->free_inc); 2312 err_free_inc_alloc: 2313 free_fifo(&ca->free[RESERVE_NONE]); 2314 err_none_alloc: 2315 free_fifo(&ca->free[RESERVE_MOVINGGC]); 2316 err_movinggc_alloc: 2317 free_fifo(&ca->free[RESERVE_PRIO]); 2318 err_prio_alloc: 2319 free_fifo(&ca->free[RESERVE_BTREE]); 2320 err_btree_alloc: 2321 err_free: 2322 module_put(THIS_MODULE); 2323 if (err) 2324 pr_notice("error %s: %s\n", ca->cache_dev_name, err); 2325 return ret; 2326 } 2327 2328 static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, 2329 struct block_device *bdev, struct cache *ca) 2330 { 2331 const char *err = NULL; /* must be set for any error case */ 2332 int ret = 0; 2333 2334 bdevname(bdev, ca->cache_dev_name); 2335 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); 2336 ca->bdev = bdev; 2337 ca->bdev->bd_holder = ca; 2338 ca->sb_disk = sb_disk; 2339 2340 if (blk_queue_discard(bdev_get_queue(bdev))) 2341 ca->discard = CACHE_DISCARD(&ca->sb); 2342 2343 ret = cache_alloc(ca); 2344 if (ret != 0) { 2345 /* 2346 * If we failed here, it means ca->kobj is not initialized yet, 2347 * kobject_put() won't be called and there is no chance to 2348 * call blkdev_put() to bdev in bch_cache_release(). So we 2349 * explicitly call blkdev_put() here. 

static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
			  struct block_device *bdev, struct cache *ca)
{
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, ca->cache_dev_name);
	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;
	ca->sb_disk = sb_disk;

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		/*
		 * If we failed here, it means ca->kobj is not initialized yet,
		 * kobject_put() won't be called and there is no chance for
		 * bch_cache_release() to call blkdev_put() on bdev. So we
		 * explicitly call blkdev_put() here.
		 */
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else if (ret == -EPERM)
			err = "cache_alloc(): cache device is too small";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj,
			&part_to_dev(bdev->bd_part)->kobj,
			"bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s\n", ca->cache_dev_name);

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error %s: %s\n", ca->cache_dev_name, err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size);
static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer, size_t size);

kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
kobj_attribute_write(register_async, register_bcache);
kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);

static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned int i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

struct async_reg_args {
	struct delayed_work reg_work;
	char *path;
	struct cache_sb *sb;
	struct cache_sb_disk *sb_disk;
	struct block_device *bdev;
};

static void register_bdev_worker(struct work_struct *work)
{
	bool fail = false;
	struct async_reg_args *args =
		container_of(work, struct async_reg_args, reg_work.work);
	struct cached_dev *dc;

	dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		fail = true;
		put_page(virt_to_page(args->sb_disk));
		blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		goto out;
	}

	mutex_lock(&bch_register_lock);
	if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0)
		fail = true;
	mutex_unlock(&bch_register_lock);

out:
	if (fail)
		pr_info("error %s: failed to register backing device\n",
			args->path);
	kfree(args->sb);
	kfree(args->path);
	kfree(args);
	module_put(THIS_MODULE);
}
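
/*
 * Note on resource ownership in the async workers: if the kzalloc() above
 * (or in register_cache_worker() below) fails, the worker itself must drop
 * the superblock page and the exclusive bdev reference that register_bcache()
 * handed over in struct async_reg_args.  Once register_bdev() or
 * register_cache() has been called, those references belong to the new
 * device and are released on its teardown path (cached_dev_free() or
 * bch_cache_release()) instead.
 */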

static void register_cache_worker(struct work_struct *work)
{
	bool fail = false;
	struct async_reg_args *args =
		container_of(work, struct async_reg_args, reg_work.work);
	struct cache *ca;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		fail = true;
		put_page(virt_to_page(args->sb_disk));
		blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		goto out;
	}

	/* blkdev_put() will be called in bch_cache_release() */
	if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0)
		fail = true;

out:
	if (fail)
		pr_info("error %s: failed to register cache device\n",
			args->path);
	kfree(args->sb);
	kfree(args->path);
	kfree(args);
	module_put(THIS_MODULE);
}

static void register_device_async(struct async_reg_args *args)
{
	if (SB_IS_BDEV(args->sb))
		INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
	else
		INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);

	/* 10 jiffies is enough for a delay */
	queue_delayed_work(system_wq, &args->reg_work, 10);
}
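
/*
 * For reference, registration is driven entirely from user space by writing
 * a device path to the sysfs files created in bcache_init() below
 * (/sys/fs/bcache/register, register_quiet and, when
 * CONFIG_BCACHE_ASYNC_REGISTRATION is enabled, register_async).  The sketch
 * below is an illustrative, minimal user-space caller and is not part of
 * this driver; the device name is an example only.
 */
#if 0	/* user-space illustration, never compiled into the kernel */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int bcache_register(const char *dev)	/* e.g. "/dev/sdc" */
{
	int fd = open("/sys/fs/bcache/register", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	/* The written path is parsed by register_bcache() below. */
	ret = write(fd, dev, strlen(dev));
	close(fd);
	return ret < 0 ? -1 : 0;
}
#endif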

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	const char *err;
	char *path = NULL;
	struct cache_sb *sb;
	struct cache_sb_disk *sb_disk;
	struct block_device *bdev;
	ssize_t ret;

	ret = -EBUSY;
	err = "failed to reference bcache module";
	if (!try_module_get(THIS_MODULE))
		goto out;

	/* For latest state of bcache_is_reboot */
	smp_mb();
	err = "bcache is in reboot";
	if (bcache_is_reboot)
		goto out_module_put;

	ret = -ENOMEM;
	err = "cannot allocate memory";
	path = kstrndup(buffer, size, GFP_KERNEL);
	if (!path)
		goto out_module_put;

	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
	if (!sb)
		goto out_free_path;

	ret = -EINVAL;
	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto done;
		}
		goto out_free_sb;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto out_blkdev_put;

	err = read_super(sb, bdev, &sb_disk);
	if (err)
		goto out_blkdev_put;

	err = "failed to register device";
	if (attr == &ksysfs_register_async) {
		/* register in an asynchronous way */
		struct async_reg_args *args =
			kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);

		if (!args) {
			ret = -ENOMEM;
			err = "cannot allocate memory";
			goto out_put_sb_page;
		}

		args->path = path;
		args->sb = sb;
		args->sb_disk = sb_disk;
		args->bdev = bdev;
		register_device_async(args);
		/* Return to user space without waiting for the worker */
		goto async_done;
	}

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		if (!dc)
			goto out_put_sb_page;

		mutex_lock(&bch_register_lock);
		ret = register_bdev(sb, sb_disk, bdev, dc);
		mutex_unlock(&bch_register_lock);
		/* blkdev_put() will be called in cached_dev_free() */
		if (ret < 0)
			goto out_free_sb;
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

		if (!ca)
			goto out_put_sb_page;

		/* blkdev_put() will be called in bch_cache_release() */
		if (register_cache(sb, sb_disk, bdev, ca) != 0)
			goto out_free_sb;
	}

done:
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
async_done:
	return size;

out_put_sb_page:
	put_page(virt_to_page(sb_disk));
out_blkdev_put:
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
out_free_sb:
	kfree(sb);
out_free_path:
	kfree(path);
	path = NULL;
out_module_put:
	module_put(THIS_MODULE);
out:
	pr_info("error %s: %s\n", path ? path : "", err);
	return ret;
}


struct pdev {
	struct list_head list;
	struct cached_dev *dc;
};

static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer,
					 size_t size)
{
	LIST_HEAD(pending_devs);
	ssize_t ret = size;
	struct cached_dev *dc, *tdc;
	struct pdev *pdev, *tpdev;
	struct cache_set *c, *tc;

	mutex_lock(&bch_register_lock);
	list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
		pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
		if (!pdev)
			break;
		pdev->dc = dc;
		list_add(&pdev->list, &pending_devs);
	}

	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
		list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
			char *pdev_set_uuid = pdev->dc->sb.set_uuid;
			char *set_uuid = c->sb.uuid;

			if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
				list_del(&pdev->list);
				kfree(pdev);
				break;
			}
		}
	}
	mutex_unlock(&bch_register_lock);

	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
		pr_info("delete pdev %p\n", pdev);
		list_del(&pdev->list);
		bcache_device_stop(&pdev->dc->disk);
		kfree(pdev);
	}

	return ret;
}
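
/*
 * bch_pending_bdevs_cleanup() above backs the /sys/fs/bcache/pendings_cleanup
 * attribute registered in bcache_init().  Writing anything to it (the buffer
 * contents are ignored, so e.g. "echo 1 > /sys/fs/bcache/pendings_cleanup"
 * works) stops every backing device still sitting on uncached_devices whose
 * cache set never showed up, instead of leaving it pending forever.
 */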

static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (bcache_is_reboot)
		return NOTIFY_DONE;

	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (bcache_is_reboot)
			goto out;

		/* New registration is rejected since now */
		bcache_is_reboot = true;
		/*
		 * Make a registering caller (if there is one) on another CPU
		 * core see that bcache_is_reboot was set to true earlier
		 */
		smp_mb();

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		mutex_unlock(&bch_register_lock);

		pr_info("Stopping all devices:\n");

		/*
		 * The reason bch_register_lock is not held to call
		 * bch_cache_set_stop() and bcache_device_stop() is to
		 * avoid potential deadlock during reboot, because cache
		 * set or bcache device stopping process will acquire
		 * bch_register_lock too.
		 *
		 * We are safe here because bcache_is_reboot is already set
		 * to true, so register_bcache() will reject new registrations
		 * now. bcache_is_reboot also makes sure bcache_reboot()
		 * won't be re-entered by another thread, so there is no race
		 * in the following list iteration by
		 * list_for_each_entry_safe().
		 */
		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);


		/*
		 * Give an early chance for other kthreads and
		 * kworkers to stop themselves
		 */
		schedule();

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 10 * HZ - jiffies;

			mutex_lock(&bch_register_lock);
			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped\n");
		else
			pr_notice("Timeout waiting for devices to be closed\n");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call = bcache_reboot,
	.priority = INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bch_journal_wq)
		destroy_workqueue(bch_journal_wq);

	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

/* Check and fixup module parameters */
static void check_module_parameters(void)
{
	if (bch_cutoff_writeback_sync == 0)
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
	else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
		pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
			bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
	}

	if (bch_cutoff_writeback == 0)
		bch_cutoff_writeback = CUTOFF_WRITEBACK;
	else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
		pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
			bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
		bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
	}

	if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
		pr_warn("set bch_cutoff_writeback (%u) to %u\n",
			bch_cutoff_writeback, bch_cutoff_writeback_sync);
		bch_cutoff_writeback = bch_cutoff_writeback_sync;
	}
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
		&ksysfs_register_async.attr,
#endif
		&ksysfs_pendings_cleanup.attr,
		NULL
	};

	check_module_parameters();

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
	if (!bcache_wq)
		goto err;

	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
	if (!bch_journal_wq)
		goto err;

	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
	if (!bcache_kobj)
		goto err;

	if (bch_request_init() ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	bch_debug_init();
	closure_debug_init();

	bcache_is_reboot = false;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

/*
 * Module hooks
 */
module_exit(bcache_exit);
module_init(bcache_init);

module_param(bch_cutoff_writeback, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");

module_param(bch_cutoff_writeback_sync, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");

MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_LICENSE("GPL");
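
/*
 * The two writeback cutoffs above can also be set at module load time, e.g.
 * "modprobe bcache bch_cutoff_writeback=30 bch_cutoff_writeback_sync=50"
 * (illustrative values only).  check_module_parameters() fills in the
 * defaults when they are left at 0 and clamps out-of-range or inconsistent
 * values, so bch_cutoff_writeback never ends up above
 * bch_cutoff_writeback_sync.
 */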