// SPDX-License-Identifier: GPL-2.0
/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"
#include "features.h"

#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

unsigned int bch_cutoff_writeback;
unsigned int bch_cutoff_writeback_sync;

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
bool bcache_is_reboot;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
struct workqueue_struct *bch_journal_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
#define BCACHE_MINORS		128
/* limitation of bcache devices number on single system */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)

/* Superblock */

static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s)
{
	unsigned int bucket_size = le16_to_cpu(s->bucket_size);

	if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
	    bch_has_feature_large_bucket(sb))
		bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;

	return bucket_size;
}

static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev,
				     struct cache_sb_disk *s)
{
	const char *err;
	unsigned int i;

	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->nbuckets	 = le64_to_cpu(s->nbuckets);
	sb->bucket_size	 = get_bucket_size(sb, s);

	sb->nr_in_set	 = le16_to_cpu(s->nr_in_set);
	sb->nr_this_dev	 = le16_to_cpu(s->nr_this_dev);

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Too many buckets";
	if (sb->nbuckets > LONG_MAX)
		goto err;

	err = "Not enough buckets";
	if (sb->nbuckets < 1 << 7)
		goto err;

	err = "Bad block size (not power of 2)";
	if (!is_power_of_2(sb->block_size))
		goto err;

	err = "Bad block size (larger than page size)";
	if (sb->block_size > PAGE_SECTORS)
		goto err;

	err = "Bad bucket size (not power of 2)";
	if (!is_power_of_2(sb->bucket_size))
		goto err;

	err = "Bad bucket size (smaller than page size)";
	if (sb->bucket_size < PAGE_SECTORS)
		goto err;

	err = "Invalid superblock: device too small";
	if (get_capacity(bdev->bd_disk) <
	    sb->bucket_size * sb->nbuckets)
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->set_uuid, 16))
		goto err;

	err = "Bad cache device number in set";
	if (!sb->nr_in_set ||
	    sb->nr_in_set <= sb->nr_this_dev ||
	    sb->nr_in_set > MAX_CACHES_PER_SET)
		goto err;

	err = "Journal buckets not sequential";
	for (i = 0; i < sb->keys; i++)
		if (sb->d[i] != sb->first_bucket + i)
			goto err;

	err = "Too many journal buckets";
	if (sb->first_bucket + sb->keys > sb->nbuckets)
		goto err;

	err = "Invalid superblock: first bucket comes before end of super";
	if (sb->first_bucket * sb->bucket_size < 16)
		goto err;

	err = NULL;
err:
	return err;
}

static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct cache_sb_disk **res)
{
	const char *err;
	struct cache_sb_disk *s;
	struct page *page;
	unsigned int i;

	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
	if (IS_ERR(page))
		return "IO error";
	s = page_address(page) + offset_in_page(SB_OFFSET);

	sb->offset = le64_to_cpu(s->offset);
	sb->version = le64_to_cpu(s->version);

	memcpy(sb->magic, s->magic, 16);
	memcpy(sb->uuid, s->uuid, 16);
	memcpy(sb->set_uuid, s->set_uuid, 16);
	memcpy(sb->label, s->label, SB_LABEL_SIZE);

	sb->flags = le64_to_cpu(s->flags);
	sb->seq = le64_to_cpu(s->seq);
	sb->last_mount = le32_to_cpu(s->last_mount);
	sb->keys = le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock (bad offset)";
	if (sb->offset != SB_SECTOR)
		goto err;

	err = "Not a bcache superblock (bad magic)";
	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size = le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset = BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
	case BCACHE_SB_VERSION_BDEV_WITH_FEATURES:
		sb->data_offset = le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		err = read_super_common(sb, bdev, s);
		if (err)
			goto err;
		break;
	case BCACHE_SB_VERSION_CDEV_WITH_FEATURES:
		/*
		 * Feature bits are needed in read_super_common(),
		 * convert them first.
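		 * (get_bucket_size() calls bch_has_feature_large_bucket(),
		 * which needs these bits to compute the real bucket size.)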
		 */
		sb->feature_compat = le64_to_cpu(s->feature_compat);
		sb->feature_incompat = le64_to_cpu(s->feature_incompat);
		sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
		err = read_super_common(sb, bdev, s);
		if (err)
			goto err;
		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = (u32)ktime_get_real_seconds();
	*res = s;
	return NULL;
err:
	put_page(page);
	return err;
}

static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;

	if (bio->bi_status)
		bch_count_backing_io_errors(dc, bio);

	closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
		struct bio *bio)
{
	unsigned int i;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
	bio->bi_iter.bi_sector = SB_SECTOR;
	__bio_add_page(bio, virt_to_page(out), SB_SIZE,
			offset_in_page(out));

	out->offset = cpu_to_le64(sb->offset);

	memcpy(out->uuid, sb->uuid, 16);
	memcpy(out->set_uuid, sb->set_uuid, 16);
	memcpy(out->label, sb->label, SB_LABEL_SIZE);

	out->flags = cpu_to_le64(sb->flags);
	out->seq = cpu_to_le64(sb->seq);

	out->last_mount = cpu_to_le32(sb->last_mount);
	out->first_bucket = cpu_to_le16(sb->first_bucket);
	out->keys = cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
		out->feature_compat = cpu_to_le64(sb->feature_compat);
		out->feature_incompat = cpu_to_le64(sb->feature_incompat);
		out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat);
	}

	out->version = cpu_to_le64(sb->version);
	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu\n",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_init(bio, dc->sb_bv, 1);
	bio_set_dev(bio, dc->bdev);
	bio->bi_end_io = write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	/* I/O request sent to backing device */
	__write_super(&dc->sb, dc->sb_disk, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	/* is_read = 0 */
	bch_count_io_errors(ca, bio->bi_status, 0,
			    "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca = c->cache;
	struct bio *bio = &ca->sb_bio;
	unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	if (c->sb.version > version)
		version = c->sb.version;

	ca->sb.version = version;
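	/* mirror the cache_set's in-memory superblock state into this cache's sb */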
	ca->sb.seq = c->sb.seq;
	ca->sb.last_mount = c->sb.last_mount;

	SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

	bio_init(bio, ca->sb_bv, 1);
	bio_set_dev(bio, ca->bdev);
	bio->bi_end_io = write_super_endio;
	bio->bi_private = ca;

	closure_get(cl);
	__write_super(&ca->sb, ca->sb_disk, bio);

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_status, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned int i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io = uuid_endio;
		bio->bi_private = cl;
		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (op != REQ_OP_WRITE)
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, REQ_OP_READ, 0, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0 *u0 = (void *) c->uuids;
		struct uuid_entry *u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid, u0[i].uuid, 16);
			memcpy(u1[i].label, u0[i].label, 32);

			u1[i].first_reg = u0[i].first_reg;
			u1[i].last_reg = u0[i].last_reg;
			u1[i].invalidated = u0[i].invalidated;

			u1[i].flags = 0;
			u1[i].sectors = 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	struct cache *ca;
	unsigned int size;

	closure_init_stack(&cl);
	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
		return 1;

	size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
	SET_KEY_SIZE(&k.key, size);
	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
	closure_sync(&cl);

	/* Only one bucket used for uuid write */
	ca = PTR_CACHE(c, &k.key, 0);
	atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";

	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list; the
 * journal header points to the first bucket, the first bucket points to the
 * second bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
		    unsigned long op_flags)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb);

	bio->bi_end_io = prio_endio;
	bio->bi_private = ca;
	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(ca->set, bio, &ca->prio);
	closure_sync(cl);
}

int bch_prio_write(struct cache *ca, bool wait)
{
	int i;
	struct bucket *b;
	struct closure cl;

	pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
		 fifo_used(&ca->free[RESERVE_PRIO]),
		 fifo_used(&ca->free[RESERVE_NONE]),
		 fifo_used(&ca->free_inc));

	/*
	 * Pre-check if there are enough free buckets. In the non-blocking
	 * scenario it's better to fail early rather than starting to allocate
	 * buckets and do a cleanup later in case of failure.
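	 * (prio_buckets(ca) buckets are needed, one per group of
	 * prios_per_bucket(ca) struct bucket_disk entries written below.)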
	 */
	if (!wait) {
		size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
			       fifo_used(&ca->free[RESERVE_NONE]);
		if (prio_buckets(ca) > avail)
			return -ENOMEM;
	}

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket = ca->prio_buckets[i + 1];
		p->magic = pset_magic(&ca->sb);
		p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_OP_WRITE, 0);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
	return 0;
}

static int prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned int bucket_nr = 0;
	int ret = -EIO;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, REQ_OP_READ, 0);

			if (p->csum !=
			    bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
				pr_warn("bad csum reading priorities\n");
				goto out;
			}

			if (p->magic != pset_magic(&ca->sb)) {
				pr_warn("bad magic reading priorities\n");
				goto out;
			}

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}

	ret = 0;
out:
	return ret;
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;

	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;

	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;

	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_cached_ops = {
	.submit_bio	= cached_dev_submit_bio,
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

static const struct block_device_operations bcache_flash_ops = {
	.submit_bio	= flash_dev_submit_bio,
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		/*
		 * closure_fn set to
		 * - cached device: cached_dev_flush()
		 * - flash dev: flash_dev_flush()
		 */
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		struct cache *ca = d->c->cache;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	struct cache *ca = c->cache;
	int ret;

	bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
	if (ret < 0)
		pr_err("Couldn't create device -> cache set symlink\n");

	ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
	if (ret < 0)
		pr_err("Couldn't create cache set -> device symlink\n");

	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	atomic_dec(&d->c->attached_dev_nr);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned int id)
{
	d->id = id;
	d->c = c;
	c->devices[id] = d;

	if (id >= c->devices_max_used)
		c->devices_max_used = id + 1;

	closure_get(&c->caching);
}

static inline int first_minor_to_idx(int first_minor)
{
	return (first_minor/BCACHE_MINORS);
}

static inline int idx_to_first_minor(int idx)
{
	return (idx * BCACHE_MINORS);
}

static void bcache_device_free(struct bcache_device *d)
{
	struct gendisk *disk = d->disk;

	lockdep_assert_held(&bch_register_lock);

	if (disk)
		pr_info("%s stopped\n", disk->disk_name);
	else
		pr_err("bcache device (NULL gendisk) stopped\n");

	if (d->c)
		bcache_device_detach(d);

	if (disk) {
		bool disk_added = (disk->flags & GENHD_FL_UP) != 0;

		if (disk_added)
			del_gendisk(disk);

		if (disk->queue)
			blk_cleanup_queue(disk->queue);

		ida_simple_remove(&bcache_device_idx,
				  first_minor_to_idx(disk->first_minor));
		if (disk_added)
			put_disk(disk);
	}

	bioset_exit(&d->bio_split);
	kvfree(d->full_dirty_stripes);
	kvfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
		sector_t sectors, struct block_device *cached_bdev,
		const struct block_device_operations *ops)
{
	struct request_queue *q;
	const size_t max_stripes = min_t(size_t, INT_MAX,
					 SIZE_MAX / sizeof(atomic_t));
	uint64_t n;
	int idx;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;

	n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
	if (!n || n > max_stripes) {
		pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
			n);
		return -ENOMEM;
	}
	d->nr_stripes = n;

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
	if (!d->full_dirty_stripes)
		return -ENOMEM;

	idx = ida_simple_get(&bcache_device_idx, 0,
				BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
	if (idx < 0)
		return idx;

	if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
		goto err;

	d->disk = alloc_disk(BCACHE_MINORS);
	if (!d->disk)
		goto err;

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= idx_to_first_minor(idx);
	d->disk->fops		= ops;
	d->disk->private_data	= d;

	q = blk_alloc_queue(NUMA_NO_NODE);
	if (!q)
		return -ENOMEM;

	d->disk->queue			= q;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_PAGES;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	q->limits.discard_granularity	= 512;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;

	if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
		/*
		 * This should only happen with BCACHE_SB_VERSION_BDEV.
		 * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
		 */
		pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
			d->disk->disk_name, q->limits.logical_block_size,
			PAGE_SIZE, bdev_logical_block_size(cached_bdev));

		/* This also adjusts physical block size/min io size if needed */
		blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
	}

	blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);

	blk_queue_write_cache(q, true, true);

	return 0;

err:
	ida_simple_remove(&bcache_device_idx, idx);
	return -ENOMEM;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

#define BACKING_DEV_OFFLINE_TIMEOUT 5
static int cached_dev_status_update(void *arg)
{
	struct cached_dev *dc = arg;
	struct request_queue *q;

	/*
	 * If this delayed worker is stopping outside, directly quit here.
	 * dc->io_disable might be set via sysfs interface, so check it
	 * here too.
	 */
	while (!kthread_should_stop() && !dc->io_disable) {
		q = bdev_get_queue(dc->bdev);
		if (blk_queue_dying(q))
			dc->offline_seconds++;
		else
			dc->offline_seconds = 0;

		if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
			pr_err("%s: device offline for %d seconds\n",
			       dc->backing_dev_name,
			       BACKING_DEV_OFFLINE_TIMEOUT);
			pr_err("%s: disable I/O request due to backing device offline\n",
			       dc->disk.name);
			dc->io_disable = true;
			/* let others know earlier that io_disable is true */
			smp_mb();
			bcache_device_stop(&dc->disk);
			break;
		}
		schedule_timeout_interruptible(HZ);
	}

	wait_for_kthread_stop();
	return 0;
}

int bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;
	char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
	char *env[] = {
		"DRIVER=bcache",
		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
		kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""),
		NULL,
	};

	if (dc->io_disable) {
		pr_err("I/O disabled on cached dev %s\n",
		       dc->backing_dev_name);
		kfree(env[1]);
		kfree(env[2]);
		kfree(buf);
		return -EIO;
	}

	if (atomic_xchg(&dc->running, 1)) {
		kfree(env[1]);
		kfree(env[2]);
		kfree(buf);
		pr_info("cached dev %s is running already\n",
			dc->backing_dev_name);
		return -EBUSY;
	}

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;

		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
	/*
	 * won't show up in the uevent file, use udevadm monitor -e instead
	 * only class / kset properties are persistent
	 */
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
	kfree(env[1]);
	kfree(env[2]);
	kfree(buf);

	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj,
			      &d->kobj, "bcache")) {
		pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
		return -ENOMEM;
	}

	dc->status_update_thread = kthread_run(cached_dev_status_update,
					       dc, "bcache_status_update");
	if (IS_ERR(dc->status_update_thread)) {
		pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n");
	}

	return 0;
}

/*
 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed
 * work dc->writeback_rate_update is running. Wait until the routine
 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out
 * seconds, give up waiting here and continue to cancel it too.
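 * (time_out actually counts jiffies: each loop iteration below sleeps for one
 * jiffy, so the total wait is roughly WRITEBACK_RATE_UPDATE_SECS_MAX seconds.)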
 */
static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
{
	int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;

	do {
		if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
			      &dc->disk.flags))
			break;
		time_out--;
		schedule_timeout_interruptible(1);
	} while (time_out > 0);

	if (time_out == 0)
		pr_warn("give up waiting for dc->writeback_write_update to quit\n");

	cancel_delayed_work_sync(&dc->writeback_rate_update);
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	struct closure cl;

	closure_init_stack(&cl);

	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
	BUG_ON(refcount_read(&dc->count));

	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
		cancel_writeback_rate_update_dwork(dc);

	if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
		kthread_stop(dc->writeback_thread);
		dc->writeback_thread = NULL;
	}

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	mutex_lock(&bch_register_lock);

	calc_cached_dev_sectors(dc->disk.c);
	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
	clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s\n", dc->backing_dev_name);

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return;

	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);

	cached_dev_put(dc);
}

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid)
{
	uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
	struct uuid_entry *u;
	struct cached_dev *exist_dc, *t;
	int ret = 0;

	if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
	    (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached\n",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down\n",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size\n",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	/* Check whether already attached */
	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
			pr_err("Tried to attach %s but duplicate UUID already attached\n",
				dc->backing_dev_name);

			return -EINVAL;
		}
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set\n",
			       dc->backing_dev_name);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID\n",
			       dc->backing_dev_name);
			return -EINVAL;
		}
	}

	/*
	 * Deadlocks since we're called via sysfs...
	 * sysfs_remove_file(&dc->kobj, &sysfs_attach);
	 */

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;

		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	smp_wmb();
	refcount_set(&dc->count, 1);

	/* Block writeback thread, but spawn it */
	down_write(&dc->writeback_lock);
	if (bch_cached_dev_writeback_start(dc)) {
		up_write(&dc->writeback_lock);
		pr_err("Couldn't start writeback facilities for %s\n",
		       dc->disk.disk->disk_name);
		return -ENOMEM;
	}

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		atomic_set(&dc->has_dirty, 1);
		bch_writeback_queue(dc);
	}

	bch_sectors_dirty_init(&dc->disk);

	ret = bch_cached_dev_run(dc);
	if (ret && (ret != -EBUSY)) {
		up_write(&dc->writeback_lock);
		/*
		 * bch_register_lock is held, bcache_device_stop() is not
		 * able to be directly called. The kthread and kworker
		 * created previously in bch_cached_dev_writeback_start()
		 * have to be stopped manually here.
		 */
		kthread_stop(dc->writeback_thread);
		cancel_writeback_rate_update_dwork(dc);
		pr_err("Couldn't run cached device %s\n",
		       dc->backing_dev_name);
		return ret;
	}

	bcache_device_link(&dc->disk, c, "bdev");
	atomic_inc(&c->attached_dev_nr);

	/* Allow the writeback thread to proceed */
	up_write(&dc->writeback_lock);

	pr_info("Caching %s as %s on set %pU\n",
		dc->backing_dev_name,
		dc->disk.disk->disk_name,
		dc->disk.c->set_uuid);
	return 0;
}

/* when dc->disk.kobj released */
void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
		cancel_writeback_rate_update_dwork(dc);

	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		kthread_stop(dc->writeback_thread);
	if (!IS_ERR_OR_NULL(dc->status_update_thread))
		kthread_stop(dc->status_update_thread);

	mutex_lock(&bch_register_lock);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (dc->sb_disk)
		put_page(virt_to_page(dc->sb_disk));

	if (!IS_ERR_OR_NULL(dc->bdev))
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	sema_init(&dc->sb_write_mutex, 1);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_cutoff = 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	dc->disk.stripe_size = q->limits.io_opt >> 9;

	if (dc->disk.stripe_size)
		dc->partial_stripes_expensive =
			q->limits.raid_partial_stripes_expensive;

	ret = bcache_device_init(&dc->disk, block_size,
			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
			 dc->bdev, &bcache_cached_ops);
	if (ret)
		return ret;

	blk_queue_io_opt(dc->disk.disk->queue,
		max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));

	atomic_set(&dc->io_errors, 0);
	dc->io_disable = false;
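	/* backing device I/O errors tolerated before the cached device is stopped */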
	dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
	/* default to auto */
	dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */

static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
				 struct block_device *bdev,
				 struct cached_dev *dc)
{
	const char *err = "cannot allocate memory";
	struct cache_set *c;
	int ret = -ENOMEM;

	bdevname(bdev, dc->backing_dev_name);
	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;
	dc->sb_disk = sb_disk;

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s\n", dc->backing_dev_name);

	list_add(&dc->list, &uncached_devices);
	/* attach to a matched cache set if it exists */
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c, NULL);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
		err = "failed to run cached device";
		ret = bch_cached_dev_run(dc);
		if (ret)
			goto err;
	}

	return 0;
err:
	pr_notice("error %s: %s\n", dc->backing_dev_name, err);
	bcache_device_stop(&dc->disk);
	return ret;
}

/* Flash only volumes */

/* When d->kobj released */
void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	mutex_lock(&bch_register_lock);
	atomic_long_sub(bcache_dev_sectors_dirty(d),
			&d->c->flash_dev_dirty_sectors);
	bcache_device_free(d);
	mutex_unlock(&bch_register_lock);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
			NULL, &bcache_flash_ops))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	bch_sectors_dirty_init(d);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
		return -EPERM;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID\n");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

bool bch_cached_dev_error(struct cached_dev *dc)
{
	if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return false;

	dc->io_disable = true;
	/* make others know io_disable is true earlier */
	smp_mb();

	pr_err("stop %s: too many IO errors on backing device %s\n",
	       dc->disk.disk->disk_name, dc->backing_dev_name);

	bcache_device_stop(&dc->disk);
	return true;
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (c->on_error != ON_ERROR_PANIC &&
	    test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
		pr_info("CACHE_SET_IO_DISABLE already set\n");

	/*
	 * XXX: we can be called from atomic context
	 * acquire_console_sem();
	 */

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("error on %pU: %pV, disabling caching\n",
	       c->set_uuid, &vaf);

	va_end(args);

	if (c->on_error == ON_ERROR_PANIC)
		panic("panic forced after error\n");

	bch_cache_set_unregister(c);
	return true;
}

/* When c->kobj released */
void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;

	debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	mutex_lock(&bch_register_lock);
	ca = c->cache;
	if (ca) {
		ca->set = NULL;
		c->cache = NULL;
		kobject_put(&ca->kobj);
	}

	bch_bset_sort_state_free(&c->sort);
	free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));

	if (c->moving_gc_wq)
		destroy_workqueue(c->moving_gc_wq);
	bioset_exit(&c->bio_split);
	mempool_exit(&c->fill_iter);
	mempool_exit(&c->bio_meta);
	mempool_exit(&c->search);
	kfree(c->devices);

	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered\n", c->set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cache *ca = c->cache;
	struct btree *b;

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (!IS_ERR_OR_NULL(c->gc_thread))
		kthread_stop(c->gc_thread);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/*
	 * Avoid flushing cached nodes if cache set is retiring
	 * due to too many I/O errors detected.
	 */
	if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
		list_for_each_entry(b, &c->btree_cache, list) {
			mutex_lock(&b->write_lock);
			if (btree_node_dirty(b))
				__bch_btree_node_write(b, NULL);
			mutex_unlock(&b->write_lock);
		}

	if (ca->alloc_thread)
		kthread_stop(ca->alloc_thread);

	if (c->journal.cur) {
		cancel_delayed_work_sync(&c->journal.work);
		/* flush last journal entry if needed */
		c->journal.work.work.func(&c->journal.work.work);
	}

	closure_return(cl);
}

/*
 * This function is only called when CACHE_SET_IO_DISABLE is set, which means
 * cache set is unregistering due to too many I/O errors. In this condition,
 * the bcache device might be stopped, it depends on stop_when_cache_set_failed
 * value and whether the broken cache has dirty data:
 *
 * dc->stop_when_cache_set_failed    dc->has_dirty   stop bcache device
 *  BCH_CACHED_STOP_AUTO               0               NO
 *  BCH_CACHED_STOP_AUTO               1               YES
 *  BCH_CACHED_DEV_STOP_ALWAYS         0               YES
 *  BCH_CACHED_DEV_STOP_ALWAYS         1               YES
 *
 * The expected behavior is, if stop_when_cache_set_failed is configured to
 * "auto" via sysfs interface, the bcache device will not be stopped if the
 * backing device is clean on the broken cache device.
 */
static void conditional_stop_bcache_device(struct cache_set *c,
					   struct bcache_device *d,
					   struct cached_dev *dc)
{
	if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
		pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
			d->disk->disk_name, c->set_uuid);
		bcache_device_stop(d);
	} else if (atomic_read(&dc->has_dirty)) {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
		 * and dc->has_dirty == 1
		 */
		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n",
			d->disk->disk_name);
		/*
		 * There might be a small time gap where the cache set is
		 * released but the bcache device is not. Inside this time
		 * gap, regular I/O requests will go directly to the
		 * backing device, as no cache set is attached. This
		 * behavior may also introduce potentially inconsistent
		 * data in writeback mode while the cache is dirty.
		 * Therefore, before calling bcache_device_stop() due
		 * to a broken cache device, dc->io_disable should be
		 * explicitly set to true.
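		 * (bch_cached_dev_error() follows the same io_disable ->
		 * smp_mb() -> bcache_device_stop() sequence.)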
		 */
		dc->io_disable = true;
		/* make others know io_disable is true earlier */
		smp_mb();
		bcache_device_stop(d);
	} else {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
		 * and dc->has_dirty == 0
		 */
		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n",
			d->disk->disk_name);
	}
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc;
	struct bcache_device *d;
	size_t i;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++) {
		d = c->devices[i];
		if (!d)
			continue;

		if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
		    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
			dc = container_of(d, struct cached_dev, disk);
			bch_cached_dev_detach(dc);
			if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
				conditional_stop_bcache_device(c, d, dc);
		} else {
			bcache_device_stop(d);
		}
	}

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		/* closure_fn set to __cache_set_unregister() */
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_meta_bucket_pages(gfp, sb)		\
	((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);

	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
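	/*
	 * Drop the initial reference on c->cl here; cache_set_free() will run
	 * once every remaining reference, including the one held by the child
	 * closure c->caching, has been released.
	 */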
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->set_uuid, sb->set_uuid, 16);
	c->sb.block_size = sb->block_size;
	c->sb.bucket_size = sb->bucket_size;
	c->sb.nr_in_set = sb->nr_in_set;
	c->sb.last_mount = sb->last_mount;
	c->sb.version = sb->version;
	if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
		c->sb.feature_compat = sb->feature_compat;
		c->sb.feature_ro_compat = sb->feature_ro_compat;
		c->sb.feature_incompat = sb->feature_incompat;
	}

	c->bucket_bits = ilog2(sb->bucket_size);
	c->block_bits = ilog2(sb->block_size);
	c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
	c->devices_max_used = 0;
	atomic_set(&c->attached_dev_nr, 0);
	c->btree_pages = meta_bucket_pages(&c->sb);
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->btree_cache_wait);
	spin_lock_init(&c->btree_cannibalize_lock);
	init_waitqueue_head(&c->bucket_wait);
	init_waitqueue_head(&c->gc_wait);
	sema_init(&c->uuid_write_mutex, 1);

	spin_lock_init(&c->btree_gc_time.lock);
	spin_lock_init(&c->btree_split_time.lock);
	spin_lock_init(&c->btree_read_time.lock);

	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
	if (!c->devices)
		goto err;

	if (mempool_init_slab_pool(&c->search, 32, bch_search_cache))
		goto err;

	if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
			sizeof(struct bbio) +
			sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
		goto err;

	if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
		goto err;

	if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
		goto err;

	c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
	if (!c->uuids)
		goto err;

	c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
	if (!c->moving_gc_wq)
		goto err;

	if (bch_journal_alloc(c))
		goto err;

	if (bch_btree_cache_alloc(c))
		goto err;

	if (bch_open_buckets_alloc(c))
		goto err;

	if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
		goto err;

	c->congested_read_threshold_us	= 2000;
	c->congested_write_threshold_us	= 20000;
	c->error_limit	= DEFAULT_IO_ERROR_LIMIT;
	c->idle_max_writeback_rate_enabled = 1;
	WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

static int run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca = c->cache;
	struct closure cl;
	LIST_HEAD(journal);
	struct journal_replay *l;

	closure_init_stack(&cl);

	c->nbuckets = ca->sb.nbuckets;
	set_gc_sectors(c);

	if (CACHE_SYNC(&c->cache->sb)) {
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal))
			goto err;

		pr_debug("btree_journal_read() done\n");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
			goto err;

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_btree_ptr_invalid(c, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, NULL, k,
					     j->btree_level,
					     true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c))
			goto err;

		bch_journal_mark(c, &journal);
		bch_initial_gc_finish(c);
		pr_debug("btree_check() done\n");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		if (bch_cache_allocator_start(ca))
			goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		err = "bcache: replay journal failed";
		if (bch_journal_replay(c, &journal))
			goto err;
	} else {
		unsigned int j;

		pr_notice("invalidating existing data\n");
		ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
				      2, SB_JOURNAL_BUCKETS);

		for (j = 0; j < ca->sb.keys; j++)
			ca->sb.d[j] = ca->sb.first_bucket + j;

		bch_initial_gc_finish(c);

		err = "error starting allocator thread";
		if (bch_cache_allocator_start(ca))
			goto err;

		mutex_lock(&c->bucket_lock);
		bch_prio_write(ca, true);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		mutex_lock(&c->root->write_lock);
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->cache->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->sb.last_mount = (u32)ktime_get_real_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c, NULL);

	flash_devs_run(c);

	set_bit(CACHE_SET_RUNNING, &c->flags);
	return 0;
err:
	while (!list_empty(&journal)) {
		l = list_first_entry(&journal, struct journal_replay, list);
		list_del(&l->list);
		kfree(l);
	}

	closure_sync(&cl);

	bch_cache_set_error(c, "%s", err);

	return -EIO;
}

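/*
 * Find the cache set this cache belongs to (by set_uuid), allocating a
 * new one if none exists yet, wire up the sysfs links between cache and
 * cache set, and then try to run the set. Returns NULL on success or an
 * error string.
 */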
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache)
				return "duplicate cache set member";

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache = ca;

	err = "failed to run cache set";
	if (run_cache_set(c) < 0)
		goto err;

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

/* When ca->kobj released */
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned int i;

	if (ca->set) {
		BUG_ON(ca->set->cache != ca);
		ca->set->cache = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_disk)
		put_page(virt_to_page(ca->sb_disk));

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}

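/*
 * Allocate the in-memory state for a cache device: the per-reserve free
 * FIFOs, the free_inc FIFO, the bucket heap, the bucket array and the
 * prio/disk bucket buffers. Sizes are derived from ca->sb.nbuckets;
 * "free" is roundup_pow_of_two(nbuckets) >> 10, so devices with too few
 * buckets are rejected with -EPERM.
 */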
static int cache_alloc(struct cache *ca)
{
	size_t free;
	size_t btree_buckets;
	struct bucket *b;
	int ret = -ENOMEM;
	const char *err = NULL;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	/*
	 * When ca->sb.njournal_buckets is not zero, a journal exists, and
	 * btree nodes may split during bch_journal_replay(), so buckets of
	 * type RESERVE_BTREE are needed. In the worst case every journal
	 * bucket holds valid journal entries and all of the keys need to be
	 * replayed, so reserve as many RESERVE_BTREE buckets as there are
	 * journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
	if (!free) {
		ret = -EPERM;
		err = "ca->sb.nbuckets is too small";
		goto err_free;
	}

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
		       GFP_KERNEL)) {
		err = "ca->free[RESERVE_BTREE] alloc failed";
		goto err_btree_alloc;
	}

	if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
			     GFP_KERNEL)) {
		err = "ca->free[RESERVE_PRIO] alloc failed";
		goto err_prio_alloc;
	}

	if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
		err = "ca->free[RESERVE_MOVINGGC] alloc failed";
		goto err_movinggc_alloc;
	}

	if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
		err = "ca->free[RESERVE_NONE] alloc failed";
		goto err_none_alloc;
	}

	if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
		err = "ca->free_inc alloc failed";
		goto err_free_inc_alloc;
	}

	if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
		err = "ca->heap alloc failed";
		goto err_heap_alloc;
	}

	ca->buckets = vzalloc(array_size(sizeof(struct bucket),
					 ca->sb.nbuckets));
	if (!ca->buckets) {
		err = "ca->buckets alloc failed";
		goto err_buckets_alloc;
	}

	ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
					       prio_buckets(ca), 2),
				   GFP_KERNEL);
	if (!ca->prio_buckets) {
		err = "ca->prio_buckets alloc failed";
		goto err_prio_buckets_alloc;
	}

	ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb);
	if (!ca->disk_buckets) {
		err = "ca->disk_buckets alloc failed";
		goto err_disk_buckets_alloc;
	}

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);
	return 0;

err_disk_buckets_alloc:
	kfree(ca->prio_buckets);
err_prio_buckets_alloc:
	vfree(ca->buckets);
err_buckets_alloc:
	free_heap(&ca->heap);
err_heap_alloc:
	free_fifo(&ca->free_inc);
err_free_inc_alloc:
	free_fifo(&ca->free[RESERVE_NONE]);
err_none_alloc:
	free_fifo(&ca->free[RESERVE_MOVINGGC]);
err_movinggc_alloc:
	free_fifo(&ca->free[RESERVE_PRIO]);
err_prio_alloc:
	free_fifo(&ca->free[RESERVE_BTREE]);
err_btree_alloc:
err_free:
	module_put(THIS_MODULE);
	if (err)
		pr_notice("error %s: %s\n", ca->cache_dev_name, err);
	return ret;
}

static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
			  struct block_device *bdev, struct cache *ca)
{
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, ca->cache_dev_name);
	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;
	ca->sb_disk = sb_disk;

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		/*
		 * If we failed here, it means ca->kobj is not initialized yet,
		 * kobject_put() won't be called and there is no chance to
		 * call blkdev_put() to bdev in bch_cache_release(). So we
		 * explicitly call blkdev_put() here.
		 */
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else if (ret == -EPERM)
			err = "cache_alloc(): cache device is too small";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj,
			&part_to_dev(bdev->bd_part)->kobj,
			"bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s\n", ca->cache_dev_name);

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error %s: %s\n", ca->cache_dev_name, err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size);
static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer, size_t size);

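/*
 * These attributes are created under /sys/fs/bcache/ by bcache_init().
 * Typical usage from user space, for illustration only (device names are
 * examples):
 *
 *	echo /dev/sdb > /sys/fs/bcache/register
 *	echo /dev/sdb > /sys/fs/bcache/register_quiet
 *	echo 1 > /sys/fs/bcache/pendings_cleanup
 *
 * register_quiet reports success instead of an error when the device is
 * busy or already registered; any write to pendings_cleanup triggers the
 * cleanup of backing devices still waiting for their cache set.
 */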
kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);

static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
		struct cache *ca = c->cache;

		if (ca->bdev == bdev)
			return true;
	}

	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

struct async_reg_args {
	struct delayed_work reg_work;
	char *path;
	struct cache_sb *sb;
	struct cache_sb_disk *sb_disk;
	struct block_device *bdev;
};

static void register_bdev_worker(struct work_struct *work)
{
	bool fail = false;
	struct async_reg_args *args =
		container_of(work, struct async_reg_args, reg_work.work);
	struct cached_dev *dc;

	dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		fail = true;
		put_page(virt_to_page(args->sb_disk));
		blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		goto out;
	}

	mutex_lock(&bch_register_lock);
	if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0)
		fail = true;
	mutex_unlock(&bch_register_lock);

out:
	if (fail)
		pr_info("error %s: failed to register backing device\n",
			args->path);
	kfree(args->sb);
	kfree(args->path);
	kfree(args);
	module_put(THIS_MODULE);
}

static void register_cache_worker(struct work_struct *work)
{
	bool fail = false;
	struct async_reg_args *args =
		container_of(work, struct async_reg_args, reg_work.work);
	struct cache *ca;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		fail = true;
		put_page(virt_to_page(args->sb_disk));
		blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		goto out;
	}

	/* blkdev_put() will be called in bch_cache_release() */
	if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0)
		fail = true;

out:
	if (fail)
		pr_info("error %s: failed to register cache device\n",
			args->path);
	kfree(args->sb);
	kfree(args->path);
	kfree(args);
	module_put(THIS_MODULE);
}

static void register_device_async(struct async_reg_args *args)
{
	if (SB_IS_BDEV(args->sb))
		INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
	else
		INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);

	/* 10 jiffies is enough for a delay */
	queue_delayed_work(system_wq, &args->reg_work, 10);
}

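/*
 * Handler for writes to /sys/fs/bcache/register and register_quiet: take
 * an exclusive reference on the named block device, read and validate its
 * superblock, and register it as either a backing device or a cache
 * device. When CONFIG_BCACHE_ASYNC_REGISTRATION is enabled, the actual
 * registration is pushed to a workqueue so the sysfs write returns
 * without waiting.
 */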
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	const char *err;
	char *path = NULL;
	struct cache_sb *sb;
	struct cache_sb_disk *sb_disk;
	struct block_device *bdev;
	ssize_t ret;
	bool async_registration = false;

#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
	async_registration = true;
#endif

	ret = -EBUSY;
	err = "failed to reference bcache module";
	if (!try_module_get(THIS_MODULE))
		goto out;

	/* For latest state of bcache_is_reboot */
	smp_mb();
	err = "bcache is in reboot";
	if (bcache_is_reboot)
		goto out_module_put;

	ret = -ENOMEM;
	err = "cannot allocate memory";
	path = kstrndup(buffer, size, GFP_KERNEL);
	if (!path)
		goto out_module_put;

	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
	if (!sb)
		goto out_free_path;

	ret = -EINVAL;
	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto done;
		}
		goto out_free_sb;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto out_blkdev_put;

	err = read_super(sb, bdev, &sb_disk);
	if (err)
		goto out_blkdev_put;

	err = "failed to register device";

	if (async_registration) {
		/* register asynchronously */
		struct async_reg_args *args =
			kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);

		if (!args) {
			ret = -ENOMEM;
			err = "cannot allocate memory";
			goto out_put_sb_page;
		}

		args->path = path;
		args->sb = sb;
		args->sb_disk = sb_disk;
		args->bdev = bdev;
		register_device_async(args);
		/* Don't wait; return to user space immediately */
		goto async_done;
	}

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		if (!dc)
			goto out_put_sb_page;

		mutex_lock(&bch_register_lock);
		ret = register_bdev(sb, sb_disk, bdev, dc);
		mutex_unlock(&bch_register_lock);
		/* blkdev_put() will be called in cached_dev_free() */
		if (ret < 0)
			goto out_free_sb;
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

		if (!ca)
			goto out_put_sb_page;

		/* blkdev_put() will be called in bch_cache_release() */
		if (register_cache(sb, sb_disk, bdev, ca) != 0)
			goto out_free_sb;
	}

done:
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
async_done:
	return size;

out_put_sb_page:
	put_page(virt_to_page(sb_disk));
out_blkdev_put:
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
out_free_sb:
	kfree(sb);
out_free_path:
	kfree(path);
	path = NULL;
out_module_put:
	module_put(THIS_MODULE);
out:
	pr_info("error %s: %s\n", path ? path : "", err);
	return ret;
}

struct pdev {
	struct list_head list;
	struct cached_dev *dc;
};

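/*
 * Handler for writes to /sys/fs/bcache/pendings_cleanup: stop backing
 * devices that are still sitting on the uncached_devices list waiting for
 * a cache set that has not appeared (for example because the cache device
 * was never plugged back in), so they do not linger forever.
 */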
static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer,
					 size_t size)
{
	LIST_HEAD(pending_devs);
	ssize_t ret = size;
	struct cached_dev *dc, *tdc;
	struct pdev *pdev, *tpdev;
	struct cache_set *c, *tc;

	mutex_lock(&bch_register_lock);
	list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
		pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
		if (!pdev)
			break;
		pdev->dc = dc;
		list_add(&pdev->list, &pending_devs);
	}

	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
		list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
			char *pdev_set_uuid = pdev->dc->sb.set_uuid;
			char *set_uuid = c->set_uuid;

			if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
				list_del(&pdev->list);
				kfree(pdev);
				break;
			}
		}
	}
	mutex_unlock(&bch_register_lock);

	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
		pr_info("delete pdev %p\n", pdev);
		list_del(&pdev->list);
		bcache_device_stop(&pdev->dc->disk);
		kfree(pdev);
	}

	return ret;
}

static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (bcache_is_reboot)
		return NOTIFY_DONE;

	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (bcache_is_reboot)
			goto out;

		/* New registrations are rejected from now on */
		bcache_is_reboot = true;
		/*
		 * Make a registering caller (if there is one) on another CPU
		 * core see that bcache_is_reboot was set to true earlier
		 */
		smp_mb();

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		mutex_unlock(&bch_register_lock);

		pr_info("Stopping all devices:\n");

		/*
		 * The reason bch_register_lock is not held to call
		 * bch_cache_set_stop() and bcache_device_stop() is to
		 * avoid potential deadlock during reboot, because cache
		 * set or bcache device stopping process will acquire
		 * bch_register_lock too.
		 *
		 * We are safe here because bcache_is_reboot is already set
		 * to true, so register_bcache() will reject new
		 * registrations now. bcache_is_reboot also makes sure
		 * bcache_reboot() won't be re-entered by another thread,
		 * so there is no race in the following list iteration by
		 * list_for_each_entry_safe().
		 */
		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/*
		 * Give an early chance for other kthreads and
		 * kworkers to stop themselves
		 */
		schedule();

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 10 * HZ - jiffies;

			mutex_lock(&bch_register_lock);
			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped\n");
		else
			pr_notice("Timeout waiting for devices to be closed\n");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call = bcache_reboot,
	.priority = INT_MAX, /* before any real devices */
};

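/*
 * bcache_exit() undoes bcache_init() in reverse order. It also doubles as
 * the error path of bcache_init(), so each step checks whether the
 * corresponding object was actually created before tearing it down.
 */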
static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bch_journal_wq)
		destroy_workqueue(bch_journal_wq);

	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

/* Check and fixup module parameters */
static void check_module_parameters(void)
{
	if (bch_cutoff_writeback_sync == 0)
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
	else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
		pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
			bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
	}

	if (bch_cutoff_writeback == 0)
		bch_cutoff_writeback = CUTOFF_WRITEBACK;
	else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
		pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
			bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
		bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
	}

	if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
		pr_warn("set bch_cutoff_writeback (%u) to %u\n",
			bch_cutoff_writeback, bch_cutoff_writeback_sync);
		bch_cutoff_writeback = bch_cutoff_writeback_sync;
	}
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		&ksysfs_pendings_cleanup.attr,
		NULL
	};

	check_module_parameters();

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
	if (!bcache_wq)
		goto err;

	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
	if (!bch_journal_wq)
		goto err;

	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
	if (!bcache_kobj)
		goto err;

	if (bch_request_init() ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	bch_debug_init();
	closure_debug_init();

	bcache_is_reboot = false;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

/*
 * Module hooks
 */
module_exit(bcache_exit);
module_init(bcache_init);

module_param(bch_cutoff_writeback, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");

module_param(bch_cutoff_writeback_sync, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");

MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_LICENSE("GPL");