1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * bcache setup/teardown code, and some metadata io - read a superblock and 4 * figure out what to do with it. 5 * 6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 7 * Copyright 2012 Google, Inc. 8 */ 9 10 #include "bcache.h" 11 #include "btree.h" 12 #include "debug.h" 13 #include "extents.h" 14 #include "request.h" 15 #include "writeback.h" 16 #include "features.h" 17 18 #include <linux/blkdev.h> 19 #include <linux/debugfs.h> 20 #include <linux/genhd.h> 21 #include <linux/idr.h> 22 #include <linux/kthread.h> 23 #include <linux/workqueue.h> 24 #include <linux/module.h> 25 #include <linux/random.h> 26 #include <linux/reboot.h> 27 #include <linux/sysfs.h> 28 29 unsigned int bch_cutoff_writeback; 30 unsigned int bch_cutoff_writeback_sync; 31 32 static const char bcache_magic[] = { 33 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, 34 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 35 }; 36 37 static const char invalid_uuid[] = { 38 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, 39 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 40 }; 41 42 static struct kobject *bcache_kobj; 43 struct mutex bch_register_lock; 44 bool bcache_is_reboot; 45 LIST_HEAD(bch_cache_sets); 46 static LIST_HEAD(uncached_devices); 47 48 static int bcache_major; 49 static DEFINE_IDA(bcache_device_idx); 50 static wait_queue_head_t unregister_wait; 51 struct workqueue_struct *bcache_wq; 52 struct workqueue_struct *bch_journal_wq; 53 54 55 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 56 /* limitation of partitions number on single bcache device */ 57 #define BCACHE_MINORS 128 58 /* limitation of bcache devices number on single system */ 59 #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS) 60 61 /* Superblock */ 62 63 static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s) 64 { 65 unsigned int bucket_size = le16_to_cpu(s->bucket_size); 66 67 if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES && 68 bch_has_feature_large_bucket(sb)) 69 bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16; 70 71 return bucket_size; 72 } 73 74 static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev, 75 struct cache_sb_disk *s) 76 { 77 const char *err; 78 unsigned int i; 79 80 sb->first_bucket= le16_to_cpu(s->first_bucket); 81 sb->nbuckets = le64_to_cpu(s->nbuckets); 82 sb->bucket_size = get_bucket_size(sb, s); 83 84 sb->nr_in_set = le16_to_cpu(s->nr_in_set); 85 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); 86 87 err = "Too many journal buckets"; 88 if (sb->keys > SB_JOURNAL_BUCKETS) 89 goto err; 90 91 err = "Too many buckets"; 92 if (sb->nbuckets > LONG_MAX) 93 goto err; 94 95 err = "Not enough buckets"; 96 if (sb->nbuckets < 1 << 7) 97 goto err; 98 99 err = "Bad block size (not power of 2)"; 100 if (!is_power_of_2(sb->block_size)) 101 goto err; 102 103 err = "Bad block size (larger than page size)"; 104 if (sb->block_size > PAGE_SECTORS) 105 goto err; 106 107 err = "Bad bucket size (not power of 2)"; 108 if (!is_power_of_2(sb->bucket_size)) 109 goto err; 110 111 err = "Bad bucket size (smaller than page size)"; 112 if (sb->bucket_size < PAGE_SECTORS) 113 goto err; 114 115 err = "Invalid superblock: device too small"; 116 if (get_capacity(bdev->bd_disk) < 117 sb->bucket_size * sb->nbuckets) 118 goto err; 119 120 err = "Bad UUID"; 121 if (bch_is_zero(sb->set_uuid, 16)) 122 goto err; 123 124 err = "Bad cache device number in set"; 125 if (!sb->nr_in_set || 126 sb->nr_in_set <= sb->nr_this_dev || 127 sb->nr_in_set > MAX_CACHES_PER_SET) 128 goto err; 129 130 err = "Journal buckets not sequential"; 131 for (i = 0; i < sb->keys; i++) 132 if (sb->d[i] != sb->first_bucket + i) 133 goto err; 134 135 err = "Too many journal buckets"; 136 if (sb->first_bucket + sb->keys > sb->nbuckets) 137 goto err; 138 139 err = "Invalid superblock: first bucket comes before end of super"; 140 if (sb->first_bucket * sb->bucket_size < 16) 141 goto err; 142 143 err = NULL; 144 err: 145 return err; 146 } 147 148 149 static const char *read_super(struct cache_sb *sb, struct block_device *bdev, 150 struct cache_sb_disk **res) 151 { 152 const char *err; 153 struct cache_sb_disk *s; 154 struct page *page; 155 unsigned int i; 156 157 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, 158 SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL); 159 if (IS_ERR(page)) 160 return "IO error"; 161 s = page_address(page) + offset_in_page(SB_OFFSET); 162 163 sb->offset = le64_to_cpu(s->offset); 164 sb->version = le64_to_cpu(s->version); 165 166 memcpy(sb->magic, s->magic, 16); 167 memcpy(sb->uuid, s->uuid, 16); 168 memcpy(sb->set_uuid, s->set_uuid, 16); 169 memcpy(sb->label, s->label, SB_LABEL_SIZE); 170 171 sb->flags = le64_to_cpu(s->flags); 172 sb->seq = le64_to_cpu(s->seq); 173 sb->last_mount = le32_to_cpu(s->last_mount); 174 sb->keys = le16_to_cpu(s->keys); 175 176 for (i = 0; i < SB_JOURNAL_BUCKETS; i++) 177 sb->d[i] = le64_to_cpu(s->d[i]); 178 179 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n", 180 sb->version, sb->flags, sb->seq, sb->keys); 181 182 err = "Not a bcache superblock (bad offset)"; 183 if (sb->offset != SB_SECTOR) 184 goto err; 185 186 err = "Not a bcache superblock (bad magic)"; 187 if (memcmp(sb->magic, bcache_magic, 16)) 188 goto err; 189 190 err = "Bad checksum"; 191 if (s->csum != csum_set(s)) 192 goto err; 193 194 err = "Bad UUID"; 195 if (bch_is_zero(sb->uuid, 16)) 196 goto err; 197 198 sb->block_size = le16_to_cpu(s->block_size); 199 200 err = "Superblock block size smaller than device block size"; 201 if (sb->block_size << 9 < bdev_logical_block_size(bdev)) 202 goto err; 203 204 switch (sb->version) { 205 case BCACHE_SB_VERSION_BDEV: 206 sb->data_offset = BDEV_DATA_START_DEFAULT; 207 break; 208 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: 209 case BCACHE_SB_VERSION_BDEV_WITH_FEATURES: 210 sb->data_offset = le64_to_cpu(s->data_offset); 211 212 err = "Bad data offset"; 213 if (sb->data_offset < BDEV_DATA_START_DEFAULT) 214 goto err; 215 216 break; 217 case BCACHE_SB_VERSION_CDEV: 218 case BCACHE_SB_VERSION_CDEV_WITH_UUID: 219 err = read_super_common(sb, bdev, s); 220 if (err) 221 goto err; 222 break; 223 case BCACHE_SB_VERSION_CDEV_WITH_FEATURES: 224 /* 225 * Feature bits are needed in read_super_common(), 226 * convert them firstly. 227 */ 228 sb->feature_compat = le64_to_cpu(s->feature_compat); 229 sb->feature_incompat = le64_to_cpu(s->feature_incompat); 230 sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat); 231 err = read_super_common(sb, bdev, s); 232 if (err) 233 goto err; 234 break; 235 default: 236 err = "Unsupported superblock version"; 237 goto err; 238 } 239 240 sb->last_mount = (u32)ktime_get_real_seconds(); 241 *res = s; 242 return NULL; 243 err: 244 put_page(page); 245 return err; 246 } 247 248 static void write_bdev_super_endio(struct bio *bio) 249 { 250 struct cached_dev *dc = bio->bi_private; 251 252 if (bio->bi_status) 253 bch_count_backing_io_errors(dc, bio); 254 255 closure_put(&dc->sb_write); 256 } 257 258 static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, 259 struct bio *bio) 260 { 261 unsigned int i; 262 263 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META; 264 bio->bi_iter.bi_sector = SB_SECTOR; 265 __bio_add_page(bio, virt_to_page(out), SB_SIZE, 266 offset_in_page(out)); 267 268 out->offset = cpu_to_le64(sb->offset); 269 270 memcpy(out->uuid, sb->uuid, 16); 271 memcpy(out->set_uuid, sb->set_uuid, 16); 272 memcpy(out->label, sb->label, SB_LABEL_SIZE); 273 274 out->flags = cpu_to_le64(sb->flags); 275 out->seq = cpu_to_le64(sb->seq); 276 277 out->last_mount = cpu_to_le32(sb->last_mount); 278 out->first_bucket = cpu_to_le16(sb->first_bucket); 279 out->keys = cpu_to_le16(sb->keys); 280 281 for (i = 0; i < sb->keys; i++) 282 out->d[i] = cpu_to_le64(sb->d[i]); 283 284 if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) { 285 out->feature_compat = cpu_to_le64(sb->feature_compat); 286 out->feature_incompat = cpu_to_le64(sb->feature_incompat); 287 out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat); 288 } 289 290 out->version = cpu_to_le64(sb->version); 291 out->csum = csum_set(out); 292 293 pr_debug("ver %llu, flags %llu, seq %llu\n", 294 sb->version, sb->flags, sb->seq); 295 296 submit_bio(bio); 297 } 298 299 static void bch_write_bdev_super_unlock(struct closure *cl) 300 { 301 struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); 302 303 up(&dc->sb_write_mutex); 304 } 305 306 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) 307 { 308 struct closure *cl = &dc->sb_write; 309 struct bio *bio = &dc->sb_bio; 310 311 down(&dc->sb_write_mutex); 312 closure_init(cl, parent); 313 314 bio_init(bio, dc->sb_bv, 1); 315 bio_set_dev(bio, dc->bdev); 316 bio->bi_end_io = write_bdev_super_endio; 317 bio->bi_private = dc; 318 319 closure_get(cl); 320 /* I/O request sent to backing device */ 321 __write_super(&dc->sb, dc->sb_disk, bio); 322 323 closure_return_with_destructor(cl, bch_write_bdev_super_unlock); 324 } 325 326 static void write_super_endio(struct bio *bio) 327 { 328 struct cache *ca = bio->bi_private; 329 330 /* is_read = 0 */ 331 bch_count_io_errors(ca, bio->bi_status, 0, 332 "writing superblock"); 333 closure_put(&ca->set->sb_write); 334 } 335 336 static void bcache_write_super_unlock(struct closure *cl) 337 { 338 struct cache_set *c = container_of(cl, struct cache_set, sb_write); 339 340 up(&c->sb_write_mutex); 341 } 342 343 void bcache_write_super(struct cache_set *c) 344 { 345 struct closure *cl = &c->sb_write; 346 struct cache *ca; 347 unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID; 348 349 down(&c->sb_write_mutex); 350 closure_init(cl, &c->cl); 351 352 c->sb.seq++; 353 354 if (c->sb.version > version) 355 version = c->sb.version; 356 357 for_each_cache(ca, c, i) { 358 struct bio *bio = &ca->sb_bio; 359 360 ca->sb.version = version; 361 ca->sb.seq = c->sb.seq; 362 ca->sb.last_mount = c->sb.last_mount; 363 364 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 365 366 bio_init(bio, ca->sb_bv, 1); 367 bio_set_dev(bio, ca->bdev); 368 bio->bi_end_io = write_super_endio; 369 bio->bi_private = ca; 370 371 closure_get(cl); 372 __write_super(&ca->sb, ca->sb_disk, bio); 373 } 374 375 closure_return_with_destructor(cl, bcache_write_super_unlock); 376 } 377 378 /* UUID io */ 379 380 static void uuid_endio(struct bio *bio) 381 { 382 struct closure *cl = bio->bi_private; 383 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 384 385 cache_set_err_on(bio->bi_status, c, "accessing uuids"); 386 bch_bbio_free(bio, c); 387 closure_put(cl); 388 } 389 390 static void uuid_io_unlock(struct closure *cl) 391 { 392 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 393 394 up(&c->uuid_write_mutex); 395 } 396 397 static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, 398 struct bkey *k, struct closure *parent) 399 { 400 struct closure *cl = &c->uuid_write; 401 struct uuid_entry *u; 402 unsigned int i; 403 char buf[80]; 404 405 BUG_ON(!parent); 406 down(&c->uuid_write_mutex); 407 closure_init(cl, parent); 408 409 for (i = 0; i < KEY_PTRS(k); i++) { 410 struct bio *bio = bch_bbio_alloc(c); 411 412 bio->bi_opf = REQ_SYNC | REQ_META | op_flags; 413 bio->bi_iter.bi_size = KEY_SIZE(k) << 9; 414 415 bio->bi_end_io = uuid_endio; 416 bio->bi_private = cl; 417 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 418 bch_bio_map(bio, c->uuids); 419 420 bch_submit_bbio(bio, c, k, i); 421 422 if (op != REQ_OP_WRITE) 423 break; 424 } 425 426 bch_extent_to_text(buf, sizeof(buf), k); 427 pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf); 428 429 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) 430 if (!bch_is_zero(u->uuid, 16)) 431 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n", 432 u - c->uuids, u->uuid, u->label, 433 u->first_reg, u->last_reg, u->invalidated); 434 435 closure_return_with_destructor(cl, uuid_io_unlock); 436 } 437 438 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) 439 { 440 struct bkey *k = &j->uuid_bucket; 441 442 if (__bch_btree_ptr_invalid(c, k)) 443 return "bad uuid pointer"; 444 445 bkey_copy(&c->uuid_bucket, k); 446 uuid_io(c, REQ_OP_READ, 0, k, cl); 447 448 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 449 struct uuid_entry_v0 *u0 = (void *) c->uuids; 450 struct uuid_entry *u1 = (void *) c->uuids; 451 int i; 452 453 closure_sync(cl); 454 455 /* 456 * Since the new uuid entry is bigger than the old, we have to 457 * convert starting at the highest memory address and work down 458 * in order to do it in place 459 */ 460 461 for (i = c->nr_uuids - 1; 462 i >= 0; 463 --i) { 464 memcpy(u1[i].uuid, u0[i].uuid, 16); 465 memcpy(u1[i].label, u0[i].label, 32); 466 467 u1[i].first_reg = u0[i].first_reg; 468 u1[i].last_reg = u0[i].last_reg; 469 u1[i].invalidated = u0[i].invalidated; 470 471 u1[i].flags = 0; 472 u1[i].sectors = 0; 473 } 474 } 475 476 return NULL; 477 } 478 479 static int __uuid_write(struct cache_set *c) 480 { 481 BKEY_PADDED(key) k; 482 struct closure cl; 483 struct cache *ca; 484 unsigned int size; 485 486 closure_init_stack(&cl); 487 lockdep_assert_held(&bch_register_lock); 488 489 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) 490 return 1; 491 492 size = meta_bucket_pages(&c->sb) * PAGE_SECTORS; 493 SET_KEY_SIZE(&k.key, size); 494 uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); 495 closure_sync(&cl); 496 497 /* Only one bucket used for uuid write */ 498 ca = PTR_CACHE(c, &k.key, 0); 499 atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written); 500 501 bkey_copy(&c->uuid_bucket, &k.key); 502 bkey_put(c, &k.key); 503 return 0; 504 } 505 506 int bch_uuid_write(struct cache_set *c) 507 { 508 int ret = __uuid_write(c); 509 510 if (!ret) 511 bch_journal_meta(c, NULL); 512 513 return ret; 514 } 515 516 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) 517 { 518 struct uuid_entry *u; 519 520 for (u = c->uuids; 521 u < c->uuids + c->nr_uuids; u++) 522 if (!memcmp(u->uuid, uuid, 16)) 523 return u; 524 525 return NULL; 526 } 527 528 static struct uuid_entry *uuid_find_empty(struct cache_set *c) 529 { 530 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; 531 532 return uuid_find(c, zero_uuid); 533 } 534 535 /* 536 * Bucket priorities/gens: 537 * 538 * For each bucket, we store on disk its 539 * 8 bit gen 540 * 16 bit priority 541 * 542 * See alloc.c for an explanation of the gen. The priority is used to implement 543 * lru (and in the future other) cache replacement policies; for most purposes 544 * it's just an opaque integer. 545 * 546 * The gens and the priorities don't have a whole lot to do with each other, and 547 * it's actually the gens that must be written out at specific times - it's no 548 * big deal if the priorities don't get written, if we lose them we just reuse 549 * buckets in suboptimal order. 550 * 551 * On disk they're stored in a packed array, and in as many buckets are required 552 * to fit them all. The buckets we use to store them form a list; the journal 553 * header points to the first bucket, the first bucket points to the second 554 * bucket, et cetera. 555 * 556 * This code is used by the allocation code; periodically (whenever it runs out 557 * of buckets to allocate from) the allocation code will invalidate some 558 * buckets, but it can't use those buckets until their new gens are safely on 559 * disk. 560 */ 561 562 static void prio_endio(struct bio *bio) 563 { 564 struct cache *ca = bio->bi_private; 565 566 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); 567 bch_bbio_free(bio, ca->set); 568 closure_put(&ca->prio); 569 } 570 571 static void prio_io(struct cache *ca, uint64_t bucket, int op, 572 unsigned long op_flags) 573 { 574 struct closure *cl = &ca->prio; 575 struct bio *bio = bch_bbio_alloc(ca->set); 576 577 closure_init_stack(cl); 578 579 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; 580 bio_set_dev(bio, ca->bdev); 581 bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb); 582 583 bio->bi_end_io = prio_endio; 584 bio->bi_private = ca; 585 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); 586 bch_bio_map(bio, ca->disk_buckets); 587 588 closure_bio_submit(ca->set, bio, &ca->prio); 589 closure_sync(cl); 590 } 591 592 int bch_prio_write(struct cache *ca, bool wait) 593 { 594 int i; 595 struct bucket *b; 596 struct closure cl; 597 598 pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n", 599 fifo_used(&ca->free[RESERVE_PRIO]), 600 fifo_used(&ca->free[RESERVE_NONE]), 601 fifo_used(&ca->free_inc)); 602 603 /* 604 * Pre-check if there are enough free buckets. In the non-blocking 605 * scenario it's better to fail early rather than starting to allocate 606 * buckets and do a cleanup later in case of failure. 607 */ 608 if (!wait) { 609 size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) + 610 fifo_used(&ca->free[RESERVE_NONE]); 611 if (prio_buckets(ca) > avail) 612 return -ENOMEM; 613 } 614 615 closure_init_stack(&cl); 616 617 lockdep_assert_held(&ca->set->bucket_lock); 618 619 ca->disk_buckets->seq++; 620 621 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), 622 &ca->meta_sectors_written); 623 624 for (i = prio_buckets(ca) - 1; i >= 0; --i) { 625 long bucket; 626 struct prio_set *p = ca->disk_buckets; 627 struct bucket_disk *d = p->data; 628 struct bucket_disk *end = d + prios_per_bucket(ca); 629 630 for (b = ca->buckets + i * prios_per_bucket(ca); 631 b < ca->buckets + ca->sb.nbuckets && d < end; 632 b++, d++) { 633 d->prio = cpu_to_le16(b->prio); 634 d->gen = b->gen; 635 } 636 637 p->next_bucket = ca->prio_buckets[i + 1]; 638 p->magic = pset_magic(&ca->sb); 639 p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8); 640 641 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait); 642 BUG_ON(bucket == -1); 643 644 mutex_unlock(&ca->set->bucket_lock); 645 prio_io(ca, bucket, REQ_OP_WRITE, 0); 646 mutex_lock(&ca->set->bucket_lock); 647 648 ca->prio_buckets[i] = bucket; 649 atomic_dec_bug(&ca->buckets[bucket].pin); 650 } 651 652 mutex_unlock(&ca->set->bucket_lock); 653 654 bch_journal_meta(ca->set, &cl); 655 closure_sync(&cl); 656 657 mutex_lock(&ca->set->bucket_lock); 658 659 /* 660 * Don't want the old priorities to get garbage collected until after we 661 * finish writing the new ones, and they're journalled 662 */ 663 for (i = 0; i < prio_buckets(ca); i++) { 664 if (ca->prio_last_buckets[i]) 665 __bch_bucket_free(ca, 666 &ca->buckets[ca->prio_last_buckets[i]]); 667 668 ca->prio_last_buckets[i] = ca->prio_buckets[i]; 669 } 670 return 0; 671 } 672 673 static int prio_read(struct cache *ca, uint64_t bucket) 674 { 675 struct prio_set *p = ca->disk_buckets; 676 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; 677 struct bucket *b; 678 unsigned int bucket_nr = 0; 679 int ret = -EIO; 680 681 for (b = ca->buckets; 682 b < ca->buckets + ca->sb.nbuckets; 683 b++, d++) { 684 if (d == end) { 685 ca->prio_buckets[bucket_nr] = bucket; 686 ca->prio_last_buckets[bucket_nr] = bucket; 687 bucket_nr++; 688 689 prio_io(ca, bucket, REQ_OP_READ, 0); 690 691 if (p->csum != 692 bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) { 693 pr_warn("bad csum reading priorities\n"); 694 goto out; 695 } 696 697 if (p->magic != pset_magic(&ca->sb)) { 698 pr_warn("bad magic reading priorities\n"); 699 goto out; 700 } 701 702 bucket = p->next_bucket; 703 d = p->data; 704 } 705 706 b->prio = le16_to_cpu(d->prio); 707 b->gen = b->last_gc = d->gen; 708 } 709 710 ret = 0; 711 out: 712 return ret; 713 } 714 715 /* Bcache device */ 716 717 static int open_dev(struct block_device *b, fmode_t mode) 718 { 719 struct bcache_device *d = b->bd_disk->private_data; 720 721 if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) 722 return -ENXIO; 723 724 closure_get(&d->cl); 725 return 0; 726 } 727 728 static void release_dev(struct gendisk *b, fmode_t mode) 729 { 730 struct bcache_device *d = b->private_data; 731 732 closure_put(&d->cl); 733 } 734 735 static int ioctl_dev(struct block_device *b, fmode_t mode, 736 unsigned int cmd, unsigned long arg) 737 { 738 struct bcache_device *d = b->bd_disk->private_data; 739 740 return d->ioctl(d, mode, cmd, arg); 741 } 742 743 static const struct block_device_operations bcache_cached_ops = { 744 .submit_bio = cached_dev_submit_bio, 745 .open = open_dev, 746 .release = release_dev, 747 .ioctl = ioctl_dev, 748 .owner = THIS_MODULE, 749 }; 750 751 static const struct block_device_operations bcache_flash_ops = { 752 .submit_bio = flash_dev_submit_bio, 753 .open = open_dev, 754 .release = release_dev, 755 .ioctl = ioctl_dev, 756 .owner = THIS_MODULE, 757 }; 758 759 void bcache_device_stop(struct bcache_device *d) 760 { 761 if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) 762 /* 763 * closure_fn set to 764 * - cached device: cached_dev_flush() 765 * - flash dev: flash_dev_flush() 766 */ 767 closure_queue(&d->cl); 768 } 769 770 static void bcache_device_unlink(struct bcache_device *d) 771 { 772 lockdep_assert_held(&bch_register_lock); 773 774 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { 775 unsigned int i; 776 struct cache *ca; 777 778 sysfs_remove_link(&d->c->kobj, d->name); 779 sysfs_remove_link(&d->kobj, "cache"); 780 781 for_each_cache(ca, d->c, i) 782 bd_unlink_disk_holder(ca->bdev, d->disk); 783 } 784 } 785 786 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, 787 const char *name) 788 { 789 unsigned int i; 790 struct cache *ca; 791 int ret; 792 793 for_each_cache(ca, d->c, i) 794 bd_link_disk_holder(ca->bdev, d->disk); 795 796 snprintf(d->name, BCACHEDEVNAME_SIZE, 797 "%s%u", name, d->id); 798 799 ret = sysfs_create_link(&d->kobj, &c->kobj, "cache"); 800 if (ret < 0) 801 pr_err("Couldn't create device -> cache set symlink\n"); 802 803 ret = sysfs_create_link(&c->kobj, &d->kobj, d->name); 804 if (ret < 0) 805 pr_err("Couldn't create cache set -> device symlink\n"); 806 807 clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags); 808 } 809 810 static void bcache_device_detach(struct bcache_device *d) 811 { 812 lockdep_assert_held(&bch_register_lock); 813 814 atomic_dec(&d->c->attached_dev_nr); 815 816 if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { 817 struct uuid_entry *u = d->c->uuids + d->id; 818 819 SET_UUID_FLASH_ONLY(u, 0); 820 memcpy(u->uuid, invalid_uuid, 16); 821 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); 822 bch_uuid_write(d->c); 823 } 824 825 bcache_device_unlink(d); 826 827 d->c->devices[d->id] = NULL; 828 closure_put(&d->c->caching); 829 d->c = NULL; 830 } 831 832 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, 833 unsigned int id) 834 { 835 d->id = id; 836 d->c = c; 837 c->devices[id] = d; 838 839 if (id >= c->devices_max_used) 840 c->devices_max_used = id + 1; 841 842 closure_get(&c->caching); 843 } 844 845 static inline int first_minor_to_idx(int first_minor) 846 { 847 return (first_minor/BCACHE_MINORS); 848 } 849 850 static inline int idx_to_first_minor(int idx) 851 { 852 return (idx * BCACHE_MINORS); 853 } 854 855 static void bcache_device_free(struct bcache_device *d) 856 { 857 struct gendisk *disk = d->disk; 858 859 lockdep_assert_held(&bch_register_lock); 860 861 if (disk) 862 pr_info("%s stopped\n", disk->disk_name); 863 else 864 pr_err("bcache device (NULL gendisk) stopped\n"); 865 866 if (d->c) 867 bcache_device_detach(d); 868 869 if (disk) { 870 bool disk_added = (disk->flags & GENHD_FL_UP) != 0; 871 872 if (disk_added) 873 del_gendisk(disk); 874 875 if (disk->queue) 876 blk_cleanup_queue(disk->queue); 877 878 ida_simple_remove(&bcache_device_idx, 879 first_minor_to_idx(disk->first_minor)); 880 if (disk_added) 881 put_disk(disk); 882 } 883 884 bioset_exit(&d->bio_split); 885 kvfree(d->full_dirty_stripes); 886 kvfree(d->stripe_sectors_dirty); 887 888 closure_debug_destroy(&d->cl); 889 } 890 891 static int bcache_device_init(struct bcache_device *d, unsigned int block_size, 892 sector_t sectors, struct block_device *cached_bdev, 893 const struct block_device_operations *ops) 894 { 895 struct request_queue *q; 896 const size_t max_stripes = min_t(size_t, INT_MAX, 897 SIZE_MAX / sizeof(atomic_t)); 898 uint64_t n; 899 int idx; 900 901 if (!d->stripe_size) 902 d->stripe_size = 1 << 31; 903 904 n = DIV_ROUND_UP_ULL(sectors, d->stripe_size); 905 if (!n || n > max_stripes) { 906 pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n", 907 n); 908 return -ENOMEM; 909 } 910 d->nr_stripes = n; 911 912 n = d->nr_stripes * sizeof(atomic_t); 913 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); 914 if (!d->stripe_sectors_dirty) 915 return -ENOMEM; 916 917 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); 918 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL); 919 if (!d->full_dirty_stripes) 920 return -ENOMEM; 921 922 idx = ida_simple_get(&bcache_device_idx, 0, 923 BCACHE_DEVICE_IDX_MAX, GFP_KERNEL); 924 if (idx < 0) 925 return idx; 926 927 if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio), 928 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) 929 goto err; 930 931 d->disk = alloc_disk(BCACHE_MINORS); 932 if (!d->disk) 933 goto err; 934 935 set_capacity(d->disk, sectors); 936 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx); 937 938 d->disk->major = bcache_major; 939 d->disk->first_minor = idx_to_first_minor(idx); 940 d->disk->fops = ops; 941 d->disk->private_data = d; 942 943 q = blk_alloc_queue(NUMA_NO_NODE); 944 if (!q) 945 return -ENOMEM; 946 947 d->disk->queue = q; 948 q->backing_dev_info->congested_data = d; 949 q->limits.max_hw_sectors = UINT_MAX; 950 q->limits.max_sectors = UINT_MAX; 951 q->limits.max_segment_size = UINT_MAX; 952 q->limits.max_segments = BIO_MAX_PAGES; 953 blk_queue_max_discard_sectors(q, UINT_MAX); 954 q->limits.discard_granularity = 512; 955 q->limits.io_min = block_size; 956 q->limits.logical_block_size = block_size; 957 q->limits.physical_block_size = block_size; 958 959 if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) { 960 /* 961 * This should only happen with BCACHE_SB_VERSION_BDEV. 962 * Block/page size is checked for BCACHE_SB_VERSION_CDEV. 963 */ 964 pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n", 965 d->disk->disk_name, q->limits.logical_block_size, 966 PAGE_SIZE, bdev_logical_block_size(cached_bdev)); 967 968 /* This also adjusts physical block size/min io size if needed */ 969 blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev)); 970 } 971 972 blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); 973 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue); 974 blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue); 975 976 blk_queue_write_cache(q, true, true); 977 978 return 0; 979 980 err: 981 ida_simple_remove(&bcache_device_idx, idx); 982 return -ENOMEM; 983 984 } 985 986 /* Cached device */ 987 988 static void calc_cached_dev_sectors(struct cache_set *c) 989 { 990 uint64_t sectors = 0; 991 struct cached_dev *dc; 992 993 list_for_each_entry(dc, &c->cached_devs, list) 994 sectors += bdev_sectors(dc->bdev); 995 996 c->cached_dev_sectors = sectors; 997 } 998 999 #define BACKING_DEV_OFFLINE_TIMEOUT 5 1000 static int cached_dev_status_update(void *arg) 1001 { 1002 struct cached_dev *dc = arg; 1003 struct request_queue *q; 1004 1005 /* 1006 * If this delayed worker is stopping outside, directly quit here. 1007 * dc->io_disable might be set via sysfs interface, so check it 1008 * here too. 1009 */ 1010 while (!kthread_should_stop() && !dc->io_disable) { 1011 q = bdev_get_queue(dc->bdev); 1012 if (blk_queue_dying(q)) 1013 dc->offline_seconds++; 1014 else 1015 dc->offline_seconds = 0; 1016 1017 if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) { 1018 pr_err("%s: device offline for %d seconds\n", 1019 dc->backing_dev_name, 1020 BACKING_DEV_OFFLINE_TIMEOUT); 1021 pr_err("%s: disable I/O request due to backing device offline\n", 1022 dc->disk.name); 1023 dc->io_disable = true; 1024 /* let others know earlier that io_disable is true */ 1025 smp_mb(); 1026 bcache_device_stop(&dc->disk); 1027 break; 1028 } 1029 schedule_timeout_interruptible(HZ); 1030 } 1031 1032 wait_for_kthread_stop(); 1033 return 0; 1034 } 1035 1036 1037 int bch_cached_dev_run(struct cached_dev *dc) 1038 { 1039 struct bcache_device *d = &dc->disk; 1040 char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL); 1041 char *env[] = { 1042 "DRIVER=bcache", 1043 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), 1044 kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""), 1045 NULL, 1046 }; 1047 1048 if (dc->io_disable) { 1049 pr_err("I/O disabled on cached dev %s\n", 1050 dc->backing_dev_name); 1051 kfree(env[1]); 1052 kfree(env[2]); 1053 kfree(buf); 1054 return -EIO; 1055 } 1056 1057 if (atomic_xchg(&dc->running, 1)) { 1058 kfree(env[1]); 1059 kfree(env[2]); 1060 kfree(buf); 1061 pr_info("cached dev %s is running already\n", 1062 dc->backing_dev_name); 1063 return -EBUSY; 1064 } 1065 1066 if (!d->c && 1067 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { 1068 struct closure cl; 1069 1070 closure_init_stack(&cl); 1071 1072 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); 1073 bch_write_bdev_super(dc, &cl); 1074 closure_sync(&cl); 1075 } 1076 1077 add_disk(d->disk); 1078 bd_link_disk_holder(dc->bdev, dc->disk.disk); 1079 /* 1080 * won't show up in the uevent file, use udevadm monitor -e instead 1081 * only class / kset properties are persistent 1082 */ 1083 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); 1084 kfree(env[1]); 1085 kfree(env[2]); 1086 kfree(buf); 1087 1088 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || 1089 sysfs_create_link(&disk_to_dev(d->disk)->kobj, 1090 &d->kobj, "bcache")) { 1091 pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n"); 1092 return -ENOMEM; 1093 } 1094 1095 dc->status_update_thread = kthread_run(cached_dev_status_update, 1096 dc, "bcache_status_update"); 1097 if (IS_ERR(dc->status_update_thread)) { 1098 pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n"); 1099 } 1100 1101 return 0; 1102 } 1103 1104 /* 1105 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed 1106 * work dc->writeback_rate_update is running. Wait until the routine 1107 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to 1108 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out 1109 * seconds, give up waiting here and continue to cancel it too. 1110 */ 1111 static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) 1112 { 1113 int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ; 1114 1115 do { 1116 if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING, 1117 &dc->disk.flags)) 1118 break; 1119 time_out--; 1120 schedule_timeout_interruptible(1); 1121 } while (time_out > 0); 1122 1123 if (time_out == 0) 1124 pr_warn("give up waiting for dc->writeback_write_update to quit\n"); 1125 1126 cancel_delayed_work_sync(&dc->writeback_rate_update); 1127 } 1128 1129 static void cached_dev_detach_finish(struct work_struct *w) 1130 { 1131 struct cached_dev *dc = container_of(w, struct cached_dev, detach); 1132 struct closure cl; 1133 1134 closure_init_stack(&cl); 1135 1136 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); 1137 BUG_ON(refcount_read(&dc->count)); 1138 1139 1140 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 1141 cancel_writeback_rate_update_dwork(dc); 1142 1143 if (!IS_ERR_OR_NULL(dc->writeback_thread)) { 1144 kthread_stop(dc->writeback_thread); 1145 dc->writeback_thread = NULL; 1146 } 1147 1148 memset(&dc->sb.set_uuid, 0, 16); 1149 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); 1150 1151 bch_write_bdev_super(dc, &cl); 1152 closure_sync(&cl); 1153 1154 mutex_lock(&bch_register_lock); 1155 1156 calc_cached_dev_sectors(dc->disk.c); 1157 bcache_device_detach(&dc->disk); 1158 list_move(&dc->list, &uncached_devices); 1159 1160 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); 1161 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); 1162 1163 mutex_unlock(&bch_register_lock); 1164 1165 pr_info("Caching disabled for %s\n", dc->backing_dev_name); 1166 1167 /* Drop ref we took in cached_dev_detach() */ 1168 closure_put(&dc->disk.cl); 1169 } 1170 1171 void bch_cached_dev_detach(struct cached_dev *dc) 1172 { 1173 lockdep_assert_held(&bch_register_lock); 1174 1175 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 1176 return; 1177 1178 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) 1179 return; 1180 1181 /* 1182 * Block the device from being closed and freed until we're finished 1183 * detaching 1184 */ 1185 closure_get(&dc->disk.cl); 1186 1187 bch_writeback_queue(dc); 1188 1189 cached_dev_put(dc); 1190 } 1191 1192 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, 1193 uint8_t *set_uuid) 1194 { 1195 uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds()); 1196 struct uuid_entry *u; 1197 struct cached_dev *exist_dc, *t; 1198 int ret = 0; 1199 1200 if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || 1201 (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) 1202 return -ENOENT; 1203 1204 if (dc->disk.c) { 1205 pr_err("Can't attach %s: already attached\n", 1206 dc->backing_dev_name); 1207 return -EINVAL; 1208 } 1209 1210 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { 1211 pr_err("Can't attach %s: shutting down\n", 1212 dc->backing_dev_name); 1213 return -EINVAL; 1214 } 1215 1216 if (dc->sb.block_size < c->sb.block_size) { 1217 /* Will die */ 1218 pr_err("Couldn't attach %s: block size less than set's block size\n", 1219 dc->backing_dev_name); 1220 return -EINVAL; 1221 } 1222 1223 /* Check whether already attached */ 1224 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { 1225 if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { 1226 pr_err("Tried to attach %s but duplicate UUID already attached\n", 1227 dc->backing_dev_name); 1228 1229 return -EINVAL; 1230 } 1231 } 1232 1233 u = uuid_find(c, dc->sb.uuid); 1234 1235 if (u && 1236 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || 1237 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { 1238 memcpy(u->uuid, invalid_uuid, 16); 1239 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); 1240 u = NULL; 1241 } 1242 1243 if (!u) { 1244 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1245 pr_err("Couldn't find uuid for %s in set\n", 1246 dc->backing_dev_name); 1247 return -ENOENT; 1248 } 1249 1250 u = uuid_find_empty(c); 1251 if (!u) { 1252 pr_err("Not caching %s, no room for UUID\n", 1253 dc->backing_dev_name); 1254 return -EINVAL; 1255 } 1256 } 1257 1258 /* 1259 * Deadlocks since we're called via sysfs... 1260 * sysfs_remove_file(&dc->kobj, &sysfs_attach); 1261 */ 1262 1263 if (bch_is_zero(u->uuid, 16)) { 1264 struct closure cl; 1265 1266 closure_init_stack(&cl); 1267 1268 memcpy(u->uuid, dc->sb.uuid, 16); 1269 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); 1270 u->first_reg = u->last_reg = rtime; 1271 bch_uuid_write(c); 1272 1273 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); 1274 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); 1275 1276 bch_write_bdev_super(dc, &cl); 1277 closure_sync(&cl); 1278 } else { 1279 u->last_reg = rtime; 1280 bch_uuid_write(c); 1281 } 1282 1283 bcache_device_attach(&dc->disk, c, u - c->uuids); 1284 list_move(&dc->list, &c->cached_devs); 1285 calc_cached_dev_sectors(c); 1286 1287 /* 1288 * dc->c must be set before dc->count != 0 - paired with the mb in 1289 * cached_dev_get() 1290 */ 1291 smp_wmb(); 1292 refcount_set(&dc->count, 1); 1293 1294 /* Block writeback thread, but spawn it */ 1295 down_write(&dc->writeback_lock); 1296 if (bch_cached_dev_writeback_start(dc)) { 1297 up_write(&dc->writeback_lock); 1298 pr_err("Couldn't start writeback facilities for %s\n", 1299 dc->disk.disk->disk_name); 1300 return -ENOMEM; 1301 } 1302 1303 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { 1304 atomic_set(&dc->has_dirty, 1); 1305 bch_writeback_queue(dc); 1306 } 1307 1308 bch_sectors_dirty_init(&dc->disk); 1309 1310 ret = bch_cached_dev_run(dc); 1311 if (ret && (ret != -EBUSY)) { 1312 up_write(&dc->writeback_lock); 1313 /* 1314 * bch_register_lock is held, bcache_device_stop() is not 1315 * able to be directly called. The kthread and kworker 1316 * created previously in bch_cached_dev_writeback_start() 1317 * have to be stopped manually here. 1318 */ 1319 kthread_stop(dc->writeback_thread); 1320 cancel_writeback_rate_update_dwork(dc); 1321 pr_err("Couldn't run cached device %s\n", 1322 dc->backing_dev_name); 1323 return ret; 1324 } 1325 1326 bcache_device_link(&dc->disk, c, "bdev"); 1327 atomic_inc(&c->attached_dev_nr); 1328 1329 /* Allow the writeback thread to proceed */ 1330 up_write(&dc->writeback_lock); 1331 1332 pr_info("Caching %s as %s on set %pU\n", 1333 dc->backing_dev_name, 1334 dc->disk.disk->disk_name, 1335 dc->disk.c->sb.set_uuid); 1336 return 0; 1337 } 1338 1339 /* when dc->disk.kobj released */ 1340 void bch_cached_dev_release(struct kobject *kobj) 1341 { 1342 struct cached_dev *dc = container_of(kobj, struct cached_dev, 1343 disk.kobj); 1344 kfree(dc); 1345 module_put(THIS_MODULE); 1346 } 1347 1348 static void cached_dev_free(struct closure *cl) 1349 { 1350 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1351 1352 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 1353 cancel_writeback_rate_update_dwork(dc); 1354 1355 if (!IS_ERR_OR_NULL(dc->writeback_thread)) 1356 kthread_stop(dc->writeback_thread); 1357 if (!IS_ERR_OR_NULL(dc->status_update_thread)) 1358 kthread_stop(dc->status_update_thread); 1359 1360 mutex_lock(&bch_register_lock); 1361 1362 if (atomic_read(&dc->running)) 1363 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 1364 bcache_device_free(&dc->disk); 1365 list_del(&dc->list); 1366 1367 mutex_unlock(&bch_register_lock); 1368 1369 if (dc->sb_disk) 1370 put_page(virt_to_page(dc->sb_disk)); 1371 1372 if (!IS_ERR_OR_NULL(dc->bdev)) 1373 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1374 1375 wake_up(&unregister_wait); 1376 1377 kobject_put(&dc->disk.kobj); 1378 } 1379 1380 static void cached_dev_flush(struct closure *cl) 1381 { 1382 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1383 struct bcache_device *d = &dc->disk; 1384 1385 mutex_lock(&bch_register_lock); 1386 bcache_device_unlink(d); 1387 mutex_unlock(&bch_register_lock); 1388 1389 bch_cache_accounting_destroy(&dc->accounting); 1390 kobject_del(&d->kobj); 1391 1392 continue_at(cl, cached_dev_free, system_wq); 1393 } 1394 1395 static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) 1396 { 1397 int ret; 1398 struct io *io; 1399 struct request_queue *q = bdev_get_queue(dc->bdev); 1400 1401 __module_get(THIS_MODULE); 1402 INIT_LIST_HEAD(&dc->list); 1403 closure_init(&dc->disk.cl, NULL); 1404 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); 1405 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1406 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1407 sema_init(&dc->sb_write_mutex, 1); 1408 INIT_LIST_HEAD(&dc->io_lru); 1409 spin_lock_init(&dc->io_lock); 1410 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); 1411 1412 dc->sequential_cutoff = 4 << 20; 1413 1414 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1415 list_add(&io->lru, &dc->io_lru); 1416 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1417 } 1418 1419 dc->disk.stripe_size = q->limits.io_opt >> 9; 1420 1421 if (dc->disk.stripe_size) 1422 dc->partial_stripes_expensive = 1423 q->limits.raid_partial_stripes_expensive; 1424 1425 ret = bcache_device_init(&dc->disk, block_size, 1426 dc->bdev->bd_part->nr_sects - dc->sb.data_offset, 1427 dc->bdev, &bcache_cached_ops); 1428 if (ret) 1429 return ret; 1430 1431 dc->disk.disk->queue->backing_dev_info->ra_pages = 1432 max(dc->disk.disk->queue->backing_dev_info->ra_pages, 1433 q->backing_dev_info->ra_pages); 1434 1435 atomic_set(&dc->io_errors, 0); 1436 dc->io_disable = false; 1437 dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT; 1438 /* default to auto */ 1439 dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; 1440 1441 bch_cached_dev_request_init(dc); 1442 bch_cached_dev_writeback_init(dc); 1443 return 0; 1444 } 1445 1446 /* Cached device - bcache superblock */ 1447 1448 static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk, 1449 struct block_device *bdev, 1450 struct cached_dev *dc) 1451 { 1452 const char *err = "cannot allocate memory"; 1453 struct cache_set *c; 1454 int ret = -ENOMEM; 1455 1456 bdevname(bdev, dc->backing_dev_name); 1457 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1458 dc->bdev = bdev; 1459 dc->bdev->bd_holder = dc; 1460 dc->sb_disk = sb_disk; 1461 1462 if (cached_dev_init(dc, sb->block_size << 9)) 1463 goto err; 1464 1465 err = "error creating kobject"; 1466 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1467 "bcache")) 1468 goto err; 1469 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1470 goto err; 1471 1472 pr_info("registered backing device %s\n", dc->backing_dev_name); 1473 1474 list_add(&dc->list, &uncached_devices); 1475 /* attach to a matched cache set if it exists */ 1476 list_for_each_entry(c, &bch_cache_sets, list) 1477 bch_cached_dev_attach(dc, c, NULL); 1478 1479 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || 1480 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) { 1481 err = "failed to run cached device"; 1482 ret = bch_cached_dev_run(dc); 1483 if (ret) 1484 goto err; 1485 } 1486 1487 return 0; 1488 err: 1489 pr_notice("error %s: %s\n", dc->backing_dev_name, err); 1490 bcache_device_stop(&dc->disk); 1491 return ret; 1492 } 1493 1494 /* Flash only volumes */ 1495 1496 /* When d->kobj released */ 1497 void bch_flash_dev_release(struct kobject *kobj) 1498 { 1499 struct bcache_device *d = container_of(kobj, struct bcache_device, 1500 kobj); 1501 kfree(d); 1502 } 1503 1504 static void flash_dev_free(struct closure *cl) 1505 { 1506 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1507 1508 mutex_lock(&bch_register_lock); 1509 atomic_long_sub(bcache_dev_sectors_dirty(d), 1510 &d->c->flash_dev_dirty_sectors); 1511 bcache_device_free(d); 1512 mutex_unlock(&bch_register_lock); 1513 kobject_put(&d->kobj); 1514 } 1515 1516 static void flash_dev_flush(struct closure *cl) 1517 { 1518 struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1519 1520 mutex_lock(&bch_register_lock); 1521 bcache_device_unlink(d); 1522 mutex_unlock(&bch_register_lock); 1523 kobject_del(&d->kobj); 1524 continue_at(cl, flash_dev_free, system_wq); 1525 } 1526 1527 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) 1528 { 1529 struct bcache_device *d = kzalloc(sizeof(struct bcache_device), 1530 GFP_KERNEL); 1531 if (!d) 1532 return -ENOMEM; 1533 1534 closure_init(&d->cl, NULL); 1535 set_closure_fn(&d->cl, flash_dev_flush, system_wq); 1536 1537 kobject_init(&d->kobj, &bch_flash_dev_ktype); 1538 1539 if (bcache_device_init(d, block_bytes(c), u->sectors, 1540 NULL, &bcache_flash_ops)) 1541 goto err; 1542 1543 bcache_device_attach(d, c, u - c->uuids); 1544 bch_sectors_dirty_init(d); 1545 bch_flash_dev_request_init(d); 1546 add_disk(d->disk); 1547 1548 if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) 1549 goto err; 1550 1551 bcache_device_link(d, c, "volume"); 1552 1553 return 0; 1554 err: 1555 kobject_put(&d->kobj); 1556 return -ENOMEM; 1557 } 1558 1559 static int flash_devs_run(struct cache_set *c) 1560 { 1561 int ret = 0; 1562 struct uuid_entry *u; 1563 1564 for (u = c->uuids; 1565 u < c->uuids + c->nr_uuids && !ret; 1566 u++) 1567 if (UUID_FLASH_ONLY(u)) 1568 ret = flash_dev_run(c, u); 1569 1570 return ret; 1571 } 1572 1573 int bch_flash_dev_create(struct cache_set *c, uint64_t size) 1574 { 1575 struct uuid_entry *u; 1576 1577 if (test_bit(CACHE_SET_STOPPING, &c->flags)) 1578 return -EINTR; 1579 1580 if (!test_bit(CACHE_SET_RUNNING, &c->flags)) 1581 return -EPERM; 1582 1583 u = uuid_find_empty(c); 1584 if (!u) { 1585 pr_err("Can't create volume, no room for UUID\n"); 1586 return -EINVAL; 1587 } 1588 1589 get_random_bytes(u->uuid, 16); 1590 memset(u->label, 0, 32); 1591 u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds()); 1592 1593 SET_UUID_FLASH_ONLY(u, 1); 1594 u->sectors = size >> 9; 1595 1596 bch_uuid_write(c); 1597 1598 return flash_dev_run(c, u); 1599 } 1600 1601 bool bch_cached_dev_error(struct cached_dev *dc) 1602 { 1603 if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) 1604 return false; 1605 1606 dc->io_disable = true; 1607 /* make others know io_disable is true earlier */ 1608 smp_mb(); 1609 1610 pr_err("stop %s: too many IO errors on backing device %s\n", 1611 dc->disk.disk->disk_name, dc->backing_dev_name); 1612 1613 bcache_device_stop(&dc->disk); 1614 return true; 1615 } 1616 1617 /* Cache set */ 1618 1619 __printf(2, 3) 1620 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 1621 { 1622 struct va_format vaf; 1623 va_list args; 1624 1625 if (c->on_error != ON_ERROR_PANIC && 1626 test_bit(CACHE_SET_STOPPING, &c->flags)) 1627 return false; 1628 1629 if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1630 pr_info("CACHE_SET_IO_DISABLE already set\n"); 1631 1632 /* 1633 * XXX: we can be called from atomic context 1634 * acquire_console_sem(); 1635 */ 1636 1637 va_start(args, fmt); 1638 1639 vaf.fmt = fmt; 1640 vaf.va = &args; 1641 1642 pr_err("error on %pU: %pV, disabling caching\n", 1643 c->sb.set_uuid, &vaf); 1644 1645 va_end(args); 1646 1647 if (c->on_error == ON_ERROR_PANIC) 1648 panic("panic forced after error\n"); 1649 1650 bch_cache_set_unregister(c); 1651 return true; 1652 } 1653 1654 /* When c->kobj released */ 1655 void bch_cache_set_release(struct kobject *kobj) 1656 { 1657 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 1658 1659 kfree(c); 1660 module_put(THIS_MODULE); 1661 } 1662 1663 static void cache_set_free(struct closure *cl) 1664 { 1665 struct cache_set *c = container_of(cl, struct cache_set, cl); 1666 struct cache *ca; 1667 unsigned int i; 1668 1669 debugfs_remove(c->debug); 1670 1671 bch_open_buckets_free(c); 1672 bch_btree_cache_free(c); 1673 bch_journal_free(c); 1674 1675 mutex_lock(&bch_register_lock); 1676 for_each_cache(ca, c, i) 1677 if (ca) { 1678 ca->set = NULL; 1679 c->cache[ca->sb.nr_this_dev] = NULL; 1680 kobject_put(&ca->kobj); 1681 } 1682 1683 bch_bset_sort_state_free(&c->sort); 1684 free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb))); 1685 1686 if (c->moving_gc_wq) 1687 destroy_workqueue(c->moving_gc_wq); 1688 bioset_exit(&c->bio_split); 1689 mempool_exit(&c->fill_iter); 1690 mempool_exit(&c->bio_meta); 1691 mempool_exit(&c->search); 1692 kfree(c->devices); 1693 1694 list_del(&c->list); 1695 mutex_unlock(&bch_register_lock); 1696 1697 pr_info("Cache set %pU unregistered\n", c->sb.set_uuid); 1698 wake_up(&unregister_wait); 1699 1700 closure_debug_destroy(&c->cl); 1701 kobject_put(&c->kobj); 1702 } 1703 1704 static void cache_set_flush(struct closure *cl) 1705 { 1706 struct cache_set *c = container_of(cl, struct cache_set, caching); 1707 struct cache *ca; 1708 struct btree *b; 1709 unsigned int i; 1710 1711 bch_cache_accounting_destroy(&c->accounting); 1712 1713 kobject_put(&c->internal); 1714 kobject_del(&c->kobj); 1715 1716 if (!IS_ERR_OR_NULL(c->gc_thread)) 1717 kthread_stop(c->gc_thread); 1718 1719 if (!IS_ERR_OR_NULL(c->root)) 1720 list_add(&c->root->list, &c->btree_cache); 1721 1722 /* 1723 * Avoid flushing cached nodes if cache set is retiring 1724 * due to too many I/O errors detected. 1725 */ 1726 if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1727 list_for_each_entry(b, &c->btree_cache, list) { 1728 mutex_lock(&b->write_lock); 1729 if (btree_node_dirty(b)) 1730 __bch_btree_node_write(b, NULL); 1731 mutex_unlock(&b->write_lock); 1732 } 1733 1734 for_each_cache(ca, c, i) 1735 if (ca->alloc_thread) 1736 kthread_stop(ca->alloc_thread); 1737 1738 if (c->journal.cur) { 1739 cancel_delayed_work_sync(&c->journal.work); 1740 /* flush last journal entry if needed */ 1741 c->journal.work.work.func(&c->journal.work.work); 1742 } 1743 1744 closure_return(cl); 1745 } 1746 1747 /* 1748 * This function is only called when CACHE_SET_IO_DISABLE is set, which means 1749 * cache set is unregistering due to too many I/O errors. In this condition, 1750 * the bcache device might be stopped, it depends on stop_when_cache_set_failed 1751 * value and whether the broken cache has dirty data: 1752 * 1753 * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device 1754 * BCH_CACHED_STOP_AUTO 0 NO 1755 * BCH_CACHED_STOP_AUTO 1 YES 1756 * BCH_CACHED_DEV_STOP_ALWAYS 0 YES 1757 * BCH_CACHED_DEV_STOP_ALWAYS 1 YES 1758 * 1759 * The expected behavior is, if stop_when_cache_set_failed is configured to 1760 * "auto" via sysfs interface, the bcache device will not be stopped if the 1761 * backing device is clean on the broken cache device. 1762 */ 1763 static void conditional_stop_bcache_device(struct cache_set *c, 1764 struct bcache_device *d, 1765 struct cached_dev *dc) 1766 { 1767 if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) { 1768 pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n", 1769 d->disk->disk_name, c->sb.set_uuid); 1770 bcache_device_stop(d); 1771 } else if (atomic_read(&dc->has_dirty)) { 1772 /* 1773 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO 1774 * and dc->has_dirty == 1 1775 */ 1776 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n", 1777 d->disk->disk_name); 1778 /* 1779 * There might be a small time gap that cache set is 1780 * released but bcache device is not. Inside this time 1781 * gap, regular I/O requests will directly go into 1782 * backing device as no cache set attached to. This 1783 * behavior may also introduce potential inconsistence 1784 * data in writeback mode while cache is dirty. 1785 * Therefore before calling bcache_device_stop() due 1786 * to a broken cache device, dc->io_disable should be 1787 * explicitly set to true. 1788 */ 1789 dc->io_disable = true; 1790 /* make others know io_disable is true earlier */ 1791 smp_mb(); 1792 bcache_device_stop(d); 1793 } else { 1794 /* 1795 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO 1796 * and dc->has_dirty == 0 1797 */ 1798 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n", 1799 d->disk->disk_name); 1800 } 1801 } 1802 1803 static void __cache_set_unregister(struct closure *cl) 1804 { 1805 struct cache_set *c = container_of(cl, struct cache_set, caching); 1806 struct cached_dev *dc; 1807 struct bcache_device *d; 1808 size_t i; 1809 1810 mutex_lock(&bch_register_lock); 1811 1812 for (i = 0; i < c->devices_max_used; i++) { 1813 d = c->devices[i]; 1814 if (!d) 1815 continue; 1816 1817 if (!UUID_FLASH_ONLY(&c->uuids[i]) && 1818 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { 1819 dc = container_of(d, struct cached_dev, disk); 1820 bch_cached_dev_detach(dc); 1821 if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1822 conditional_stop_bcache_device(c, d, dc); 1823 } else { 1824 bcache_device_stop(d); 1825 } 1826 } 1827 1828 mutex_unlock(&bch_register_lock); 1829 1830 continue_at(cl, cache_set_flush, system_wq); 1831 } 1832 1833 void bch_cache_set_stop(struct cache_set *c) 1834 { 1835 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) 1836 /* closure_fn set to __cache_set_unregister() */ 1837 closure_queue(&c->caching); 1838 } 1839 1840 void bch_cache_set_unregister(struct cache_set *c) 1841 { 1842 set_bit(CACHE_SET_UNREGISTERING, &c->flags); 1843 bch_cache_set_stop(c); 1844 } 1845 1846 #define alloc_bucket_pages(gfp, c) \ 1847 ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c)))) 1848 1849 #define alloc_meta_bucket_pages(gfp, sb) \ 1850 ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb)))) 1851 1852 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) 1853 { 1854 int iter_size; 1855 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); 1856 1857 if (!c) 1858 return NULL; 1859 1860 __module_get(THIS_MODULE); 1861 closure_init(&c->cl, NULL); 1862 set_closure_fn(&c->cl, cache_set_free, system_wq); 1863 1864 closure_init(&c->caching, &c->cl); 1865 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); 1866 1867 /* Maybe create continue_at_noreturn() and use it here? */ 1868 closure_set_stopped(&c->cl); 1869 closure_put(&c->cl); 1870 1871 kobject_init(&c->kobj, &bch_cache_set_ktype); 1872 kobject_init(&c->internal, &bch_cache_set_internal_ktype); 1873 1874 bch_cache_accounting_init(&c->accounting, &c->cl); 1875 1876 memcpy(c->sb.set_uuid, sb->set_uuid, 16); 1877 c->sb.block_size = sb->block_size; 1878 c->sb.bucket_size = sb->bucket_size; 1879 c->sb.nr_in_set = sb->nr_in_set; 1880 c->sb.last_mount = sb->last_mount; 1881 c->sb.version = sb->version; 1882 if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) { 1883 c->sb.feature_compat = sb->feature_compat; 1884 c->sb.feature_ro_compat = sb->feature_ro_compat; 1885 c->sb.feature_incompat = sb->feature_incompat; 1886 } 1887 1888 c->bucket_bits = ilog2(sb->bucket_size); 1889 c->block_bits = ilog2(sb->block_size); 1890 c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry); 1891 c->devices_max_used = 0; 1892 atomic_set(&c->attached_dev_nr, 0); 1893 c->btree_pages = meta_bucket_pages(&c->sb); 1894 if (c->btree_pages > BTREE_MAX_PAGES) 1895 c->btree_pages = max_t(int, c->btree_pages / 4, 1896 BTREE_MAX_PAGES); 1897 1898 sema_init(&c->sb_write_mutex, 1); 1899 mutex_init(&c->bucket_lock); 1900 init_waitqueue_head(&c->btree_cache_wait); 1901 spin_lock_init(&c->btree_cannibalize_lock); 1902 init_waitqueue_head(&c->bucket_wait); 1903 init_waitqueue_head(&c->gc_wait); 1904 sema_init(&c->uuid_write_mutex, 1); 1905 1906 spin_lock_init(&c->btree_gc_time.lock); 1907 spin_lock_init(&c->btree_split_time.lock); 1908 spin_lock_init(&c->btree_read_time.lock); 1909 1910 bch_moving_init_cache_set(c); 1911 1912 INIT_LIST_HEAD(&c->list); 1913 INIT_LIST_HEAD(&c->cached_devs); 1914 INIT_LIST_HEAD(&c->btree_cache); 1915 INIT_LIST_HEAD(&c->btree_cache_freeable); 1916 INIT_LIST_HEAD(&c->btree_cache_freed); 1917 INIT_LIST_HEAD(&c->data_buckets); 1918 1919 iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) * 1920 sizeof(struct btree_iter_set); 1921 1922 c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL); 1923 if (!c->devices) 1924 goto err; 1925 1926 if (mempool_init_slab_pool(&c->search, 32, bch_search_cache)) 1927 goto err; 1928 1929 if (mempool_init_kmalloc_pool(&c->bio_meta, 2, 1930 sizeof(struct bbio) + 1931 sizeof(struct bio_vec) * bucket_pages(c))) 1932 goto err; 1933 1934 if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size)) 1935 goto err; 1936 1937 if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), 1938 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) 1939 goto err; 1940 1941 c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb); 1942 if (!c->uuids) 1943 goto err; 1944 1945 c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0); 1946 if (!c->moving_gc_wq) 1947 goto err; 1948 1949 if (bch_journal_alloc(c)) 1950 goto err; 1951 1952 if (bch_btree_cache_alloc(c)) 1953 goto err; 1954 1955 if (bch_open_buckets_alloc(c)) 1956 goto err; 1957 1958 if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) 1959 goto err; 1960 1961 c->congested_read_threshold_us = 2000; 1962 c->congested_write_threshold_us = 20000; 1963 c->error_limit = DEFAULT_IO_ERROR_LIMIT; 1964 c->idle_max_writeback_rate_enabled = 1; 1965 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); 1966 1967 return c; 1968 err: 1969 bch_cache_set_unregister(c); 1970 return NULL; 1971 } 1972 1973 static int run_cache_set(struct cache_set *c) 1974 { 1975 const char *err = "cannot allocate memory"; 1976 struct cached_dev *dc, *t; 1977 struct cache *ca; 1978 struct closure cl; 1979 unsigned int i; 1980 LIST_HEAD(journal); 1981 struct journal_replay *l; 1982 1983 closure_init_stack(&cl); 1984 1985 for_each_cache(ca, c, i) 1986 c->nbuckets += ca->sb.nbuckets; 1987 set_gc_sectors(c); 1988 1989 if (CACHE_SYNC(&c->sb)) { 1990 struct bkey *k; 1991 struct jset *j; 1992 1993 err = "cannot allocate memory for journal"; 1994 if (bch_journal_read(c, &journal)) 1995 goto err; 1996 1997 pr_debug("btree_journal_read() done\n"); 1998 1999 err = "no journal entries found"; 2000 if (list_empty(&journal)) 2001 goto err; 2002 2003 j = &list_entry(journal.prev, struct journal_replay, list)->j; 2004 2005 err = "IO error reading priorities"; 2006 for_each_cache(ca, c, i) { 2007 if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev])) 2008 goto err; 2009 } 2010 2011 /* 2012 * If prio_read() fails it'll call cache_set_error and we'll 2013 * tear everything down right away, but if we perhaps checked 2014 * sooner we could avoid journal replay. 2015 */ 2016 2017 k = &j->btree_root; 2018 2019 err = "bad btree root"; 2020 if (__bch_btree_ptr_invalid(c, k)) 2021 goto err; 2022 2023 err = "error reading btree root"; 2024 c->root = bch_btree_node_get(c, NULL, k, 2025 j->btree_level, 2026 true, NULL); 2027 if (IS_ERR_OR_NULL(c->root)) 2028 goto err; 2029 2030 list_del_init(&c->root->list); 2031 rw_unlock(true, c->root); 2032 2033 err = uuid_read(c, j, &cl); 2034 if (err) 2035 goto err; 2036 2037 err = "error in recovery"; 2038 if (bch_btree_check(c)) 2039 goto err; 2040 2041 bch_journal_mark(c, &journal); 2042 bch_initial_gc_finish(c); 2043 pr_debug("btree_check() done\n"); 2044 2045 /* 2046 * bcache_journal_next() can't happen sooner, or 2047 * btree_gc_finish() will give spurious errors about last_gc > 2048 * gc_gen - this is a hack but oh well. 2049 */ 2050 bch_journal_next(&c->journal); 2051 2052 err = "error starting allocator thread"; 2053 for_each_cache(ca, c, i) 2054 if (bch_cache_allocator_start(ca)) 2055 goto err; 2056 2057 /* 2058 * First place it's safe to allocate: btree_check() and 2059 * btree_gc_finish() have to run before we have buckets to 2060 * allocate, and bch_bucket_alloc_set() might cause a journal 2061 * entry to be written so bcache_journal_next() has to be called 2062 * first. 2063 * 2064 * If the uuids were in the old format we have to rewrite them 2065 * before the next journal entry is written: 2066 */ 2067 if (j->version < BCACHE_JSET_VERSION_UUID) 2068 __uuid_write(c); 2069 2070 err = "bcache: replay journal failed"; 2071 if (bch_journal_replay(c, &journal)) 2072 goto err; 2073 } else { 2074 pr_notice("invalidating existing data\n"); 2075 2076 for_each_cache(ca, c, i) { 2077 unsigned int j; 2078 2079 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 2080 2, SB_JOURNAL_BUCKETS); 2081 2082 for (j = 0; j < ca->sb.keys; j++) 2083 ca->sb.d[j] = ca->sb.first_bucket + j; 2084 } 2085 2086 bch_initial_gc_finish(c); 2087 2088 err = "error starting allocator thread"; 2089 for_each_cache(ca, c, i) 2090 if (bch_cache_allocator_start(ca)) 2091 goto err; 2092 2093 mutex_lock(&c->bucket_lock); 2094 for_each_cache(ca, c, i) 2095 bch_prio_write(ca, true); 2096 mutex_unlock(&c->bucket_lock); 2097 2098 err = "cannot allocate new UUID bucket"; 2099 if (__uuid_write(c)) 2100 goto err; 2101 2102 err = "cannot allocate new btree root"; 2103 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); 2104 if (IS_ERR_OR_NULL(c->root)) 2105 goto err; 2106 2107 mutex_lock(&c->root->write_lock); 2108 bkey_copy_key(&c->root->key, &MAX_KEY); 2109 bch_btree_node_write(c->root, &cl); 2110 mutex_unlock(&c->root->write_lock); 2111 2112 bch_btree_set_root(c->root); 2113 rw_unlock(true, c->root); 2114 2115 /* 2116 * We don't want to write the first journal entry until 2117 * everything is set up - fortunately journal entries won't be 2118 * written until the SET_CACHE_SYNC() here: 2119 */ 2120 SET_CACHE_SYNC(&c->sb, true); 2121 2122 bch_journal_next(&c->journal); 2123 bch_journal_meta(c, &cl); 2124 } 2125 2126 err = "error starting gc thread"; 2127 if (bch_gc_thread_start(c)) 2128 goto err; 2129 2130 closure_sync(&cl); 2131 c->sb.last_mount = (u32)ktime_get_real_seconds(); 2132 bcache_write_super(c); 2133 2134 list_for_each_entry_safe(dc, t, &uncached_devices, list) 2135 bch_cached_dev_attach(dc, c, NULL); 2136 2137 flash_devs_run(c); 2138 2139 set_bit(CACHE_SET_RUNNING, &c->flags); 2140 return 0; 2141 err: 2142 while (!list_empty(&journal)) { 2143 l = list_first_entry(&journal, struct journal_replay, list); 2144 list_del(&l->list); 2145 kfree(l); 2146 } 2147 2148 closure_sync(&cl); 2149 2150 bch_cache_set_error(c, "%s", err); 2151 2152 return -EIO; 2153 } 2154 2155 static bool can_attach_cache(struct cache *ca, struct cache_set *c) 2156 { 2157 return ca->sb.block_size == c->sb.block_size && 2158 ca->sb.bucket_size == c->sb.bucket_size && 2159 ca->sb.nr_in_set == c->sb.nr_in_set; 2160 } 2161 2162 static const char *register_cache_set(struct cache *ca) 2163 { 2164 char buf[12]; 2165 const char *err = "cannot allocate memory"; 2166 struct cache_set *c; 2167 2168 list_for_each_entry(c, &bch_cache_sets, list) 2169 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { 2170 if (c->cache[ca->sb.nr_this_dev]) 2171 return "duplicate cache set member"; 2172 2173 if (!can_attach_cache(ca, c)) 2174 return "cache sb does not match set"; 2175 2176 if (!CACHE_SYNC(&ca->sb)) 2177 SET_CACHE_SYNC(&c->sb, false); 2178 2179 goto found; 2180 } 2181 2182 c = bch_cache_set_alloc(&ca->sb); 2183 if (!c) 2184 return err; 2185 2186 err = "error creating kobject"; 2187 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || 2188 kobject_add(&c->internal, &c->kobj, "internal")) 2189 goto err; 2190 2191 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) 2192 goto err; 2193 2194 bch_debug_init_cache_set(c); 2195 2196 list_add(&c->list, &bch_cache_sets); 2197 found: 2198 sprintf(buf, "cache%i", ca->sb.nr_this_dev); 2199 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || 2200 sysfs_create_link(&c->kobj, &ca->kobj, buf)) 2201 goto err; 2202 2203 /* 2204 * A special case is both ca->sb.seq and c->sb.seq are 0, 2205 * such condition happens on a new created cache device whose 2206 * super block is never flushed yet. In this case c->sb.version 2207 * and other members should be updated too, otherwise we will 2208 * have a mistaken super block version in cache set. 2209 */ 2210 if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) { 2211 c->sb.version = ca->sb.version; 2212 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); 2213 c->sb.flags = ca->sb.flags; 2214 c->sb.seq = ca->sb.seq; 2215 pr_debug("set version = %llu\n", c->sb.version); 2216 } 2217 2218 kobject_get(&ca->kobj); 2219 ca->set = c; 2220 ca->set->cache[ca->sb.nr_this_dev] = ca; 2221 c->cache_by_alloc[c->caches_loaded++] = ca; 2222 2223 if (c->caches_loaded == c->sb.nr_in_set) { 2224 err = "failed to run cache set"; 2225 if (run_cache_set(c) < 0) 2226 goto err; 2227 } 2228 2229 return NULL; 2230 err: 2231 bch_cache_set_unregister(c); 2232 return err; 2233 } 2234 2235 /* Cache device */ 2236 2237 /* When ca->kobj released */ 2238 void bch_cache_release(struct kobject *kobj) 2239 { 2240 struct cache *ca = container_of(kobj, struct cache, kobj); 2241 unsigned int i; 2242 2243 if (ca->set) { 2244 BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); 2245 ca->set->cache[ca->sb.nr_this_dev] = NULL; 2246 } 2247 2248 free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb))); 2249 kfree(ca->prio_buckets); 2250 vfree(ca->buckets); 2251 2252 free_heap(&ca->heap); 2253 free_fifo(&ca->free_inc); 2254 2255 for (i = 0; i < RESERVE_NR; i++) 2256 free_fifo(&ca->free[i]); 2257 2258 if (ca->sb_disk) 2259 put_page(virt_to_page(ca->sb_disk)); 2260 2261 if (!IS_ERR_OR_NULL(ca->bdev)) 2262 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2263 2264 kfree(ca); 2265 module_put(THIS_MODULE); 2266 } 2267 2268 static int cache_alloc(struct cache *ca) 2269 { 2270 size_t free; 2271 size_t btree_buckets; 2272 struct bucket *b; 2273 int ret = -ENOMEM; 2274 const char *err = NULL; 2275 2276 __module_get(THIS_MODULE); 2277 kobject_init(&ca->kobj, &bch_cache_ktype); 2278 2279 bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); 2280 2281 /* 2282 * when ca->sb.njournal_buckets is not zero, journal exists, 2283 * and in bch_journal_replay(), tree node may split, 2284 * so bucket of RESERVE_BTREE type is needed, 2285 * the worst situation is all journal buckets are valid journal, 2286 * and all the keys need to replay, 2287 * so the number of RESERVE_BTREE type buckets should be as much 2288 * as journal buckets 2289 */ 2290 btree_buckets = ca->sb.njournal_buckets ?: 8; 2291 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; 2292 if (!free) { 2293 ret = -EPERM; 2294 err = "ca->sb.nbuckets is too small"; 2295 goto err_free; 2296 } 2297 2298 if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, 2299 GFP_KERNEL)) { 2300 err = "ca->free[RESERVE_BTREE] alloc failed"; 2301 goto err_btree_alloc; 2302 } 2303 2304 if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), 2305 GFP_KERNEL)) { 2306 err = "ca->free[RESERVE_PRIO] alloc failed"; 2307 goto err_prio_alloc; 2308 } 2309 2310 if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) { 2311 err = "ca->free[RESERVE_MOVINGGC] alloc failed"; 2312 goto err_movinggc_alloc; 2313 } 2314 2315 if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) { 2316 err = "ca->free[RESERVE_NONE] alloc failed"; 2317 goto err_none_alloc; 2318 } 2319 2320 if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) { 2321 err = "ca->free_inc alloc failed"; 2322 goto err_free_inc_alloc; 2323 } 2324 2325 if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) { 2326 err = "ca->heap alloc failed"; 2327 goto err_heap_alloc; 2328 } 2329 2330 ca->buckets = vzalloc(array_size(sizeof(struct bucket), 2331 ca->sb.nbuckets)); 2332 if (!ca->buckets) { 2333 err = "ca->buckets alloc failed"; 2334 goto err_buckets_alloc; 2335 } 2336 2337 ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t), 2338 prio_buckets(ca), 2), 2339 GFP_KERNEL); 2340 if (!ca->prio_buckets) { 2341 err = "ca->prio_buckets alloc failed"; 2342 goto err_prio_buckets_alloc; 2343 } 2344 2345 ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb); 2346 if (!ca->disk_buckets) { 2347 err = "ca->disk_buckets alloc failed"; 2348 goto err_disk_buckets_alloc; 2349 } 2350 2351 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); 2352 2353 for_each_bucket(b, ca) 2354 atomic_set(&b->pin, 0); 2355 return 0; 2356 2357 err_disk_buckets_alloc: 2358 kfree(ca->prio_buckets); 2359 err_prio_buckets_alloc: 2360 vfree(ca->buckets); 2361 err_buckets_alloc: 2362 free_heap(&ca->heap); 2363 err_heap_alloc: 2364 free_fifo(&ca->free_inc); 2365 err_free_inc_alloc: 2366 free_fifo(&ca->free[RESERVE_NONE]); 2367 err_none_alloc: 2368 free_fifo(&ca->free[RESERVE_MOVINGGC]); 2369 err_movinggc_alloc: 2370 free_fifo(&ca->free[RESERVE_PRIO]); 2371 err_prio_alloc: 2372 free_fifo(&ca->free[RESERVE_BTREE]); 2373 err_btree_alloc: 2374 err_free: 2375 module_put(THIS_MODULE); 2376 if (err) 2377 pr_notice("error %s: %s\n", ca->cache_dev_name, err); 2378 return ret; 2379 } 2380 2381 static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, 2382 struct block_device *bdev, struct cache *ca) 2383 { 2384 const char *err = NULL; /* must be set for any error case */ 2385 int ret = 0; 2386 2387 bdevname(bdev, ca->cache_dev_name); 2388 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); 2389 ca->bdev = bdev; 2390 ca->bdev->bd_holder = ca; 2391 ca->sb_disk = sb_disk; 2392 2393 if (blk_queue_discard(bdev_get_queue(bdev))) 2394 ca->discard = CACHE_DISCARD(&ca->sb); 2395 2396 ret = cache_alloc(ca); 2397 if (ret != 0) { 2398 /* 2399 * If we failed here, it means ca->kobj is not initialized yet, 2400 * kobject_put() won't be called and there is no chance to 2401 * call blkdev_put() to bdev in bch_cache_release(). So we 2402 * explicitly call blkdev_put() here. 2403 */ 2404 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2405 if (ret == -ENOMEM) 2406 err = "cache_alloc(): -ENOMEM"; 2407 else if (ret == -EPERM) 2408 err = "cache_alloc(): cache device is too small"; 2409 else 2410 err = "cache_alloc(): unknown error"; 2411 goto err; 2412 } 2413 2414 if (kobject_add(&ca->kobj, 2415 &part_to_dev(bdev->bd_part)->kobj, 2416 "bcache")) { 2417 err = "error calling kobject_add"; 2418 ret = -ENOMEM; 2419 goto out; 2420 } 2421 2422 mutex_lock(&bch_register_lock); 2423 err = register_cache_set(ca); 2424 mutex_unlock(&bch_register_lock); 2425 2426 if (err) { 2427 ret = -ENODEV; 2428 goto out; 2429 } 2430 2431 pr_info("registered cache device %s\n", ca->cache_dev_name); 2432 2433 out: 2434 kobject_put(&ca->kobj); 2435 2436 err: 2437 if (err) 2438 pr_notice("error %s: %s\n", ca->cache_dev_name, err); 2439 2440 return ret; 2441 } 2442 2443 /* Global interfaces/init */ 2444 2445 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, 2446 const char *buffer, size_t size); 2447 static ssize_t bch_pending_bdevs_cleanup(struct kobject *k, 2448 struct kobj_attribute *attr, 2449 const char *buffer, size_t size); 2450 2451 kobj_attribute_write(register, register_bcache); 2452 kobj_attribute_write(register_quiet, register_bcache); 2453 kobj_attribute_write(register_async, register_bcache); 2454 kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup); 2455 2456 static bool bch_is_open_backing(struct block_device *bdev) 2457 { 2458 struct cache_set *c, *tc; 2459 struct cached_dev *dc, *t; 2460 2461 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 2462 list_for_each_entry_safe(dc, t, &c->cached_devs, list) 2463 if (dc->bdev == bdev) 2464 return true; 2465 list_for_each_entry_safe(dc, t, &uncached_devices, list) 2466 if (dc->bdev == bdev) 2467 return true; 2468 return false; 2469 } 2470 2471 static bool bch_is_open_cache(struct block_device *bdev) 2472 { 2473 struct cache_set *c, *tc; 2474 struct cache *ca; 2475 unsigned int i; 2476 2477 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 2478 for_each_cache(ca, c, i) 2479 if (ca->bdev == bdev) 2480 return true; 2481 return false; 2482 } 2483 2484 static bool bch_is_open(struct block_device *bdev) 2485 { 2486 return bch_is_open_cache(bdev) || bch_is_open_backing(bdev); 2487 } 2488 2489 struct async_reg_args { 2490 struct delayed_work reg_work; 2491 char *path; 2492 struct cache_sb *sb; 2493 struct cache_sb_disk *sb_disk; 2494 struct block_device *bdev; 2495 }; 2496 2497 static void register_bdev_worker(struct work_struct *work) 2498 { 2499 int fail = false; 2500 struct async_reg_args *args = 2501 container_of(work, struct async_reg_args, reg_work.work); 2502 struct cached_dev *dc; 2503 2504 dc = kzalloc(sizeof(*dc), GFP_KERNEL); 2505 if (!dc) { 2506 fail = true; 2507 put_page(virt_to_page(args->sb_disk)); 2508 blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2509 goto out; 2510 } 2511 2512 mutex_lock(&bch_register_lock); 2513 if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0) 2514 fail = true; 2515 mutex_unlock(&bch_register_lock); 2516 2517 out: 2518 if (fail) 2519 pr_info("error %s: fail to register backing device\n", 2520 args->path); 2521 kfree(args->sb); 2522 kfree(args->path); 2523 kfree(args); 2524 module_put(THIS_MODULE); 2525 } 2526 2527 static void register_cache_worker(struct work_struct *work) 2528 { 2529 int fail = false; 2530 struct async_reg_args *args = 2531 container_of(work, struct async_reg_args, reg_work.work); 2532 struct cache *ca; 2533 2534 ca = kzalloc(sizeof(*ca), GFP_KERNEL); 2535 if (!ca) { 2536 fail = true; 2537 put_page(virt_to_page(args->sb_disk)); 2538 blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2539 goto out; 2540 } 2541 2542 /* blkdev_put() will be called in bch_cache_release() */ 2543 if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0) 2544 fail = true; 2545 2546 out: 2547 if (fail) 2548 pr_info("error %s: fail to register cache device\n", 2549 args->path); 2550 kfree(args->sb); 2551 kfree(args->path); 2552 kfree(args); 2553 module_put(THIS_MODULE); 2554 } 2555 2556 static void register_device_aync(struct async_reg_args *args) 2557 { 2558 if (SB_IS_BDEV(args->sb)) 2559 INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker); 2560 else 2561 INIT_DELAYED_WORK(&args->reg_work, register_cache_worker); 2562 2563 /* 10 jiffies is enough for a delay */ 2564 queue_delayed_work(system_wq, &args->reg_work, 10); 2565 } 2566 2567 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, 2568 const char *buffer, size_t size) 2569 { 2570 const char *err; 2571 char *path = NULL; 2572 struct cache_sb *sb; 2573 struct cache_sb_disk *sb_disk; 2574 struct block_device *bdev; 2575 ssize_t ret; 2576 2577 ret = -EBUSY; 2578 err = "failed to reference bcache module"; 2579 if (!try_module_get(THIS_MODULE)) 2580 goto out; 2581 2582 /* For latest state of bcache_is_reboot */ 2583 smp_mb(); 2584 err = "bcache is in reboot"; 2585 if (bcache_is_reboot) 2586 goto out_module_put; 2587 2588 ret = -ENOMEM; 2589 err = "cannot allocate memory"; 2590 path = kstrndup(buffer, size, GFP_KERNEL); 2591 if (!path) 2592 goto out_module_put; 2593 2594 sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL); 2595 if (!sb) 2596 goto out_free_path; 2597 2598 ret = -EINVAL; 2599 err = "failed to open device"; 2600 bdev = blkdev_get_by_path(strim(path), 2601 FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2602 sb); 2603 if (IS_ERR(bdev)) { 2604 if (bdev == ERR_PTR(-EBUSY)) { 2605 bdev = lookup_bdev(strim(path)); 2606 mutex_lock(&bch_register_lock); 2607 if (!IS_ERR(bdev) && bch_is_open(bdev)) 2608 err = "device already registered"; 2609 else 2610 err = "device busy"; 2611 mutex_unlock(&bch_register_lock); 2612 if (!IS_ERR(bdev)) 2613 bdput(bdev); 2614 if (attr == &ksysfs_register_quiet) 2615 goto done; 2616 } 2617 goto out_free_sb; 2618 } 2619 2620 err = "failed to set blocksize"; 2621 if (set_blocksize(bdev, 4096)) 2622 goto out_blkdev_put; 2623 2624 err = read_super(sb, bdev, &sb_disk); 2625 if (err) 2626 goto out_blkdev_put; 2627 2628 err = "failed to register device"; 2629 if (attr == &ksysfs_register_async) { 2630 /* register in asynchronous way */ 2631 struct async_reg_args *args = 2632 kzalloc(sizeof(struct async_reg_args), GFP_KERNEL); 2633 2634 if (!args) { 2635 ret = -ENOMEM; 2636 err = "cannot allocate memory"; 2637 goto out_put_sb_page; 2638 } 2639 2640 args->path = path; 2641 args->sb = sb; 2642 args->sb_disk = sb_disk; 2643 args->bdev = bdev; 2644 register_device_aync(args); 2645 /* No wait and returns to user space */ 2646 goto async_done; 2647 } 2648 2649 if (SB_IS_BDEV(sb)) { 2650 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 2651 2652 if (!dc) 2653 goto out_put_sb_page; 2654 2655 mutex_lock(&bch_register_lock); 2656 ret = register_bdev(sb, sb_disk, bdev, dc); 2657 mutex_unlock(&bch_register_lock); 2658 /* blkdev_put() will be called in cached_dev_free() */ 2659 if (ret < 0) 2660 goto out_free_sb; 2661 } else { 2662 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); 2663 2664 if (!ca) 2665 goto out_put_sb_page; 2666 2667 /* blkdev_put() will be called in bch_cache_release() */ 2668 if (register_cache(sb, sb_disk, bdev, ca) != 0) 2669 goto out_free_sb; 2670 } 2671 2672 done: 2673 kfree(sb); 2674 kfree(path); 2675 module_put(THIS_MODULE); 2676 async_done: 2677 return size; 2678 2679 out_put_sb_page: 2680 put_page(virt_to_page(sb_disk)); 2681 out_blkdev_put: 2682 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2683 out_free_sb: 2684 kfree(sb); 2685 out_free_path: 2686 kfree(path); 2687 path = NULL; 2688 out_module_put: 2689 module_put(THIS_MODULE); 2690 out: 2691 pr_info("error %s: %s\n", path?path:"", err); 2692 return ret; 2693 } 2694 2695 2696 struct pdev { 2697 struct list_head list; 2698 struct cached_dev *dc; 2699 }; 2700 2701 static ssize_t bch_pending_bdevs_cleanup(struct kobject *k, 2702 struct kobj_attribute *attr, 2703 const char *buffer, 2704 size_t size) 2705 { 2706 LIST_HEAD(pending_devs); 2707 ssize_t ret = size; 2708 struct cached_dev *dc, *tdc; 2709 struct pdev *pdev, *tpdev; 2710 struct cache_set *c, *tc; 2711 2712 mutex_lock(&bch_register_lock); 2713 list_for_each_entry_safe(dc, tdc, &uncached_devices, list) { 2714 pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL); 2715 if (!pdev) 2716 break; 2717 pdev->dc = dc; 2718 list_add(&pdev->list, &pending_devs); 2719 } 2720 2721 list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) { 2722 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { 2723 char *pdev_set_uuid = pdev->dc->sb.set_uuid; 2724 char *set_uuid = c->sb.uuid; 2725 2726 if (!memcmp(pdev_set_uuid, set_uuid, 16)) { 2727 list_del(&pdev->list); 2728 kfree(pdev); 2729 break; 2730 } 2731 } 2732 } 2733 mutex_unlock(&bch_register_lock); 2734 2735 list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) { 2736 pr_info("delete pdev %p\n", pdev); 2737 list_del(&pdev->list); 2738 bcache_device_stop(&pdev->dc->disk); 2739 kfree(pdev); 2740 } 2741 2742 return ret; 2743 } 2744 2745 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) 2746 { 2747 if (bcache_is_reboot) 2748 return NOTIFY_DONE; 2749 2750 if (code == SYS_DOWN || 2751 code == SYS_HALT || 2752 code == SYS_POWER_OFF) { 2753 DEFINE_WAIT(wait); 2754 unsigned long start = jiffies; 2755 bool stopped = false; 2756 2757 struct cache_set *c, *tc; 2758 struct cached_dev *dc, *tdc; 2759 2760 mutex_lock(&bch_register_lock); 2761 2762 if (bcache_is_reboot) 2763 goto out; 2764 2765 /* New registration is rejected since now */ 2766 bcache_is_reboot = true; 2767 /* 2768 * Make registering caller (if there is) on other CPU 2769 * core know bcache_is_reboot set to true earlier 2770 */ 2771 smp_mb(); 2772 2773 if (list_empty(&bch_cache_sets) && 2774 list_empty(&uncached_devices)) 2775 goto out; 2776 2777 mutex_unlock(&bch_register_lock); 2778 2779 pr_info("Stopping all devices:\n"); 2780 2781 /* 2782 * The reason bch_register_lock is not held to call 2783 * bch_cache_set_stop() and bcache_device_stop() is to 2784 * avoid potential deadlock during reboot, because cache 2785 * set or bcache device stopping process will acqurie 2786 * bch_register_lock too. 2787 * 2788 * We are safe here because bcache_is_reboot sets to 2789 * true already, register_bcache() will reject new 2790 * registration now. bcache_is_reboot also makes sure 2791 * bcache_reboot() won't be re-entered on by other thread, 2792 * so there is no race in following list iteration by 2793 * list_for_each_entry_safe(). 2794 */ 2795 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 2796 bch_cache_set_stop(c); 2797 2798 list_for_each_entry_safe(dc, tdc, &uncached_devices, list) 2799 bcache_device_stop(&dc->disk); 2800 2801 2802 /* 2803 * Give an early chance for other kthreads and 2804 * kworkers to stop themselves 2805 */ 2806 schedule(); 2807 2808 /* What's a condition variable? */ 2809 while (1) { 2810 long timeout = start + 10 * HZ - jiffies; 2811 2812 mutex_lock(&bch_register_lock); 2813 stopped = list_empty(&bch_cache_sets) && 2814 list_empty(&uncached_devices); 2815 2816 if (timeout < 0 || stopped) 2817 break; 2818 2819 prepare_to_wait(&unregister_wait, &wait, 2820 TASK_UNINTERRUPTIBLE); 2821 2822 mutex_unlock(&bch_register_lock); 2823 schedule_timeout(timeout); 2824 } 2825 2826 finish_wait(&unregister_wait, &wait); 2827 2828 if (stopped) 2829 pr_info("All devices stopped\n"); 2830 else 2831 pr_notice("Timeout waiting for devices to be closed\n"); 2832 out: 2833 mutex_unlock(&bch_register_lock); 2834 } 2835 2836 return NOTIFY_DONE; 2837 } 2838 2839 static struct notifier_block reboot = { 2840 .notifier_call = bcache_reboot, 2841 .priority = INT_MAX, /* before any real devices */ 2842 }; 2843 2844 static void bcache_exit(void) 2845 { 2846 bch_debug_exit(); 2847 bch_request_exit(); 2848 if (bcache_kobj) 2849 kobject_put(bcache_kobj); 2850 if (bcache_wq) 2851 destroy_workqueue(bcache_wq); 2852 if (bch_journal_wq) 2853 destroy_workqueue(bch_journal_wq); 2854 2855 if (bcache_major) 2856 unregister_blkdev(bcache_major, "bcache"); 2857 unregister_reboot_notifier(&reboot); 2858 mutex_destroy(&bch_register_lock); 2859 } 2860 2861 /* Check and fixup module parameters */ 2862 static void check_module_parameters(void) 2863 { 2864 if (bch_cutoff_writeback_sync == 0) 2865 bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC; 2866 else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) { 2867 pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n", 2868 bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX); 2869 bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX; 2870 } 2871 2872 if (bch_cutoff_writeback == 0) 2873 bch_cutoff_writeback = CUTOFF_WRITEBACK; 2874 else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) { 2875 pr_warn("set bch_cutoff_writeback (%u) to max value %u\n", 2876 bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX); 2877 bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX; 2878 } 2879 2880 if (bch_cutoff_writeback > bch_cutoff_writeback_sync) { 2881 pr_warn("set bch_cutoff_writeback (%u) to %u\n", 2882 bch_cutoff_writeback, bch_cutoff_writeback_sync); 2883 bch_cutoff_writeback = bch_cutoff_writeback_sync; 2884 } 2885 } 2886 2887 static int __init bcache_init(void) 2888 { 2889 static const struct attribute *files[] = { 2890 &ksysfs_register.attr, 2891 &ksysfs_register_quiet.attr, 2892 #ifdef CONFIG_BCACHE_ASYNC_REGISTRATION 2893 &ksysfs_register_async.attr, 2894 #endif 2895 &ksysfs_pendings_cleanup.attr, 2896 NULL 2897 }; 2898 2899 check_module_parameters(); 2900 2901 mutex_init(&bch_register_lock); 2902 init_waitqueue_head(&unregister_wait); 2903 register_reboot_notifier(&reboot); 2904 2905 bcache_major = register_blkdev(0, "bcache"); 2906 if (bcache_major < 0) { 2907 unregister_reboot_notifier(&reboot); 2908 mutex_destroy(&bch_register_lock); 2909 return bcache_major; 2910 } 2911 2912 bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0); 2913 if (!bcache_wq) 2914 goto err; 2915 2916 bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0); 2917 if (!bch_journal_wq) 2918 goto err; 2919 2920 bcache_kobj = kobject_create_and_add("bcache", fs_kobj); 2921 if (!bcache_kobj) 2922 goto err; 2923 2924 if (bch_request_init() || 2925 sysfs_create_files(bcache_kobj, files)) 2926 goto err; 2927 2928 bch_debug_init(); 2929 closure_debug_init(); 2930 2931 bcache_is_reboot = false; 2932 2933 return 0; 2934 err: 2935 bcache_exit(); 2936 return -ENOMEM; 2937 } 2938 2939 /* 2940 * Module hooks 2941 */ 2942 module_exit(bcache_exit); 2943 module_init(bcache_init); 2944 2945 module_param(bch_cutoff_writeback, uint, 0); 2946 MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback"); 2947 2948 module_param(bch_cutoff_writeback_sync, uint, 0); 2949 MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback"); 2950 2951 MODULE_DESCRIPTION("Bcache: a Linux block layer cache"); 2952 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); 2953 MODULE_LICENSE("GPL"); 2954