/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device
 * from a cache set at runtime - while it's mounted and in use. Detaching
 * implicitly invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There are also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things
 * to it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is
 * the unit of allocation; they're typically around 1 mb - anywhere from 128k
 * to 2M+ works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a
 * variable number of pointers attached to them (potentially zero, which is
 * handy for invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of
 * the extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
 * Pointers contain the cache device id, the offset on that device, and an 8
 * bit generation number. More on the gen later.
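 *
 * (An illustrative example, with made up numbers: a 16 sector extent of inode
 * 5 ending at sector 1040 is indexed under the key 5:1040 with
 * KEY_SIZE() == 16; a pointer on that key might read "cache device 0, offset
 * 123456, gen 7", i.e. PTR_DEV() == 0, PTR_OFFSET() == 123456 and
 * PTR_GEN() == 7, and it is only honoured while the bucket containing sector
 * 123456 on that device still has gen 7.)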
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree; insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, for background writeback, and for
 * the moving garbage collector.
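 *
 * (For example: the cache miss path remembers what the index said for that
 * extent at lookup time, reads the data from the backing device, and then
 * inserts the new key only if the index is still unchanged - if a foreground
 * write raced with it, the replace fails and the now stale insert is simply
 * dropped.)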
 *
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed
 * when a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree
 * node - no less than 1/4th - but a bucket still contains no more than a
 * single btree node. I'd actually like to change this, but for now we rely on
 * the bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1Mb of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written
 * and the keys we haven't. So to do a lookup in a btree node, we have to
 * search each sorted set. But we do merge written sets together lazily, so the
 * cost of these extra searches is quite low (normally most of the keys in a
 * btree node will be in one big set, and then there'll be one or two sets that
 * are much smaller).
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more
 * than some threshold, it rewrites the btree node to avoid the bucket's
 * generation wrapping around. It also merges adjacent btree nodes if they're
 * empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete
 * a write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without
 * the journal, on random write workloads we constantly have to update all the
 * leaf nodes in the btree, and those writes will be mostly empty (appending at
 * most a few keys each) - highly inefficient in terms of amount of metadata
 * writes, and it puts more strain on the various btree resorting/compacting
 * code.
 *
 * The journal is just a log of keys we've inserted; on startup we just
 * reinsert all the keys in the open journal entries. That means that when
 * we're updating a node in the btree, we can wait until a 4k block of keys
 * fills up before writing them out.
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */

#define pr_fmt(fmt) "bcache: %s() " fmt, __func__

#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include "bset.h"
#include "util.h"
#include "closure.h"

struct bucket {
        atomic_t pin;
        uint16_t prio;
        uint8_t gen;
        uint8_t last_gc;  /* Most out of date gen in the btree */
        uint16_t gc_mark; /* Bitfield used by GC. See below for field */
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	1
#define GC_MARK_DIRTY		2
#define GC_MARK_METADATA	3
#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
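
/*
 * Resulting layout of the 16 bit gc_mark word (derived from the BITMASK()
 * parameters above):
 *
 *   bits  0..1   GC_MARK          (reclaimable / dirty / metadata)
 *   bits  2..14  GC_SECTORS_USED  (13 bits, capped at MAX_GC_SECTORS_USED)
 *   bit   15     GC_MOVE
 */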

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
        struct rb_node node;
        BKEY_PADDED(key);
        void *private;
};

struct keybuf {
        struct bkey last_scanned;
        spinlock_t lock;

        /*
         * Beginning and end of range in rb tree - so that we can skip taking
         * lock and checking the rb tree when we need to check for overlapping
         * keys.
         */
        struct bkey start;
        struct bkey end;

        struct rb_root keys;

#define KEYBUF_NR 500
        DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};
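
/*
 * A keybuf is a fixed size (KEYBUF_NR) buffer of keys harvested from the
 * btree, kept in an rb tree so overlapping keys are easy to check for;
 * writeback and the moving garbage collector each keep one (writeback_keys
 * and moving_gc_keys below) to batch up the keys they are about to operate
 * on.
 */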

struct bcache_device {
        struct closure cl;

        struct kobject kobj;

        struct cache_set *c;
        unsigned int id;
#define BCACHEDEVNAME_SIZE 12
        char name[BCACHEDEVNAME_SIZE];

        struct gendisk *disk;

        unsigned long flags;
#define BCACHE_DEV_CLOSING		0
#define BCACHE_DEV_DETACHING		1
#define BCACHE_DEV_UNLINK_DONE		2
#define BCACHE_DEV_WB_RUNNING		3
#define BCACHE_DEV_RATE_DW_RUNNING	4
        unsigned int nr_stripes;
        unsigned int stripe_size;
        atomic_t *stripe_sectors_dirty;
        unsigned long *full_dirty_stripes;

        struct bio_set bio_split;

        unsigned int data_csum:1;

        int (*cache_miss)(struct btree *b, struct search *s,
                          struct bio *bio, unsigned int sectors);
        int (*ioctl)(struct bcache_device *d, fmode_t mode,
                     unsigned int cmd, unsigned long arg);
};

struct io {
        /* Used to track sequential IO so it can be skipped */
        struct hlist_node hash;
        struct list_head lru;

        unsigned long jiffies;
        unsigned int sequential;
        sector_t last;
};

enum stop_on_failure {
        BCH_CACHED_DEV_STOP_AUTO = 0,
        BCH_CACHED_DEV_STOP_ALWAYS,
        BCH_CACHED_DEV_STOP_MODE_MAX,
};

struct cached_dev {
        struct list_head list;
        struct bcache_device disk;
        struct block_device *bdev;

        struct cache_sb sb;
        struct cache_sb_disk *sb_disk;
        struct bio sb_bio;
        struct bio_vec sb_bv[1];
        struct closure sb_write;
        struct semaphore sb_write_mutex;

        /* Refcount on the cache set. Always nonzero when we're caching. */
        refcount_t count;
        struct work_struct detach;

        /*
         * Device might not be running if it's dirty and the cache set hasn't
         * shown up yet.
         */
        atomic_t running;

        /*
         * Writes take a shared lock from start to finish; scanning for dirty
         * data to refill the rb tree requires an exclusive lock.
         */
        struct rw_semaphore writeback_lock;

        /*
         * Nonzero, and writeback has a refcount (d->count), iff there is
         * dirty data in the cache. Protected by writeback_lock; must have a
         * shared lock to set and an exclusive lock to clear.
         */
        atomic_t has_dirty;

#define BCH_CACHE_READA_ALL		0
#define BCH_CACHE_READA_META_ONLY	1
        unsigned int cache_readahead_policy;
        struct bch_ratelimit writeback_rate;
        struct delayed_work writeback_rate_update;

        /* Limit number of writeback bios in flight */
        struct semaphore in_flight;
        struct task_struct *writeback_thread;
        struct workqueue_struct *writeback_write_wq;

        struct keybuf writeback_keys;

        struct task_struct *status_update_thread;
        /*
         * Order the write-half of writeback operations strongly in dispatch
         * order. (Maintain LBA order; don't allow reads completing out of
         * order to re-order the writes...)
         */
        struct closure_waitlist writeback_ordering_wait;
        atomic_t writeback_sequence_next;

        /* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
        struct io io[RECENT_IO];
        struct hlist_head io_hash[RECENT_IO + 1];
        struct list_head io_lru;
        spinlock_t io_lock;

        struct cache_accounting accounting;

        /* The rest of this all shows up in sysfs */
        unsigned int sequential_cutoff;
        unsigned int readahead;

        unsigned int io_disable:1;
        unsigned int verify:1;
        unsigned int bypass_torture_test:1;

        unsigned int partial_stripes_expensive:1;
        unsigned int writeback_metadata:1;
        unsigned int writeback_running:1;
        unsigned char writeback_percent;
        unsigned int writeback_delay;
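
        /*
         * Writeback rate control. A rough sketch of the PI-style controller
         * these fields suggest (the authoritative logic lives in
         * writeback.c): every writeback_rate_update_seconds the error between
         * the current amount of dirty data and writeback_rate_target is
         * divided by writeback_rate_p_term_inverse for the proportional term
         * and accumulated (scaled by writeback_rate_i_term_inverse) for the
         * integral term; their sum, never less than writeback_rate_minimum,
         * becomes the new writeback_rate.
         */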

        uint64_t writeback_rate_target;
        int64_t writeback_rate_proportional;
        int64_t writeback_rate_integral;
        int64_t writeback_rate_integral_scaled;
        int32_t writeback_rate_change;

        unsigned int writeback_rate_update_seconds;
        unsigned int writeback_rate_i_term_inverse;
        unsigned int writeback_rate_p_term_inverse;
        unsigned int writeback_rate_minimum;

        enum stop_on_failure stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
        atomic_t io_errors;
        unsigned int error_limit;
        unsigned int offline_seconds;

        char backing_dev_name[BDEVNAME_SIZE];
};

enum alloc_reserve {
        RESERVE_BTREE,
        RESERVE_PRIO,
        RESERVE_MOVINGGC,
        RESERVE_NONE,
        RESERVE_NR,
};

struct cache {
        struct cache_set *set;
        struct cache_sb sb;
        struct cache_sb_disk *sb_disk;
        struct bio sb_bio;
        struct bio_vec sb_bv[1];

        struct kobject kobj;
        struct block_device *bdev;

        struct task_struct *alloc_thread;

        struct closure prio;
        struct prio_set *disk_buckets;

        /*
         * When allocating new buckets, prio_write() gets first dibs - since
         * we may not be able to allocate at all without writing priorities
         * and gens. prio_last_buckets[] contains the last buckets we wrote
         * priorities to (so gc can mark them as metadata), prio_buckets[]
         * contains the buckets allocated for the next prio write.
         */
        uint64_t *prio_buckets;
        uint64_t *prio_last_buckets;

        /*
         * free: Buckets that are ready to be used
         *
         * free_inc: Incoming buckets - these are buckets that currently have
         * cached data in them, and we can't reuse them until after we write
         * their new gen to disk. After prio_write() finishes writing the new
         * gens/prios, they'll be moved to the free list (and possibly
         * discarded in the process)
         */
        DECLARE_FIFO(long, free)[RESERVE_NR];
        DECLARE_FIFO(long, free_inc);

        size_t fifo_last_bucket;

        /* Allocation stuff: */
        struct bucket *buckets;

        DECLARE_HEAP(struct bucket *, heap);

        /*
         * If nonzero, we know we aren't going to find any buckets to
         * invalidate until a gc finishes - otherwise we could pointlessly
         * burn a ton of cpu
         */
        unsigned int invalidate_needs_gc;

        bool discard; /* Get rid of? */

        struct journal_device journal;

        /* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT 20
        atomic_t io_errors;
        atomic_t io_count;

        atomic_long_t meta_sectors_written;
        atomic_long_t btree_sectors_written;
        atomic_long_t sectors_written;

        char cache_dev_name[BDEVNAME_SIZE];
};

struct gc_stat {
        size_t nodes;
        size_t nodes_pre;
        size_t key_bytes;

        size_t nkeys;
        uint64_t data;       /* sectors */
        unsigned int in_use; /* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache
 * set; we'll continue to run normally for a while with CACHE_SET_STOPPING set
 * (i.e. flushing dirty data).
 *
 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 * replay is complete.
 *
 * CACHE_SET_IO_DISABLE is set when bcache is stopping the whole cache set;
 * all external and internal I/O should be denied when this flag is set.
 */
#define CACHE_SET_UNREGISTERING		0
#define CACHE_SET_STOPPING		1
#define CACHE_SET_RUNNING		2
#define CACHE_SET_IO_DISABLE		3

struct cache_set {
        struct closure cl;

        struct list_head list;
        struct kobject kobj;
        struct kobject internal;
        struct dentry *debug;
        struct cache_accounting accounting;

        unsigned long flags;
        atomic_t idle_counter;
        atomic_t at_max_writeback_rate;

        struct cache_sb sb;

        struct cache *cache[MAX_CACHES_PER_SET];
        struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
        int caches_loaded;

        struct bcache_device **devices;
        unsigned int devices_max_used;
        atomic_t attached_dev_nr;
        struct list_head cached_devs;
        uint64_t cached_dev_sectors;
        atomic_long_t flash_dev_dirty_sectors;
        struct closure caching;

        struct closure sb_write;
        struct semaphore sb_write_mutex;

        mempool_t search;
        mempool_t bio_meta;
        struct bio_set bio_split;

        /* For the btree cache */
        struct shrinker shrink;

        /* For the btree cache and anything allocation related */
        struct mutex bucket_lock;

        /* log2(bucket_size), in sectors */
        unsigned short bucket_bits;

        /* log2(block_size), in sectors */
        unsigned short block_bits;

        /*
         * Default number of pages for a new btree node - may be less than a
         * full bucket
         */
        unsigned int btree_pages;

        /*
         * Lists of struct btrees; lru is the list for structs that have
         * memory allocated for the actual btree node, freed is for structs
         * that do not.
         *
         * We never free a struct btree, except on shutdown - we just put it
         * on the btree_cache_freed list and reuse it later. This simplifies
         * the code, and it doesn't cost us much memory as the memory usage is
         * dominated by buffers that hold the actual btree node data and those
         * can be freed - and the number of struct btrees allocated is
         * effectively bounded.
         *
         * btree_cache_freeable effectively is a small cache - we use it
         * because high order page allocations can be rather expensive, and
         * it's quite common to delete and allocate btree nodes in quick
         * succession. It should never grow past ~2-3 nodes in practice.
         */
        struct list_head btree_cache;
        struct list_head btree_cache_freeable;
        struct list_head btree_cache_freed;

        /* Number of elements in btree_cache + btree_cache_freeable lists */
        unsigned int btree_cache_used;

        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree
         * cache to satisfy the allocation - lock to guarantee only one thread
         * does this at a time:
         */
        wait_queue_head_t btree_cache_wait;
        struct task_struct *btree_cache_alloc_lock;
        spinlock_t btree_cannibalize_lock;

        /*
         * When we free a btree node, we increment the gen of the bucket the
         * node is in - but we can't rewrite the prios and gens until we've
         * finished whatever it is we were doing, otherwise after a crash the
         * btree node would be freed but, for say a split, we might not have
         * the pointers to the new nodes inserted into the btree yet.
         *
         * This is a refcount that blocks prio_write() until the new keys are
         * written.
         */
        atomic_t prio_blocked;
        wait_queue_head_t bucket_wait;

        /*
         * For any bio we don't skip we subtract the number of sectors from
         * rescale; when it hits 0 we rescale all the bucket priorities.
         */
        atomic_t rescale;
        /*
         * Used by GC to check whether any front side I/O is in flight.
         */
        atomic_t search_inflight;
        /*
         * When we invalidate buckets, we use both the priority and the amount
         * of good data to determine which buckets to reuse first - to weight
         * those together consistently we keep track of the smallest nonzero
         * priority of any bucket.
         */
        uint16_t min_prio;

        /*
         * max(gen - last_gc) for all buckets. When it gets too big we have to
         * gc to keep gens from wrapping around.
         */
        uint8_t need_gc;
        struct gc_stat gc_stats;
        size_t nbuckets;
        size_t avail_nbuckets;

        struct task_struct *gc_thread;
        /* Where in the btree gc currently is */
        struct bkey gc_done;

        /*
         * For automatic garbage collection after writeback completes, this
         * variable is used as a bit field:
         * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
         * - 0000 0010b (BCH_DO_AUTO_GC):     do gc after writeback
         * This is an optimization for write requests that follow a completed
         * writeback, whose read hit rate would otherwise drop because the
         * clean data in the cache has been discarded. Unless the user
         * explicitly enables it via sysfs, it stays disabled.
         */
#define BCH_ENABLE_AUTO_GC	1
#define BCH_DO_AUTO_GC		2
        uint8_t gc_after_writeback;

        /*
         * The allocation code needs gc_mark in struct bucket to be correct,
         * but it's not while a gc is in progress. Protected by bucket_lock.
         */
        int gc_mark_valid;

        /* Counts how many sectors bio_insert has added to the cache */
        atomic_t sectors_to_gc;
        wait_queue_head_t gc_wait;

        struct keybuf moving_gc_keys;
        /* Number of moving GC bios in flight */
        struct semaphore moving_in_flight;

        struct workqueue_struct *moving_gc_wq;

        struct btree *root;

#ifdef CONFIG_BCACHE_DEBUG
        struct btree *verify_data;
        struct bset *verify_ondisk;
        struct mutex verify_lock;
#endif

        unsigned int nr_uuids;
        struct uuid_entry *uuids;
        BKEY_PADDED(uuid_bucket);
        struct closure uuid_write;
        struct semaphore uuid_write_mutex;

        /*
         * A btree node on disk could have too many bsets for an iterator to
         * fit on the stack - so we have to dynamically allocate them.
         * bch_cache_set_alloc() makes sure the pool can allocate iterators
         * with enough room for (sb.bucket_size / sb.block_size)
         * btree_iter_sets, which is more than the static MAX_BSETS.
         */
        mempool_t fill_iter;

        struct bset_sort_state sort;

        /* List of buckets we're currently writing data to */
        struct list_head data_buckets;
        spinlock_t data_bucket_lock;

        struct journal journal;

#define CONGESTED_MAX 1024
        unsigned int congested_last_us;
        atomic_t congested;

        /* The rest of this all shows up in sysfs */
        unsigned int congested_read_threshold_us;
        unsigned int congested_write_threshold_us;

        struct time_stats btree_gc_time;
        struct time_stats btree_split_time;
        struct time_stats btree_read_time;

        atomic_long_t cache_read_races;
        atomic_long_t writeback_keys_done;
        atomic_long_t writeback_keys_failed;

        atomic_long_t reclaim;
        atomic_long_t reclaimed_journal_buckets;
        atomic_long_t flush_write;

        enum {
                ON_ERROR_UNREGISTER,
                ON_ERROR_PANIC,
        } on_error;
#define DEFAULT_IO_ERROR_LIMIT 8
        unsigned int error_limit;
        unsigned int error_decay;

        unsigned short journal_delay_ms;
        bool expensive_debug_checks;
        unsigned int verify:1;
        unsigned int key_merging_disabled:1;
        unsigned int gc_always_rewrite:1;
        unsigned int shrinker_disabled:1;
        unsigned int copy_gc_enabled:1;
        unsigned int idle_max_writeback_rate_enabled:1;

#define BUCKET_HASH_BITS 12
        struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
};

struct bbio {
        unsigned int submit_time_us;
        union {
                struct bkey key;
                uint64_t _pad[3];
                /*
                 * We only need pad = 3 here because we only ever carry around
                 * a single pointer - i.e. the pointer we're doing io to/from.
                 */
        };
        struct bio bio;
};

#define BTREE_PRIO	USHRT_MAX
#define INITIAL_PRIO	32768U

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)						\
	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)					\
	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
#define block_bytes(c)		((c)->sb.block_size << 9)

#define prios_per_bucket(c)				\
	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
	 sizeof(struct bucket_disk))
#define prio_buckets(c)					\
	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->sb.bucket_size - 1);
}
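
/*
 * Worked example for the helpers above (illustrative numbers only): with
 * 1 MiB buckets, sb.bucket_size is 2048 (512 byte sectors) and bucket_bits is
 * 11, so sector_to_bucket(c, 10240) == 5, bucket_to_sector(c, 5) == 10240 and
 * bucket_remainder(c, 10241) == 1; bucket_bytes() is simply bucket_size << 9.
 */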

static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned int ptr)
{
	return c->cache[PTR_DEV(k, ptr)];
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned int ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned int ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}
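
/*
 * Generations are 8 bit and wrap; gen_after(a, b) returns how far a is ahead
 * of b (0 if it isn't ahead), so ptr_stale() below is the number of times the
 * pointed-to bucket has been invalidated since the pointer was written -
 * nonzero means the pointer is stale and must be ignored.
 */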

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;

	return r > 128U ? 0 : r;
}

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned int i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned int i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}

/* Btree key macros */

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: The checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i)							\
	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
		  ((void *) bset_bkey_last(i)) -			\
		  (((void *) (i)) + sizeof(uint64_t)))

/* Error handling macros */

#define btree_bug(b, ...)						\
do {									\
	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define cache_bug(c, ...)						\
do {									\
	if (bch_cache_set_error(c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define btree_bug_on(cond, b, ...)					\
do {									\
	if (cond)							\
		btree_bug(b, __VA_ARGS__);				\
} while (0)

#define cache_bug_on(cond, c, ...)					\
do {									\
	if (cond)							\
		cache_bug(c, __VA_ARGS__);				\
} while (0)

#define cache_set_err_on(cond, c, ...)					\
do {									\
	if (cond)							\
		bch_cache_set_error(c, __VA_ARGS__);			\
} while (0)

/* Looping macros */

#define for_each_cache(ca, cs, iter)					\
	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (refcount_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!refcount_inc_not_zero(&dc->count))
		return false;

	/* Paired with the mb in cached_dev_attach */
	smp_mb__after_atomic();
	return true;
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

#define BUCKET_GC_GEN_MAX	96U
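
/*
 * Once bucket_gc_gen() gets close to BUCKET_GC_GEN_MAX the bucket shouldn't
 * be invalidated again until gc has run and refreshed last_gc - this is the
 * gen wraparound protection described in the GARBAGE COLLECTION overview at
 * the top of this file (see also need_gc and invalidate_needs_gc above).
 */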

#define kobj_attribute_write(n, fn)					\
	static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)

#define kobj_attribute_rw(n, show, store)				\
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, 0600, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca;
	unsigned int i;

	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);
}

static inline void closure_bio_submit(struct cache_set *c,
				      struct bio *bio,
				      struct closure *cl)
{
	closure_get(cl);
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}
	generic_make_request(bio);
}

/*
 * Prevent the kthread from exiting on its own, and make sure that when
 * kthread_stop() is called to stop a kthread, it is still alive. If a kthread
 * might be stopped by the CACHE_SET_IO_DISABLE bit being set,
 * wait_for_kthread_stop() is necessary before the kthread returns.
 */
static inline void wait_for_kthread_stop(void)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/* Forward declarations */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
void bch_count_io_errors(struct cache *ca, blk_status_t error,
			 int is_read, const char *m);
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m);
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m);
void bch_bbio_free(struct bio *bio, struct cache_set *c);
struct bio *bch_bbio_alloc(struct cache_set *c);

void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr);

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
void bch_rescale_priorities(struct cache_set *c, int sectors);

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);

void __bch_bucket_free(struct cache *ca, struct bucket *b);
void bch_bucket_free(struct cache_set *c, struct bkey *k);

long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, int n, bool wait);
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, int n, bool wait);
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
		       unsigned int sectors, unsigned int write_point,
		       unsigned int write_prio, bool wait);
bool bch_cached_dev_error(struct cached_dev *dc);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);

int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);

extern struct workqueue_struct *bcache_wq;
extern struct workqueue_struct *bch_journal_wq;
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *kobj);
void bch_flash_dev_release(struct kobject *kobj);
void bch_cache_set_release(struct kobject *kobj);
void bch_cache_release(struct kobject *kobj);

int bch_uuid_write(struct cache_set *c);
void bcache_write_super(struct cache_set *c);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid);
void bch_cached_dev_detach(struct cached_dev *dc);
int bch_cached_dev_run(struct cached_dev *dc);
void bcache_device_stop(struct bcache_device *d);

void bch_cache_set_unregister(struct cache_set *c);
void bch_cache_set_stop(struct cache_set *c);

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
void bch_btree_cache_free(struct cache_set *c);
int bch_btree_cache_alloc(struct cache_set *c);
void bch_moving_init_cache_set(struct cache_set *c);
int bch_open_buckets_alloc(struct cache_set *c);
void bch_open_buckets_free(struct cache_set *c);

int bch_cache_allocator_start(struct cache *ca);

void bch_debug_exit(void);
void bch_debug_init(void);
void bch_request_exit(void);
int bch_request_init(void);

#endif /* _BCACHE_H */