#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device
 * from a cache set at runtime - while it's mounted and in use. Detaching
 * implicitly invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There's also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things
 * to it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is
 * the unit of allocation; they're typically around 1 MB - anywhere from 128k
 * to 2M+ works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a
 * variable number of pointers attached to them (potentially zero, which is
 * handy for invalidating the cache).
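 *
 * As a purely illustrative sketch (the real accessors are the KEY_*() and
 * PTR_*() macros from the on disk format in <linux/bcache.h>; the lines below
 * are just those accessors applied to a key k), a key for a cached extent
 * carries roughly:
 *
 *	KEY_INODE(k)	which backing device/flash only volume it belongs to
 *	KEY_OFFSET(k)	where the extent ends, in sectors
 *	KEY_SIZE(k)	how many sectors the extent covers
 *	KEY_PTRS(k)	how many cache pointers it has (0 is legal - it just
 *			invalidates the range)
 *	PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i)
 *			where pointer i points, and the gen it was created with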
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of
 * the extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
 * Pointers contain the cache device id, the offset on that device, and an 8
 * bit generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree: insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, and for background writeback, and
 * for the moving garbage collector.
 *
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed
 * when a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree
 * node - no less than 1/4th - but a bucket still contains no more than a
 * single btree node. I'd actually like to change this, but for now we rely on
 * the bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1 MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written
 * and the keys we haven't. So to do a lookup in a btree node, we have to
 * search each sorted set. But we do merge written sets together lazily, so the
 * cost of these extra searches is quite low (normally most of the keys in a
 * btree node will be in one big set, and then there'll be one or two sets that
 * are much smaller).
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
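 *
 * (A sketch of the mechanism, in terms of helpers defined later in this file:
 * a pointer is only usable while PTR_GEN(k, i) still matches the gen of the
 * bucket it points into, i.e. while ptr_stale(c, k, i) == 0; incrementing the
 * bucket's gen makes every existing pointer into that bucket stale at once.)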
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more
 * than some threshold, it rewrites the btree node to avoid the bucket's
 * generation wrapping around. It also merges adjacent btree nodes if they're
 * empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete
 * a write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without
 * the journal, on random write workloads we constantly have to update all the
 * leaf nodes in the btree, and those writes will be mostly empty (appending at
 * most a few keys each) - highly inefficient in terms of the amount of
 * metadata written, and it puts more strain on the various btree
 * resorting/compacting code.
 *
 * The journal is just a log of keys we've inserted; on startup we just
 * reinsert all the keys in the open journal entries. That means that when
 * we're updating a node in the btree, we can wait until a 4k block of keys
 * fills up before writing them out.
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "bset.h"
#include "util.h"
#include "closure.h"

struct bucket {
	atomic_t	pin;
	uint16_t	prio;
	uint8_t		gen;
	uint8_t		disk_gen;
	uint8_t		last_gc; /* Most out of date gen in the btree */
	uint8_t		gc_gen;
	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
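	/*
	 * (Field layout, going by the BITMASK() definitions below: bits 0-1
	 * are GC_MARK, bits 2-14 are GC_SECTORS_USED, bit 15 is GC_MOVE.)
	 */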
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	0
#define GC_MARK_DIRTY		1
#define GC_MARK_METADATA	2
#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
	struct rb_node		node;
	BKEY_PADDED(key);
	void			*private;
};

struct keybuf {
	struct bkey		last_scanned;
	spinlock_t		lock;

	/*
	 * Beginning and end of range in rb tree - so that we can skip taking
	 * lock and checking the rb tree when we need to check for overlapping
	 * keys.
	 */
	struct bkey		start;
	struct bkey		end;

	struct rb_root		keys;

#define KEYBUF_NR		500
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};

struct bio_split_pool {
	struct bio_set		*bio_split;
	mempool_t		*bio_split_hook;
};

struct bio_split_hook {
	struct closure		cl;
	struct bio_split_pool	*p;
	struct bio		*bio;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
};

struct bcache_device {
	struct closure		cl;

	struct kobject		kobj;

	struct cache_set	*c;
	unsigned		id;
#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

	struct gendisk		*disk;

	unsigned long		flags;
#define BCACHE_DEV_CLOSING	0
#define BCACHE_DEV_DETACHING	1
#define BCACHE_DEV_UNLINK_DONE	2

	unsigned		nr_stripes;
	unsigned		stripe_size;
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

	unsigned long		sectors_dirty_last;
	long			sectors_dirty_derivative;

	struct bio_set		*bio_split;

	unsigned		data_csum:1;

	int (*cache_miss)(struct btree *, struct search *,
			  struct bio *, unsigned);
	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);

	struct bio_split_pool	bio_split_hook;
};

struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		jiffies;
	unsigned		sequential;
	sector_t		last;
};

struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;
	struct block_device	*bdev;

	struct cache_sb		sb;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	/* Refcount on the cache set. Always nonzero when we're caching. */
	atomic_t		count;
	struct work_struct	detach;

	/*
	 * Device might not be running if it's dirty and the cache set hasn't
	 * shown up yet.
	 */
	atomic_t		running;

	/*
	 * Writes take a shared lock from start to finish; scanning for dirty
	 * data to refill the rb tree requires an exclusive lock.
	 */
	struct rw_semaphore	writeback_lock;

	/*
	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
	 * data in the cache. Protected by writeback_lock; must have a
	 * shared lock to set and an exclusive lock to clear.
	 */
	atomic_t		has_dirty;

	struct bch_ratelimit	writeback_rate;
	struct delayed_work	writeback_rate_update;

	/*
	 * Internal to the writeback code, so read_dirty() can keep track of
	 * where it's at.
	 */
	sector_t		last_read;

	/* Limit number of writeback bios in flight */
	struct semaphore	in_flight;
	struct task_struct	*writeback_thread;

	struct keybuf		writeback_keys;

	/* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting	accounting;

	/* The rest of this all shows up in sysfs */
	unsigned		sequential_cutoff;
	unsigned		readahead;

	unsigned		verify:1;
	unsigned		bypass_torture_test:1;

	unsigned		partial_stripes_expensive:1;
	unsigned		writeback_metadata:1;
	unsigned		writeback_running:1;
	unsigned char		writeback_percent;
	unsigned		writeback_delay;

	uint64_t		writeback_rate_target;
	int64_t			writeback_rate_proportional;
	int64_t			writeback_rate_derivative;
	int64_t			writeback_rate_change;

	unsigned		writeback_rate_update_seconds;
	unsigned		writeback_rate_d_term;
	unsigned		writeback_rate_p_term_inverse;
};

enum alloc_reserve {
	RESERVE_BTREE,
	RESERVE_PRIO,
	RESERVE_MOVINGGC,
	RESERVE_NONE,
	RESERVE_NR,
};

struct cache {
	struct cache_set	*set;
	struct cache_sb		sb;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];

	struct kobject		kobj;
	struct block_device	*bdev;

	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;

	/*
	 * When allocating new buckets, prio_write() gets first dibs - since we
	 * may not be able to allocate at all without writing priorities and
	 * gens. prio_buckets[] contains the last buckets we wrote priorities
	 * to (so gc can mark them as metadata), prio_next[] contains the
	 * buckets allocated for the next prio write.
	 */
	uint64_t		*prio_buckets;
	uint64_t		*prio_last_buckets;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly
	 * discarded in the process)
	 *
	 * unused: GC found nothing pointing into these buckets (possibly
	 * because all the data they contained was overwritten), so we only
	 * need to discard them before they can be moved to the free list.
	 */
	DECLARE_FIFO(long, free)[RESERVE_NR];
	DECLARE_FIFO(long, free_inc);
	DECLARE_FIFO(long, unused);

	size_t			fifo_last_bucket;

	/* Allocation stuff: */
	struct bucket		*buckets;

	DECLARE_HEAP(struct bucket *, heap);

	/*
	 * max(gen - disk_gen) for all buckets. When it gets too big we have to
	 * call prio_write() to keep gens from wrapping.
	 */
	uint8_t			need_save_prio;

	/*
	 * If nonzero, we know we aren't going to find any buckets to
	 * invalidate until a gc finishes - otherwise we could pointlessly burn
	 * a ton of cpu
	 */
	unsigned		invalidate_needs_gc:1;

	bool			discard; /* Get rid of? */

	struct journal_device	journal;

	/* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT		20
	atomic_t		io_errors;
	atomic_t		io_count;

	atomic_long_t		meta_sectors_written;
	atomic_long_t		btree_sectors_written;
	atomic_long_t		sectors_written;

	struct bio_split_pool	bio_split_hook;
};

struct gc_stat {
	size_t			nodes;
	size_t			key_bytes;

	size_t			nkeys;
	uint64_t		data;	/* sectors */
	unsigned		in_use;	/* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache
 * set; we'll continue to run normally for a while with CACHE_SET_STOPPING set
 * (i.e. flushing dirty data).
 */
#define CACHE_SET_UNREGISTERING		0
#define CACHE_SET_STOPPING		1

struct cache_set {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct dentry		*debug;
	struct cache_accounting accounting;

	unsigned long		flags;

	struct cache_sb		sb;

	struct cache		*cache[MAX_CACHES_PER_SET];
	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
	int			caches_loaded;

	struct bcache_device	**devices;
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
	struct closure		caching;

	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	mempool_t		*search;
	mempool_t		*bio_meta;
	struct bio_set		*bio_split;

	/* For the btree cache */
	struct shrinker		shrink;

	/* For the btree cache and anything allocation related */
	struct mutex		bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short		bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short		block_bits;

	/*
	 * Default number of pages for a new btree node - may be less than a
	 * full bucket
	 */
	unsigned		btree_pages;

	/*
	 * Lists of struct btrees; lru is the list for structs that have memory
	 * allocated for the actual btree node, freed is for structs that do
	 * not.
	 *
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it
	 * because high order page allocations can be rather expensive, and
	 * it's quite common to delete and allocate btree nodes in quick
	 * succession. It should never grow past ~2-3 nodes in practice.
	 */
	struct list_head	btree_cache;
	struct list_head	btree_cache_freeable;
	struct list_head	btree_cache_freed;

	/* Number of elements in btree_cache + btree_cache_freeable lists */
	unsigned		bucket_cache_used;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation. However, only one thread can be doing this
	 * at a time, for obvious reasons - try_harder and try_wait are
	 * basically a lock for this that we can wait on asynchronously. The
	 * btree_root() macro releases the lock when it returns.
	 */
	struct task_struct	*try_harder;
	wait_queue_head_t	try_wait;
	uint64_t		try_harder_start;

	/*
	 * When we free a btree node, we increment the gen of the bucket the
	 * node is in - but we can't rewrite the prios and gens until we've
	 * finished whatever it is we were doing, otherwise after a crash the
	 * btree node would be freed but, for say a split, we might not have
	 * the pointers to the new nodes inserted into the btree yet.
	 *
	 * This is a refcount that blocks prio_write() until the new keys are
	 * written.
	 */
	atomic_t		prio_blocked;
	wait_queue_head_t	bucket_wait;

	/*
	 * For any bio we don't skip we subtract the number of sectors from
	 * rescale; when it hits 0 we rescale all the bucket priorities.
	 */
	atomic_t		rescale;
	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	uint16_t		min_prio;

	/*
	 * max(gen - gc_gen) for all buckets. When it gets too big we have to
	 * gc to keep gens from wrapping around.
	 */
	uint8_t			need_gc;
	struct gc_stat		gc_stats;
	size_t			nbuckets;

	struct task_struct	*gc_thread;
	/* Where in the btree gc currently is */
	struct bkey		gc_done;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct,
	 * but it's not while a gc is in progress. Protected by bucket_lock.
	 */
	int			gc_mark_valid;

	/* Counts how many sectors bio_insert has added to the cache */
	atomic_t		sectors_to_gc;

	wait_queue_head_t	moving_gc_wait;
	struct keybuf		moving_gc_keys;
	/* Number of moving GC bios in flight */
	struct semaphore	moving_in_flight;

	struct btree		*root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree		*verify_data;
	struct bset		*verify_ondisk;
	struct mutex		verify_lock;
#endif

	unsigned		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure		uuid_write;
	struct semaphore	uuid_write_mutex;

	/*
	 * A btree node on disk could have too many bsets for an iterator to
	 * fit on the stack - have to dynamically allocate them
	 */
	mempool_t		*fill_iter;

	struct bset_sort_state	sort;

	/* List of buckets we're currently writing data to */
	struct list_head	data_buckets;
	spinlock_t		data_bucket_lock;

	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned		congested_last_us;
	atomic_t		congested;

	/* The rest of this all shows up in sysfs */
	unsigned		congested_read_threshold_us;
	unsigned		congested_write_threshold_us;

	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
	struct time_stats	btree_read_time;
	struct time_stats	try_harder_time;

	atomic_long_t		cache_read_races;
	atomic_long_t		writeback_keys_done;
	atomic_long_t		writeback_keys_failed;

	enum {
		ON_ERROR_UNREGISTER,
		ON_ERROR_PANIC,
	}			on_error;
	unsigned		error_limit;
	unsigned		error_decay;

	unsigned short		journal_delay_ms;
	bool			expensive_debug_checks;
	unsigned		verify:1;
	unsigned		key_merging_disabled:1;
	unsigned		gc_always_rewrite:1;
	unsigned		shrinker_disabled:1;
	unsigned		copy_gc_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
};

struct bbio {
	unsigned		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
		/*
		 * We only need pad = 3 here because we only ever carry around
		 * a single pointer - i.e. the pointer we're doing io to/from.
		 */
	};
	struct bio		bio;
};

#define BTREE_PRIO		USHRT_MAX
#define INITIAL_PRIO		32768U

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)							\
	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)						\
	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
#define block_bytes(c)		((c)->sb.block_size << 9)

#define prios_per_bucket(c)						\
	((bucket_bytes(c) - sizeof(struct prio_set)) /			\
	 sizeof(struct bucket_disk))
#define prio_buckets(c)							\
	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->sb.bucket_size - 1);
}

static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned ptr)
{
	return c->cache[PTR_DEV(k, ptr)];
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;
	return r > 128U ? 0 : r;
}

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}

/* Btree key macros */

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: The checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i)							\
	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
		  ((void *) bset_bkey_last(i)) -			\
		  (((void *) (i)) + sizeof(uint64_t)))

/* Error handling macros */

#define btree_bug(b, ...)						\
do {									\
	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define cache_bug(c, ...)						\
do {									\
	if (bch_cache_set_error(c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define btree_bug_on(cond, b, ...)					\
do {									\
	if (cond)							\
		btree_bug(b, __VA_ARGS__);				\
} while (0)

#define cache_bug_on(cond, c, ...)					\
do {									\
	if (cond)							\
		cache_bug(c, __VA_ARGS__);				\
} while (0)

#define cache_set_err_on(cond, c, ...)					\
do {									\
	if (cond)							\
		bch_cache_set_error(c, __VA_ARGS__);			\
} while (0)

/* Looping macros */

#define for_each_cache(ca, cs, iter)					\
	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (atomic_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!atomic_inc_not_zero(&dc->count))
		return false;

	/* Paired with the mb in cached_dev_attach */
	smp_mb__after_atomic_inc();
	return true;
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 *
 * bucket_disk_gen() returns the difference between the current gen and the gen
 * on disk; they're both used to make sure gens don't wrap around.
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

static inline uint8_t bucket_disk_gen(struct bucket *b)
{
	return b->gen - b->disk_gen;
}

#define BUCKET_GC_GEN_MAX	96U
#define BUCKET_DISK_GEN_MAX	64U

#define kobj_attribute_write(n, fn)					\
	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)

#define kobj_attribute_rw(n, show, store)				\
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, S_IWUSR|S_IRUSR, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);
}

/* Forward declarations */

void bch_count_io_errors(struct cache *, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
			      int, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);

void bch_generic_make_request(struct bio *, struct bio_split_pool *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);

uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
bool bch_bucket_add_unused(struct cache *, struct bucket *);

long bch_bucket_alloc(struct cache *, unsigned, bool);
void bch_bucket_free(struct cache_set *, struct bkey *);

int __bch_bucket_alloc_set(struct cache_set *, unsigned,
			   struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
			 struct bkey *, int, bool);
bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
		       unsigned, unsigned, bool);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);

void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);

extern struct workqueue_struct *bcache_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *);
void bch_flash_dev_release(struct kobject *);
void bch_cache_set_release(struct kobject *);
void bch_cache_release(struct kobject *);

int bch_uuid_write(struct cache_set *);
void bcache_write_super(struct cache_set *);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
void bch_cached_dev_detach(struct cached_dev *);
void bch_cached_dev_run(struct cached_dev *);
void bcache_device_stop(struct bcache_device *);

void bch_cache_set_unregister(struct cache_set *);
void bch_cache_set_stop(struct cache_set *);

struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_moving_init_cache_set(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);
void bch_open_buckets_free(struct cache_set *);

int bch_cache_allocator_start(struct cache *ca);
int bch_cache_allocator_init(struct cache *ca);

void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
int bch_btree_init(void);

#endif /* _BCACHE_H */