/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort of
 * like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There are also flash only volumes - these are the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work, but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things to
 * it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is the
 * unit of allocation; they're typically around 1MB - anywhere from 128k to 2M+
 * works efficiently.
 *
 * Each bucket has a 16 bit priority and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
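 *
 * As an illustration only (not actual bcache code), the periodic decay
 * described above amounts to something like the following sketch, where
 * @buckets/@nbuckets stand in for the cache's bucket array:
 *
 *	void example_decay_prios(struct bucket *buckets, size_t nbuckets)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nbuckets; i++)
 *			if (buckets[i].prio > 0)
 *				buckets[i].prio--;
 *	}
 *
 * (the real decay is driven by the cache set's rescale counter - see
 * bch_rescale_priorities(), declared at the bottom of this file)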
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
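 *
 * For example: if a bucket's gen is 12, only pointers with gen 12 into that
 * bucket are valid; bumping the bucket's gen to 13 instantly invalidates
 * every existing pointer into it, with no index update required.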
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
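 *
 * For example (illustration only): an extent covering sectors 8..15 of inode 5
 * is indexed under the key 5:16 with size 8, so a lookup for sector s searches
 * for the first key in that inode with offset greater than s - which is why
 * storing the ending offset makes lookups slightly more convenient.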
 *
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree; insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, for background writeback, and for
 * the moving garbage collector.
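 *
 * Illustration only - the replace operation behaves roughly like a cmpxchg()
 * applied to the index (made-up pseudocode, not the real interface):
 *
 *	if (index_lookup(range covered by new_key) == old_key)
 *		index_insert(new_key);
 *	else
 *		bail out;	// e.g. a foreground write raced with us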
 *
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
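 *
 * Illustration only - conceptually, a lookup in a log structured node is:
 *
 *	for (each sorted set in the node, newest first)
 *		binary search the set for the key;
 *
 * (pseudocode; the real iterator lives in bset.c and merges as it goes)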
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of the amount of metadata
 * written, and it puts more strain on the various btree resorting/compacting
 * code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
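 *
 * For a sense of scale (illustrative numbers only): a bkey with a single
 * pointer is on the order of 24 bytes, so one 4k journal block can batch up
 * on the order of a hundred insertions that would otherwise each have
 * required rewriting part of a leaf node.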
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include "bset.h"
#include "util.h"
#include "closure.h"
struct bucket {
	atomic_t	pin;
	uint16_t	prio;
	uint8_t		gen;
	uint8_t		last_gc; /* Most out of date gen in the btree */
	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	1
#define GC_MARK_DIRTY		2
#define GC_MARK_METADATA	3
#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
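
/*
 * Illustration only - modulo details (see the BITMASK definition for the
 * authoritative version), each BITMASK() use above expands to a getter/setter
 * pair over a slice of bits in the named field; e.g.
 * BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2) generates roughly:
 *
 *	static inline uint64_t GC_MARK(const struct bucket *b)
 *	{
 *		return (b->gc_mark >> 0) & ~(~0ULL << 2);
 *	}
 *
 *	static inline void SET_GC_MARK(struct bucket *b, uint64_t v)
 *	{
 *		b->gc_mark &= ~(~(~0ULL << 2) << 0);
 *		b->gc_mark |= (v & ~(~0ULL << 2)) << 0;
 *	}
 *
 * so the 16 bit gc_mark field packs the mark (2 bits), GC_SECTORS_USED
 * (13 bits) and GC_MOVE (1 bit).
 */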

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
	struct rb_node		node;
	BKEY_PADDED(key);
	void			*private;
};

struct keybuf {
	struct bkey		last_scanned;
	spinlock_t		lock;

	/*
	 * Beginning and end of range in rb tree - so that we can skip taking
	 * the lock and checking the rb tree when we need to check for
	 * overlapping keys.
	 */
	struct bkey		start;
	struct bkey		end;

	struct rb_root		keys;

#define KEYBUF_NR		500
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};

struct bcache_device {
	struct closure		cl;

	struct kobject		kobj;

	struct cache_set	*c;
	unsigned		id;
#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

	struct gendisk		*disk;

	unsigned long		flags;
#define BCACHE_DEV_CLOSING		0
#define BCACHE_DEV_DETACHING		1
#define BCACHE_DEV_UNLINK_DONE		2
#define BCACHE_DEV_WB_RUNNING		3
#define BCACHE_DEV_RATE_DW_RUNNING	4
	unsigned		nr_stripes;
	unsigned		stripe_size;
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

	struct bio_set		*bio_split;

	unsigned		data_csum:1;

	int (*cache_miss)(struct btree *, struct search *,
			  struct bio *, unsigned);
	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
};

struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		jiffies;
	unsigned		sequential;
	sector_t		last;
};

enum stop_on_failure {
	BCH_CACHED_DEV_STOP_AUTO = 0,
	BCH_CACHED_DEV_STOP_ALWAYS,
	BCH_CACHED_DEV_STOP_MODE_MAX,
};

struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;
	struct block_device	*bdev;

	struct cache_sb		sb;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	/* Refcount on the cache set. Always nonzero when we're caching. */
	refcount_t		count;
	struct work_struct	detach;

	/*
	 * Device might not be running if it's dirty and the cache set hasn't
	 * shown up yet.
	 */
	atomic_t		running;

	/*
	 * Writes take a shared lock from start to finish; scanning for dirty
	 * data to refill the rb tree requires an exclusive lock.
	 */
	struct rw_semaphore	writeback_lock;

	/*
	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
	 * data in the cache. Protected by writeback_lock; must hold a shared
	 * lock to set and an exclusive lock to clear.
	 */
	atomic_t		has_dirty;

	/*
	 * Set to zero by things that touch the backing volume - except
	 * writeback. Incremented by writeback. Used to determine when to
	 * accelerate idle writeback.
	 */
	atomic_t		backing_idle;

	struct bch_ratelimit	writeback_rate;
	struct delayed_work	writeback_rate_update;

	/* Limit number of writeback bios in flight */
	struct semaphore	in_flight;
	struct task_struct	*writeback_thread;
	struct workqueue_struct	*writeback_write_wq;

	struct keybuf		writeback_keys;

	/*
	 * Order the write-half of writeback operations strongly in dispatch
	 * order.  (Maintain LBA order; don't allow reads completing out of
	 * order to re-order the writes...)
	 */
	struct closure_waitlist writeback_ordering_wait;
	atomic_t		writeback_sequence_next;

	/* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting	accounting;

	/* The rest of this all shows up in sysfs */
	unsigned		sequential_cutoff;
	unsigned		readahead;

	unsigned		io_disable:1;
	unsigned		verify:1;
	unsigned		bypass_torture_test:1;

	unsigned		partial_stripes_expensive:1;
	unsigned		writeback_metadata:1;
	unsigned		writeback_running:1;
	unsigned char		writeback_percent;
	unsigned		writeback_delay;

	uint64_t		writeback_rate_target;
	int64_t			writeback_rate_proportional;
	int64_t			writeback_rate_integral;
	int64_t			writeback_rate_integral_scaled;
	int32_t			writeback_rate_change;

	unsigned		writeback_rate_update_seconds;
	unsigned		writeback_rate_i_term_inverse;
	unsigned		writeback_rate_p_term_inverse;
	unsigned		writeback_rate_minimum;

	enum stop_on_failure	stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
	atomic_t		io_errors;
	unsigned		error_limit;

	char			backing_dev_name[BDEVNAME_SIZE];
};

enum alloc_reserve {
	RESERVE_BTREE,
	RESERVE_PRIO,
	RESERVE_MOVINGGC,
	RESERVE_NONE,
	RESERVE_NR,
};

struct cache {
	struct cache_set	*set;
	struct cache_sb		sb;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];

	struct kobject		kobj;
	struct block_device	*bdev;

	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;

	/*
	 * When allocating new buckets, prio_write() gets first dibs - since we
	 * may not be able to allocate at all without writing priorities and
	 * gens. prio_buckets[] contains the buckets allocated for the next
	 * prio write, prio_last_buckets[] contains the last buckets we wrote
	 * priorities to (so gc can mark them as metadata).
	 */
	uint64_t		*prio_buckets;
	uint64_t		*prio_last_buckets;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly discarded
	 * in the process)
	 */
	DECLARE_FIFO(long, free)[RESERVE_NR];
	DECLARE_FIFO(long, free_inc);

	size_t			fifo_last_bucket;

	/* Allocation stuff: */
	struct bucket		*buckets;

	DECLARE_HEAP(struct bucket *, heap);

	/*
	 * If nonzero, we know we aren't going to find any buckets to invalidate
	 * until a gc finishes - otherwise we could pointlessly burn a ton of
	 * cpu
	 */
	unsigned		invalidate_needs_gc;

	bool			discard; /* Get rid of? */

	struct journal_device	journal;

	/* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT		20
	atomic_t		io_errors;
	atomic_t		io_count;

	atomic_long_t		meta_sectors_written;
	atomic_long_t		btree_sectors_written;
	atomic_long_t		sectors_written;

	char			cache_dev_name[BDEVNAME_SIZE];
};

struct gc_stat {
	size_t			nodes;
	size_t			key_bytes;

	size_t			nkeys;
	uint64_t		data;	/* sectors */
	unsigned		in_use; /* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
 * flushing dirty data).
 *
 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 * replay is complete.
 *
 * CACHE_SET_IO_DISABLE is set when bcache is stopping the whole cache set; all
 * external and internal I/O should be denied when this flag is set.
 */
#define CACHE_SET_UNREGISTERING		0
#define	CACHE_SET_STOPPING		1
#define	CACHE_SET_RUNNING		2
#define CACHE_SET_IO_DISABLE		3

struct cache_set {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct dentry		*debug;
	struct cache_accounting accounting;

	unsigned long		flags;

	struct cache_sb		sb;

	struct cache		*cache[MAX_CACHES_PER_SET];
	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
	int			caches_loaded;

	struct bcache_device	**devices;
	unsigned		devices_max_used;
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
	struct closure		caching;

	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	mempool_t		*search;
	mempool_t		*bio_meta;
	struct bio_set		*bio_split;

	/* For the btree cache */
	struct shrinker		shrink;

	/* For the btree cache and anything allocation related */
	struct mutex		bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short		bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short		block_bits;

	/*
	 * Default number of pages for a new btree node - may be less than a
	 * full bucket
	 */
	unsigned		btree_pages;

	/*
	 * Lists of struct btrees; lru is the list for structs that have memory
	 * allocated for an actual btree node, freed is for structs that do not.
	 *
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct list_head	btree_cache;
	struct list_head	btree_cache_freeable;
	struct list_head	btree_cache_freed;

	/* Number of elements in btree_cache + btree_cache_freeable lists */
	unsigned		btree_cache_used;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	wait_queue_head_t	btree_cache_wait;
	struct task_struct	*btree_cache_alloc_lock;

	/*
	 * When we free a btree node, we increment the gen of the bucket the
	 * node is in - but we can't rewrite the prios and gens until we've
	 * finished whatever it is we were doing, otherwise after a crash the
	 * btree node would be freed but for say a split, we might not have the
	 * pointers to the new nodes inserted into the btree yet.
	 *
	 * This is a refcount that blocks prio_write() until the new keys are
	 * written.
	 */
	atomic_t		prio_blocked;
	wait_queue_head_t	bucket_wait;

	/*
	 * For any bio we don't skip we subtract the number of sectors from
	 * rescale; when it hits 0 we rescale all the bucket priorities.
	 */
	atomic_t		rescale;
	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	uint16_t		min_prio;

	/*
	 * max(gen - last_gc) for all buckets. When it gets too big we have to
	 * gc to keep gens from wrapping around.
	 */
	uint8_t			need_gc;
	struct gc_stat		gc_stats;
	size_t			nbuckets;
	size_t			avail_nbuckets;

	struct task_struct	*gc_thread;
	/* Where in the btree gc currently is */
	struct bkey		gc_done;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress. Protected by bucket_lock.
	 */
	int			gc_mark_valid;

	/* Counts how many sectors bio_insert has added to the cache */
	atomic_t		sectors_to_gc;
	wait_queue_head_t	gc_wait;

	struct keybuf		moving_gc_keys;
	/* Number of moving GC bios in flight */
	struct semaphore	moving_in_flight;

	struct workqueue_struct	*moving_gc_wq;

	struct btree		*root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree		*verify_data;
	struct bset		*verify_ondisk;
	struct mutex		verify_lock;
#endif

	unsigned		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure		uuid_write;
	struct semaphore	uuid_write_mutex;

	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - have to dynamically allocate them
	 */
	mempool_t		*fill_iter;

	struct bset_sort_state	sort;

	/* List of buckets we're currently writing data to */
	struct list_head	data_buckets;
	spinlock_t		data_bucket_lock;

	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned		congested_last_us;
	atomic_t		congested;

	/* The rest of this all shows up in sysfs */
	unsigned		congested_read_threshold_us;
	unsigned		congested_write_threshold_us;

	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
	struct time_stats	btree_read_time;

	atomic_long_t		cache_read_races;
	atomic_long_t		writeback_keys_done;
	atomic_long_t		writeback_keys_failed;

	atomic_long_t		reclaim;
	atomic_long_t		flush_write;
	atomic_long_t		retry_flush_write;

	enum			{
		ON_ERROR_UNREGISTER,
		ON_ERROR_PANIC,
	}			on_error;
#define DEFAULT_IO_ERROR_LIMIT 8
	unsigned		error_limit;
	unsigned		error_decay;

	unsigned short		journal_delay_ms;
	bool			expensive_debug_checks;
	unsigned		verify:1;
	unsigned		key_merging_disabled:1;
	unsigned		gc_always_rewrite:1;
	unsigned		shrinker_disabled:1;
	unsigned		copy_gc_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];

	DECLARE_HEAP(struct btree *, flush_btree);
};

struct bbio {
	unsigned		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
		/*
		 * We only need pad = 3 here because we only ever carry around a
		 * single pointer - i.e. the pointer we're doing io to/from.
		 */
	};
	struct bio		bio;
};

#define BTREE_PRIO		USHRT_MAX
#define INITIAL_PRIO		32768U

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)							\
	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)						\
	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
#define block_bytes(c)		((c)->sb.block_size << 9)

#define prios_per_bucket(c)				\
	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
	 sizeof(struct bucket_disk))
#define prio_buckets(c)					\
	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->sb.bucket_size - 1);
}
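
/*
 * Worked example (illustrative numbers only): with bucket_size = 1024
 * sectors (512KiB), bucket_bits is 10, so sector 5000 lands in bucket
 * 5000 >> 10 = 4 (covering sectors 4096..5119) and bucket_remainder()
 * returns 5000 & 1023 = 904.
 */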

static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned ptr)
{
	return c->cache[PTR_DEV(k, ptr)];
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}

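/*
 * gen_after() returns how far gen a is ahead of gen b, using modulo-256
 * arithmetic so the comparison survives gen wraparound; differences over
 * 128 are treated as "not after" and clamped to 0. ptr_stale() below uses
 * it to compute how many times a pointer's bucket has been reused since
 * the pointer was created (0 means the pointer is still valid).
 */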
static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;
	return r > 128U ? 0 : r;
}

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}

/* Btree key macros */

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: The checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i)							\
	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
		  ((void *) bset_bkey_last(i)) -			\
		  (((void *) (i)) + sizeof(uint64_t)))
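
/*
 * Illustration only - the typical pattern is to fill in a struct's leading
 * 8 byte csum field from the rest of its contents, e.g. (hypothetical
 * caller):
 *
 *	jset->csum = csum_set(jset);
 */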

/* Error handling macros */

#define btree_bug(b, ...)						\
do {									\
	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define cache_bug(c, ...)						\
do {									\
	if (bch_cache_set_error(c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define btree_bug_on(cond, b, ...)					\
do {									\
	if (cond)							\
		btree_bug(b, __VA_ARGS__);				\
} while (0)

#define cache_bug_on(cond, c, ...)					\
do {									\
	if (cond)							\
		cache_bug(c, __VA_ARGS__);				\
} while (0)

#define cache_set_err_on(cond, c, ...)					\
do {									\
	if (cond)							\
		bch_cache_set_error(c, __VA_ARGS__);			\
} while (0)

/* Looping macros */

#define for_each_cache(ca, cs, iter)					\
	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (refcount_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!refcount_inc_not_zero(&dc->count))
		return false;

	/* Paired with the mb in cached_dev_attach */
	smp_mb__after_atomic();
	return true;
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

#define BUCKET_GC_GEN_MAX	96U
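
/*
 * When bucket_gc_gen() gets close to BUCKET_GC_GEN_MAX, gc rewrites the
 * btree nodes pointing into the bucket so its 8 bit gen can't wrap - see
 * the GARBAGE COLLECTION notes at the top of this file.
 */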

#define kobj_attribute_write(n, fn)					\
	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)

#define kobj_attribute_rw(n, show, store)				\
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, S_IWUSR|S_IRUSR, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);
}

static inline void closure_bio_submit(struct cache_set *c,
				      struct bio *bio,
				      struct closure *cl)
{
	closure_get(cl);
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}
	generic_make_request(bio);
}

/*
 * Prevent the kthread from exiting on its own, and make sure that when
 * kthread_stop() is called to stop a kthread, it is still alive. If a
 * kthread might be stopped because the CACHE_SET_IO_DISABLE bit is set,
 * it must call wait_for_kthread_stop() before returning.
 */
static inline void wait_for_kthread_stop(void)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/* Forward declarations */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
			      blk_status_t, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
		const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);

void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);

uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);

bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
void __bch_invalidate_one_bucket(struct cache *, struct bucket *);

void __bch_bucket_free(struct cache *, struct bucket *);
void bch_bucket_free(struct cache_set *, struct bkey *);

long bch_bucket_alloc(struct cache *, unsigned, bool);
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
			   struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
			 struct bkey *, int, bool);
bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
		       unsigned, unsigned, bool);
bool bch_cached_dev_error(struct cached_dev *dc);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);

void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);

extern struct workqueue_struct *bcache_wq;
extern const char * const bch_cache_modes[];
extern const char * const bch_stop_on_failure_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *);
void bch_flash_dev_release(struct kobject *);
void bch_cache_set_release(struct kobject *);
void bch_cache_release(struct kobject *);

int bch_uuid_write(struct cache_set *);
void bcache_write_super(struct cache_set *);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
void bch_cached_dev_detach(struct cached_dev *);
void bch_cached_dev_run(struct cached_dev *);
void bcache_device_stop(struct bcache_device *);

void bch_cache_set_unregister(struct cache_set *);
void bch_cache_set_stop(struct cache_set *);

struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_moving_init_cache_set(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);
void bch_open_buckets_free(struct cache_set *);

int bch_cache_allocator_start(struct cache *ca);

void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
void bch_request_exit(void);
int bch_request_init(void);

#endif /* _BCACHE_H */
1003