#ifndef _BCACHE_BTREE_H
#define _BCACHE_BTREE_H

/*
 * THE BTREE:
 *
 * At a high level, bcache's btree is a relatively standard b+ tree. All keys
 * and pointers are in the leaves; interior nodes only have pointers to the
 * child nodes.
 *
 * In the interior nodes, a struct bkey always points to a child btree node,
 * and the key is the highest key in the child node - except that the highest
 * key in an interior node is always MAX_KEY. The size field refers to the
 * size on disk of the child node - this would allow us to have variable sized
 * btree nodes (handy for keeping the depth of the btree at 1 by expanding
 * just the root).
 *
 * Btree nodes are themselves log structured, but this is hidden fairly
 * thoroughly. Btree nodes on disk will in practice have extents that overlap
 * (because they were written at different times), but in memory we never have
 * overlapping extents - when we read in a btree node from disk, the first
 * thing we do is re-sort all the sets of keys with a mergesort, and in the
 * same pass we check for overlapping extents and adjust them appropriately.
 *
 * struct btree_op is a central interface to the btree code. It's used for
 * specifying read vs. write locking, and the embedded closure is used for
 * waiting on IO or reserving memory.
 *
 * BTREE CACHE:
 *
 * Btree nodes are cached in memory; traversing the btree might require
 * reading in btree nodes, which is handled mostly transparently.
 *
 * bch_btree_node_get() looks up a btree node in the cache and reads it in
 * from disk if necessary. This function is almost never called directly
 * though - the btree() macro is used to get a btree node, call some function
 * on it, and unlock the node after the function returns.
 *
 * The root is special cased - it's taken out of the cache's LRU (thus pinning
 * it in memory), so we can find the root of the btree by just dereferencing a
 * pointer instead of looking it up in the cache. This makes locking a bit
 * tricky, since the root pointer is protected by the lock in the btree node
 * it points to - the btree_root() macro handles this.
 *
 * In various places we must be able to allocate memory for multiple btree
 * nodes in order to make forward progress. To do this we use the btree cache
 * itself as a reserve; if __get_free_pages() fails, we'll find a node in the
 * btree cache we can reuse. We can't allow more than one thread to be doing
 * this at a time, so there's a lock, implemented by a pointer to the btree_op
 * closure - this allows the btree_root() macro to implicitly release this
 * lock.
 *
 * BTREE IO:
 *
 * Btree nodes never have to be explicitly read in; bch_btree_node_get()
 * handles this.
 *
 * For writing, we have two btree_write structs embedded in struct btree - one
 * write in flight, and one being set up, and we toggle between them.
 *
 * Writing is done with a single function - bch_btree_write() really serves
 * two different purposes and should be broken up into two different
 * functions. When passing now = false, it merely indicates that the node is
 * now dirty - calling it ensures that the dirty keys will be written at some
 * point in the future.
 *
 * When passing now = true, bch_btree_write() causes a write to happen
 * "immediately" (if there was already a write in flight, it'll cause the
 * write to happen as soon as the previous write completes). It still returns
 * immediately, though - it takes a refcount on the closure in the struct
 * btree_op you passed to it, so a closure_sync() later can be used to wait
 * for the write to complete.
 *
 * This is handy because btree_split() and garbage collection can issue writes
 * in parallel, reducing the amount of time they have to hold write locks.
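 *
 * A rough, purely illustrative sketch (assuming b is a write locked btree
 * node and op is an initialized struct btree_op): a caller that needs to
 * know the node is on disk before continuing might, using the
 * bch_btree_node_write() declared below, do
 *
 *	bch_btree_node_write(b, &op->cl);
 *	rw_unlock(true, b);
 *	closure_sync(&op->cl);
 *
 * i.e. start the write, drop the write lock so it isn't held across the IO,
 * and wait on the closure for the write to complete.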
 *
 * LOCKING:
 *
 * When traversing the btree, we may need write locks starting at some level -
 * inserting a key into the btree will typically only require a write lock on
 * the leaf node.
 *
 * This is specified with the lock field in struct btree_op; lock = 0 means we
 * take write locks at level <= 0, i.e. only leaf nodes. bch_btree_node_get()
 * checks this field and returns the node with the appropriate lock held.
 *
 * If, after traversing the btree, the insertion code discovers it has to
 * split, it must restart from the root and take new locks - to do this it
 * changes the lock field and returns -EINTR, which causes the btree_root()
 * macro to loop.
 *
 * Handling cache misses requires a different mechanism for upgrading to a
 * write lock. We do cache lookups with only a read lock held, but if we get a
 * cache miss and we wish to insert this data into the cache, we have to
 * insert a placeholder key to detect races - otherwise, we could race with a
 * write and overwrite the data that was just written to the cache with stale
 * data from the backing device.
 *
 * For this we use a sequence number that write locks and unlocks increment -
 * to insert the check key, the code unlocks the btree node, takes a write
 * lock, and fails if the sequence number doesn't match.
 */

#include "bset.h"
#include "debug.h"

struct btree_write {
	atomic_t		*journal;

	/* If btree_split() frees a btree node, it writes a new pointer to that
	 * btree node indicating it was freed; it takes a refcount on
	 * c->prio_blocked because we can't write the gens until the new
	 * pointer is on disk. This allows btree_write_endio() to release the
	 * refcount that btree_split() took.
	 */
	int			prio_blocked;
};

struct btree {
	/* Hottest entries first */
	struct hlist_node	hash;

	/* Key/pointer for this btree node */
	BKEY_PADDED(key);

	/* Single bit - set when accessed, cleared by shrinker */
	unsigned long		accessed;
	unsigned long		seq;
	struct rw_semaphore	lock;
	struct cache_set	*c;

	unsigned long		flags;
	uint16_t		written;	/* would be nice to kill */
	uint8_t			level;
	uint8_t			nsets;
	uint8_t			page_order;

	/*
	 * Set of sorted keys - the real btree node - plus a binary search
	 * tree.
	 *
	 * sets[0] is special; sets[0].tree, sets[0].prev and sets[0].data
	 * point to the memory we have allocated for this btree node.
	 * Additionally, sets[0].data points to the entire btree node as it
	 * exists on disk.
	 */
	struct bset_tree	sets[MAX_BSETS];

	/* For outstanding btree writes, used as a lock - protects write_idx */
	struct closure_with_waitlist	io;

	struct list_head	list;
	struct delayed_work	work;

	struct btree_write	writes[2];
	struct bio		*bio;
};

#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\

enum btree_flags {
	BTREE_NODE_io_error,
	BTREE_NODE_dirty,
	BTREE_NODE_write_idx,
};

BTREE_FLAG(io_error);
BTREE_FLAG(dirty);
BTREE_FLAG(write_idx);
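
/*
 * For reference, BTREE_FLAG(dirty) above expands (roughly) to:
 *
 *	static inline bool btree_node_dirty(struct btree *b)
 *	{	return test_bit(BTREE_NODE_dirty, &b->flags); }
 *
 *	static inline void set_btree_node_dirty(struct btree *b)
 *	{	set_bit(BTREE_NODE_dirty, &b->flags); }
 *
 * i.e. the test/set accessors are generated from the names in enum
 * btree_flags.
 */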

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline unsigned bset_offset(struct btree *b, struct bset *i)
{
	return (((size_t) i) - ((size_t) b->sets->data)) >> 9;
}

static inline struct bset *write_block(struct btree *b)
{
	return ((void *) b->sets[0].data) + b->written * block_bytes(b->c);
}

static inline bool bset_written(struct btree *b, struct bset_tree *t)
{
	return t->data < write_block(b);
}

static inline bool bkey_written(struct btree *b, struct bkey *k)
{
	return k < write_block(b)->start;
}

static inline void set_gc_sectors(struct cache_set *c)
{
	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
}

static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
{
	return __bch_ptr_invalid(b->c, b->level, k);
}

static inline struct bkey *bch_btree_iter_init(struct btree *b,
					       struct btree_iter *iter,
					       struct bkey *search)
{
	return __bch_btree_iter_init(b, iter, search, b->sets);
}

/* Looping macros */

#define for_each_cached_btree(b, c, iter)				\
	for (iter = 0;							\
	     iter < ARRAY_SIZE((c)->bucket_hash);			\
	     iter++)							\
		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)

#define for_each_key_filter(b, k, iter, filter)				\
	for (bch_btree_iter_init((b), (iter), NULL);			\
	     ((k) = bch_btree_iter_next_filter((iter), b, filter));)

#define for_each_key(b, k, iter)					\
	for (bch_btree_iter_init((b), (iter), NULL);			\
	     ((k) = bch_btree_iter_next(iter));)
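
/*
 * Purely illustrative use of the looping macros above (assumes the caller
 * already holds a lock on the node b): count the keys in a node, skipping
 * keys that the bch_ptr_bad() predicate from bset.h flags as bad:
 *
 *	struct btree_iter iter;
 *	struct bkey *k;
 *	unsigned count = 0;
 *
 *	for_each_key_filter(b, k, &iter, bch_ptr_bad)
 *		count++;
 */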

/* Recursing down the btree */

struct btree_op {
	struct closure		cl;
	struct cache_set	*c;

	/* Journal entry we have a refcount on */
	atomic_t		*journal;

	/* Bio to be inserted into the cache */
	struct bio		*cache_bio;

	unsigned		inode;

	uint16_t		write_prio;

	/* Btree level at which we start taking write locks */
	short			lock;

	/* Btree insertion type */
	enum {
		BTREE_INSERT,
		BTREE_REPLACE
	} type:8;

	unsigned		csum:1;
	unsigned		skip:1;
	unsigned		flush_journal:1;

	unsigned		insert_data_done:1;
	unsigned		lookup_done:1;
	unsigned		insert_collision:1;

	/* Anything after this point won't get zeroed in do_bio_hook() */

	/* Keys to be inserted */
	struct keylist		keys;
	BKEY_PADDED(replace);
};

enum {
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
	BTREE_INSERT_STATUS_OVERWROTE,
	BTREE_INSERT_STATUS_FRONT_MERGE,
};

void bch_btree_op_init_stack(struct btree_op *);

static inline void rw_lock(bool w, struct btree *b, int level)
{
	w ? down_write_nested(&b->lock, level + 1)
	  : down_read_nested(&b->lock, level + 1);
	if (w)
		b->seq++;
}

static inline void rw_unlock(bool w, struct btree *b)
{
#ifdef CONFIG_BCACHE_EDEBUG
	unsigned i;

	if (w && b->key.ptr[0])
		for (i = 0; i <= b->nsets; i++)
			bch_check_key_order(b, b->sets[i].data);
#endif

	if (w)
		b->seq++;
	(w ? up_write : up_read)(&b->lock);
}

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_b = bch_btree_node_get((b)->c, key, l, op);	\
	if (!IS_ERR(_b)) {						\
		_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);		\
		rw_unlock(_w, _b);					\
	} else								\
		_r = PTR_ERR(_b);					\
	_r;								\
})
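
/*
 * A sketch of the intended usage pattern - bch_btree_frob(), frob_leaf() and
 * frob_needs_write_lock() are hypothetical, purely for illustration. The
 * per-level function either handles the node it was given or recurses via
 * btree(), and it asks for the whole traversal to be retried with stronger
 * locks by bumping op->lock and returning -EINTR:
 *
 *	static int bch_btree_frob(struct btree *b, struct btree_op *op)
 *	{
 *		struct btree_iter iter;
 *		struct bkey *k;
 *		int ret = 0;
 *
 *		if (!b->level)
 *			return frob_leaf(b, op);
 *
 *		if (frob_needs_write_lock(b) && op->lock < b->level) {
 *			op->lock = b->level;
 *			return -EINTR;
 *		}
 *
 *		for_each_key_filter(b, k, &iter, bch_ptr_bad) {
 *			ret = btree(frob, k, b, op);
 *			if (ret)
 *				break;
 *		}
 *
 *		return ret;
 *	}
 *
 * The top level caller then invokes btree_root(frob, c, op) (defined below),
 * which loops until the result is no longer -EINTR.
 */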

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the root node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b))				\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c, &(op)->cl);			\
	} while (_r == -EINTR);						\
									\
	_r;								\
})

static inline bool should_split(struct btree *b)
{
	struct bset *i = write_block(b);
	return b->written >= btree_blocks(b) ||
	       (i->seq == b->sets[0].data->seq &&
		b->written + __set_blocks(i, i->keys + 15, b->c)
		> btree_blocks(b));
}

void bch_btree_node_read(struct btree *);
void bch_btree_node_write(struct btree *, struct closure *);

void bch_cannibalize_unlock(struct cache_set *, struct closure *);
void bch_btree_set_root(struct btree *);
struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
				 int, struct btree_op *);

bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
				struct bio *);
int bch_btree_insert(struct btree_op *, struct cache_set *);

int bch_btree_search_recurse(struct btree *, struct btree_op *);

void bch_queue_gc(struct cache_set *);
size_t bch_btree_gc_finish(struct cache_set *);
void bch_moving_gc(struct closure *);
int bch_btree_check(struct cache_set *, struct btree_op *);
uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);

void bch_keybuf_init(struct keybuf *);
void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
		       keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
				  struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
					  struct bkey *, keybuf_pred_fn *);

#endif