Lines matching full:keys (one hit per line: source line number, matching text, and context)

18  * as keys are inserted we only sort the pages that have not yet been written.
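For context on the comment above (line 18), here is a minimal sketch of the idea it describes: keys live in several sorted sets, and only the sets that have not yet been written out are re-sorted when new keys arrive. Everything below (toy_node, toy_insert, toy_sort_unwritten) is a hypothetical simplification for illustration, not code from the file.

/*
 * Illustrative sketch only: keep keys in several sorted runs and,
 * when resorting, touch only the runs that are not yet on disk.
 */
#include <stdio.h>
#include <stdlib.h>

#define NSETS 4

struct toy_node {
	int set[NSETS][16];	/* up to four sorted runs of keys */
	int nkeys[NSETS];
	int nwritten;		/* sets [0, nwritten) are on disk: immutable */
	int nsets;		/* sets in use; the last one accepts inserts */
};

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

/* Insert into the last (unwritten) set; it is kept sorted lazily. */
static void toy_insert(struct toy_node *n, int key)
{
	n->set[n->nsets - 1][n->nkeys[n->nsets - 1]++] = key;
}

/* Resort only the unwritten sets, leaving written ones untouched. */
static void toy_sort_unwritten(struct toy_node *n)
{
	for (int i = n->nwritten; i < n->nsets; i++)
		qsort(n->set[i], n->nkeys[i], sizeof(int), cmp_int);
}

int main(void)
{
	struct toy_node n = { .set = { { 2, 5, 9 } }, .nkeys = { 3 },
			      .nwritten = 1, .nsets = 2 };

	/* New keys land in set 1; set 0 is already written and untouched. */
	toy_insert(&n, 7);
	toy_insert(&n, 4);
	toy_sort_unwritten(&n);
	printf("%d %d\n", n.set[1][0], n.set[1][1]);	/* 4 7 */
	return 0;
}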
52 * Check for bad keys in replay
115 if (b->level && b->keys.nsets) in bch_btree_init_next()
116 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
118 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
121 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_init_next()
164 iter->b = &b->keys; in bch_btree_node_read_done()
171 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; in bch_btree_node_read_done()
199 if (i != b->keys.set[0].data && !i->keys) in bch_btree_node_read_done()
209 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); in bch_btree_node_read_done()
211 if (i->seq == b->keys.set[0].data->seq) in bch_btree_node_read_done()
214 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
216 i = b->keys.set[0].data; in bch_btree_node_read_done()
218 if (b->keys.set[0].size && in bch_btree_node_read_done()
219 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) in bch_btree_node_read_done()
223 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_node_read_done()
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
232 bset_block_offset(b, i), i->keys); in bch_btree_node_read_done()
259 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
373 bset_sector_offset(&b->keys, i)); in do_btree_node_write()
413 BUG_ON(b->written && !i->keys); in __bch_btree_node_write()
415 bch_check_keys(&b->keys, "writing"); in __bch_btree_node_write()
436 unsigned int nsets = b->keys.nsets; in bch_btree_node_write()
446 if (nsets && !b->keys.nsets) in bch_btree_node_write()
483 BUG_ON(!i->keys); in bch_btree_leaf_dirty()
528 bch_btree_keys_free(&b->keys); in mca_data_free()
550 if (!bch_btree_keys_alloc(&b->keys, in mca_data_alloc()
618 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); in mca_reap()
620 if (b->keys.page_order < min_order) in mca_reap()
692 * succeed, so that inserting keys into the btree can always succeed and in bch_mca_scan()
825 c->verify_data->keys.set->data) in bch_btree_cache_alloc()
944 if (!b->keys.set[0].data) in mca_alloc()
955 if (!b->keys.set->data) in mca_alloc()
972 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, in mca_alloc()
975 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, in mca_alloc()
1048 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { in bch_btree_node_get()
1049 prefetch(b->keys.set[i].tree); in bch_btree_node_get()
1050 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1053 for (; i <= b->keys.nsets; i++) in bch_btree_node_get()
1054 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1144 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb)); in __bch_btree_node_alloc()
1173 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1231 * ptr_invalid() can't return true for the keys that mark btree nodes as in __bch_btree_mark_key()
1305 unsigned int keys = 0, good_keys = 0; in btree_gc_mark_node() local
1312 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1314 keys++; in btree_gc_mark_node()
1316 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1326 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) in btree_gc_mark_node()
1328 bset_written(&b->keys, t) && in btree_gc_mark_node()
1338 if ((keys - good_keys) * 2 > keys) in btree_gc_mark_node()
1348 unsigned int keys; member
1359 unsigned int i, nodes = 0, keys = 0, blocks; in btree_gc_coalesce() local
1374 keys += r[nodes++].keys; in btree_gc_coalesce()
1379 __set_blocks(b->keys.set[0].data, keys, in btree_gc_coalesce()
1406 keys = 0; in btree_gc_coalesce()
1412 if (__set_blocks(n1, n1->keys + keys + in btree_gc_coalesce()
1418 keys += bkey_u64s(k); in btree_gc_coalesce()
1424 * the remaining keys into this node; we can't ensure in btree_gc_coalesce()
1426 * length keys (shouldn't be possible in practice, in btree_gc_coalesce()
1429 if (__set_blocks(n1, n1->keys + n2->keys, in btree_gc_coalesce()
1434 keys = n2->keys; in btree_gc_coalesce()
1439 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > in btree_gc_coalesce()
1447 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); in btree_gc_coalesce()
1449 n1->keys += keys; in btree_gc_coalesce()
1450 r[i].keys = n1->keys; in btree_gc_coalesce()
1453 bset_bkey_idx(n2, keys), in btree_gc_coalesce()
1455 (void *) bset_bkey_idx(n2, keys)); in btree_gc_coalesce()
1457 n2->keys -= keys; in btree_gc_coalesce()
1473 BUG_ON(btree_bset_first(new_nodes[0])->keys); in btree_gc_coalesce()
1530 struct keylist keys; in btree_gc_rewrite_node() local
1549 bch_keylist_init(&keys); in btree_gc_rewrite_node()
1550 bch_keylist_add(&keys, &n->key); in btree_gc_rewrite_node()
1552 make_btree_freeing_key(replace, keys.top); in btree_gc_rewrite_node()
1553 bch_keylist_push(&keys); in btree_gc_rewrite_node()
1555 bch_btree_insert_node(b, op, &keys, NULL, NULL); in btree_gc_rewrite_node()
1556 BUG_ON(!bch_keylist_empty(&keys)); in btree_gc_rewrite_node()
1571 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1613 bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1619 k = bch_btree_iter_next_filter(&iter.iter, &b->keys, in btree_gc_recurse()
1629 r->keys = btree_gc_count_keys(r->b); in btree_gc_recurse()
1771 /* don't reclaim buckets to which writeback keys point */ in bch_btree_gc_finish()
1784 &dc->writeback_keys.keys, node) in bch_btree_gc_finish()
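The two hits above (lines 1771 and 1784) belong to the same pass; as a rough sketch of the concept, before buckets may be reclaimed, the pending writeback keys are walked and every bucket they reference is marked so the reclaimer skips it. The names below (toy_key, mark_writeback_keys, can_reclaim) are hypothetical and not from the file.

/*
 * Illustrative sketch only: mark buckets referenced by in-flight keys
 * so a later reclaim pass leaves them alone.
 */
#include <stdbool.h>
#include <stdio.h>

#define NBUCKETS 8

struct toy_key { int bucket; };		/* a key pointing at one bucket */

static bool bucket_dirty[NBUCKETS];	/* marks set during the GC pass */

static void mark_writeback_keys(const struct toy_key *keys, int n)
{
	for (int i = 0; i < n; i++)
		bucket_dirty[keys[i].bucket] = true;
}

static bool can_reclaim(int bucket)
{
	return !bucket_dirty[bucket];	/* skip buckets still referenced */
}

int main(void)
{
	struct toy_key pending[] = { { 2 }, { 5 } };

	mark_writeback_keys(pending, 2);
	printf("%d %d\n", can_reclaim(2), can_reclaim(3));	/* 0 1 */
	return 0;
}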
1797 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++) in bch_btree_gc_finish()
1912 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1918 bch_btree_iter_stack_init(&b->keys, &iter, NULL); in bch_btree_check_recurse()
1921 k = bch_btree_iter_next_filter(&iter.iter, &b->keys, in bch_btree_check_recurse()
1957 /* root node keys are checked before thread created */ in bch_btree_check_thread()
1958 bch_btree_iter_stack_init(&c->root->keys, &iter, NULL); in bch_btree_check_thread()
1959 k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); in bch_btree_check_thread()
1965 * Fetch a root node key index, skip the keys which in bch_btree_check_thread()
1978 &c->root->keys, in bch_btree_check_thread()
1984 * No more keys to check in root node, in bch_btree_check_thread()
2053 /* check and mark root node keys */ in bch_btree_check()
2054 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) in bch_btree_check()
2163 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
2165 bch_check_keys(&b->keys, "%u for %s", status, in btree_insert_key()
2177 long ret = bch_btree_keys_u64s_remaining(&b->keys); in insert_u64s_remaining()
2182 if (b->keys.ops->is_extents) in insert_u64s_remaining()
2193 int oldsize = bch_count_data(&b->keys); in bch_btree_insert_keys()
2196 struct bkey *k = insert_keys->keys; in bch_btree_insert_keys()
2209 bkey_copy(&temp.key, insert_keys->keys); in bch_btree_insert_keys()
2212 bch_cut_front(&b->key, insert_keys->keys); in bch_btree_insert_keys()
2226 BUG_ON(bch_count_data(&b->keys) < oldsize); in bch_btree_insert_keys()
2258 unsigned int keys = 0; in btree_split() local
2260 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); in btree_split()
2282 while (keys < (btree_bset_first(n1)->keys * 3) / 5) in btree_split()
2283 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), in btree_split()
2284 keys)); in btree_split()
2287 bset_bkey_idx(btree_bset_first(n1), keys)); in btree_split()
2288 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys)); in btree_split()
2290 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; in btree_split()
2291 btree_bset_first(n1)->keys = keys; in btree_split()
2295 btree_bset_first(n2)->keys * sizeof(uint64_t)); in btree_split()
2304 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); in btree_split()
2378 b->keys.last_set_unwritten) in bch_btree_insert_node()
2460 struct keylist *keys; member
2470 int ret = bch_btree_insert_node(b, &op->op, op->keys, in btree_insert_fn()
2472 if (ret && !bch_keylist_empty(op->keys)) in btree_insert_fn()
2478 int bch_btree_insert(struct cache_set *c, struct keylist *keys, in bch_btree_insert() argument
2485 BUG_ON(bch_keylist_empty(keys)); in bch_btree_insert()
2488 op.keys = keys; in bch_btree_insert()
2492 while (!ret && !bch_keylist_empty(keys)) { in bch_btree_insert()
2495 &START_KEY(keys->keys), in bch_btree_insert()
2504 while ((k = bch_keylist_pop(keys))) in bch_btree_insert()
2536 /* Map across nodes or keys */
2548 bch_btree_iter_stack_init(&b->keys, &iter, from); in bch_btree_map_nodes_recurse()
2550 while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, in bch_btree_map_nodes_recurse()
2581 bch_btree_iter_stack_init(&b->keys, &iter, from); in bch_btree_map_keys_recurse()
2583 while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, in bch_btree_map_keys_recurse()
2612 /* Overlapping keys compare equal */ in keybuf_cmp()
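The comment above (line 2612) names the comparator convention used for the keybuf tree: two keys that overlap compare equal, so an insert keyed on this comparator reports an existing overlapping entry instead of adding a duplicate. Below is a minimal sketch of that convention, simplified to integer ranges; range and range_cmp are hypothetical names, not the kernel's keybuf_cmp().

/*
 * Illustrative sketch only: an "overlapping ranges compare equal"
 * comparator over half-open [start, end) integer ranges.
 */
#include <stdio.h>

struct range { unsigned long start, end; };

static int range_cmp(const struct range *l, const struct range *r)
{
	if (l->end <= r->start)
		return -1;	/* l lies entirely before r */
	if (l->start >= r->end)
		return 1;	/* l lies entirely after r */
	return 0;		/* any overlap compares equal */
}

int main(void)
{
	struct range a = { 10, 20 }, b = { 15, 30 }, c = { 30, 40 };

	/*
	 * Overlap yields 0, so a tree insert keyed on this comparator
	 * would find 'a' already present when inserting 'b'.
	 */
	printf("%d %d\n", range_cmp(&a, &b), range_cmp(&a, &c));	/* 0 -1 */
	return 0;
}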
2663 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) in refill_keybuf_fn()
2702 if (!RB_EMPTY_ROOT(&buf->keys)) { in bch_refill_keybuf()
2705 w = RB_FIRST(&buf->keys, struct keybuf_key, node); in bch_refill_keybuf()
2708 w = RB_LAST(&buf->keys, struct keybuf_key, node); in bch_refill_keybuf()
2720 rb_erase(&w->node, &buf->keys); in __bch_keybuf_del()
2744 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp); in bch_keybuf_check_overlapping()
2766 w = RB_FIRST(&buf->keys, struct keybuf_key, node); in bch_keybuf_next()
2804 buf->keys = RB_ROOT; in bch_keybuf_init()