Lines matching references to k in drivers/md/bcache/extents.c (two short illustrative sketches of the checks these lines perform follow the listing).

33 	i->k = bkey_next(i->k);  in sort_key_next()
35 if (i->k == i->end) in sort_key_next()
42 int64_t c = bkey_cmp(l.k, r.k); in bch_key_sort_cmp()
44 return c ? c > 0 : l.k < r.k; in bch_key_sort_cmp()
47 static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) in __ptr_invalid() argument
51 for (i = 0; i < KEY_PTRS(k); i++) in __ptr_invalid()
52 if (ptr_available(c, k, i)) { in __ptr_invalid()
54 size_t bucket = PTR_BUCKET_NR(c, k, i); in __ptr_invalid()
55 size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); in __ptr_invalid()
57 if (KEY_SIZE(k) + r > c->cache->sb.bucket_size || in __ptr_invalid()
68 static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) in bch_ptr_status() argument
72 for (i = 0; i < KEY_PTRS(k); i++) in bch_ptr_status()
73 if (ptr_available(c, k, i)) { in bch_ptr_status()
75 size_t bucket = PTR_BUCKET_NR(c, k, i); in bch_ptr_status()
76 size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); in bch_ptr_status()
78 if (KEY_SIZE(k) + r > c->cache->sb.bucket_size) in bch_ptr_status()
84 if (ptr_stale(c, k, i)) in bch_ptr_status()
88 if (!bkey_cmp(k, &ZERO_KEY)) in bch_ptr_status()
90 if (!KEY_PTRS(k)) in bch_ptr_status()
92 if (!KEY_SIZE(k)) in bch_ptr_status()
97 void bch_extent_to_text(char *buf, size_t size, const struct bkey *k) in bch_extent_to_text() argument
104 p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k)); in bch_extent_to_text()
106 for (i = 0; i < KEY_PTRS(k); i++) { in bch_extent_to_text()
110 if (PTR_DEV(k, i) == PTR_CHECK_DEV) in bch_extent_to_text()
113 p("%llu:%llu gen %llu", PTR_DEV(k, i), in bch_extent_to_text()
114 PTR_OFFSET(k, i), PTR_GEN(k, i)); in bch_extent_to_text()
119 if (KEY_DIRTY(k)) in bch_extent_to_text()
121 if (KEY_CSUM(k)) in bch_extent_to_text()
122 p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]); in bch_extent_to_text()
126 static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k) in bch_bkey_dump() argument
132 bch_extent_to_text(buf, sizeof(buf), k); in bch_bkey_dump()
135 for (j = 0; j < KEY_PTRS(k); j++) { in bch_bkey_dump()
136 size_t n = PTR_BUCKET_NR(b->c, k, j); in bch_bkey_dump()
141 PTR_BUCKET(b->c, k, j)->prio); in bch_bkey_dump()
144 pr_cont(" %s\n", bch_ptr_status(b->c, k)); in bch_bkey_dump()
149 bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k) in __bch_btree_ptr_invalid() argument
153 if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)) in __bch_btree_ptr_invalid()
156 if (__ptr_invalid(c, k)) in __bch_btree_ptr_invalid()
161 bch_extent_to_text(buf, sizeof(buf), k); in __bch_btree_ptr_invalid()
162 cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k)); in __bch_btree_ptr_invalid()
166 static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k) in bch_btree_ptr_invalid() argument
170 return __bch_btree_ptr_invalid(b->c, k); in bch_btree_ptr_invalid()
173 static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) in btree_ptr_bad_expensive() argument
180 for (i = 0; i < KEY_PTRS(k); i++) in btree_ptr_bad_expensive()
181 if (ptr_available(b->c, k, i)) { in btree_ptr_bad_expensive()
182 g = PTR_BUCKET(b->c, k, i); in btree_ptr_bad_expensive()
184 if (KEY_DIRTY(k) || in btree_ptr_bad_expensive()
197 bch_extent_to_text(buf, sizeof(buf), k); in btree_ptr_bad_expensive()
200 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), in btree_ptr_bad_expensive()
205 static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k) in bch_btree_ptr_bad() argument
210 if (!bkey_cmp(k, &ZERO_KEY) || in bch_btree_ptr_bad()
211 !KEY_PTRS(k) || in bch_btree_ptr_bad()
212 bch_ptr_invalid(bk, k)) in bch_btree_ptr_bad()
215 for (i = 0; i < KEY_PTRS(k); i++) in bch_btree_ptr_bad()
216 if (!ptr_available(b->c, k, i) || in bch_btree_ptr_bad()
217 ptr_stale(b->c, k, i)) in bch_btree_ptr_bad()
221 btree_ptr_bad_expensive(b, k)) in bch_btree_ptr_bad()
261 int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k)); in bch_extent_sort_cmp()
263 return c ? c > 0 : l.k < r.k; in bch_extent_sort_cmp()
276 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0) in bch_extent_sort_fixup()
279 if (!KEY_SIZE(i->k)) { in bch_extent_sort_fixup()
285 if (top->k > i->k) { in bch_extent_sort_fixup()
286 if (bkey_cmp(top->k, i->k) >= 0) in bch_extent_sort_fixup()
289 bch_cut_front(top->k, i->k); in bch_extent_sort_fixup()
294 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k))); in bch_extent_sort_fixup()
296 if (bkey_cmp(i->k, top->k) < 0) { in bch_extent_sort_fixup()
297 bkey_copy(tmp, top->k); in bch_extent_sort_fixup()
299 bch_cut_back(&START_KEY(i->k), tmp); in bch_extent_sort_fixup()
300 bch_cut_front(i->k, top->k); in bch_extent_sort_fixup()
305 bch_cut_back(&START_KEY(i->k), top->k); in bch_extent_sort_fixup()
313 static void bch_subtract_dirty(struct bkey *k, in bch_subtract_dirty() argument
318 if (KEY_DIRTY(k)) in bch_subtract_dirty()
319 bcache_dev_sectors_dirty_add(c, KEY_INODE(k), in bch_subtract_dirty()
337 struct bkey *k = bch_btree_iter_next(iter); in bch_extent_insert_fixup() local
339 if (!k) in bch_extent_insert_fixup()
342 if (bkey_cmp(&START_KEY(k), insert) >= 0) { in bch_extent_insert_fixup()
343 if (KEY_SIZE(k)) in bch_extent_insert_fixup()
349 if (bkey_cmp(k, &START_KEY(insert)) <= 0) in bch_extent_insert_fixup()
352 old_offset = KEY_START(k); in bch_extent_insert_fixup()
353 old_size = KEY_SIZE(k); in bch_extent_insert_fixup()
363 if (replace_key && KEY_SIZE(k)) { in bch_extent_insert_fixup()
369 uint64_t offset = KEY_START(k) - in bch_extent_insert_fixup()
373 if (KEY_START(k) < KEY_START(replace_key) || in bch_extent_insert_fixup()
374 KEY_OFFSET(k) > KEY_OFFSET(replace_key)) in bch_extent_insert_fixup()
378 if (KEY_START(k) > KEY_START(insert) + sectors_found) in bch_extent_insert_fixup()
381 if (!bch_bkey_equal_header(k, replace_key)) in bch_extent_insert_fixup()
390 if (k->ptr[i] != replace_key->ptr[i] + offset) in bch_extent_insert_fixup()
393 sectors_found = KEY_OFFSET(k) - KEY_START(insert); in bch_extent_insert_fixup()
396 if (bkey_cmp(insert, k) < 0 && in bch_extent_insert_fixup()
397 bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) { in bch_extent_insert_fixup()
407 bch_subtract_dirty(k, c, KEY_START(insert), in bch_extent_insert_fixup()
410 if (bkey_written(b, k)) { in bch_extent_insert_fixup()
425 bch_bset_insert(b, top, k); in bch_extent_insert_fixup()
428 bkey_copy(&temp.key, k); in bch_extent_insert_fixup()
429 bch_bset_insert(b, k, &temp.key); in bch_extent_insert_fixup()
430 top = bkey_next(k); in bch_extent_insert_fixup()
434 bch_cut_back(&START_KEY(insert), k); in bch_extent_insert_fixup()
435 bch_bset_fix_invalidated_key(b, k); in bch_extent_insert_fixup()
439 if (bkey_cmp(insert, k) < 0) { in bch_extent_insert_fixup()
440 bch_cut_front(insert, k); in bch_extent_insert_fixup()
442 if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) in bch_extent_insert_fixup()
445 if (bkey_written(b, k) && in bch_extent_insert_fixup()
446 bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) { in bch_extent_insert_fixup()
451 bch_cut_front(k, k); in bch_extent_insert_fixup()
453 __bch_cut_back(&START_KEY(insert), k); in bch_extent_insert_fixup()
454 bch_bset_fix_invalidated_key(b, k); in bch_extent_insert_fixup()
458 bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k)); in bch_extent_insert_fixup()
480 bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k) in __bch_extent_invalid() argument
484 if (!KEY_SIZE(k)) in __bch_extent_invalid()
487 if (KEY_SIZE(k) > KEY_OFFSET(k)) in __bch_extent_invalid()
490 if (__ptr_invalid(c, k)) in __bch_extent_invalid()
495 bch_extent_to_text(buf, sizeof(buf), k); in __bch_extent_invalid()
496 cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k)); in __bch_extent_invalid()
500 static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k) in bch_extent_invalid() argument
504 return __bch_extent_invalid(b->c, k); in bch_extent_invalid()
507 static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, in bch_extent_bad_expensive() argument
510 struct bucket *g = PTR_BUCKET(b->c, k, ptr); in bch_extent_bad_expensive()
517 (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k)))) in bch_extent_bad_expensive()
529 bch_extent_to_text(buf, sizeof(buf), k); in bch_extent_bad_expensive()
532 buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin), in bch_extent_bad_expensive()
537 static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k) in bch_extent_bad() argument
543 if (!KEY_PTRS(k) || in bch_extent_bad()
544 bch_extent_invalid(bk, k)) in bch_extent_bad()
547 for (i = 0; i < KEY_PTRS(k); i++) in bch_extent_bad()
548 if (!ptr_available(b->c, k, i)) in bch_extent_bad()
551 for (i = 0; i < KEY_PTRS(k); i++) { in bch_extent_bad()
552 stale = ptr_stale(b->c, k, i); in bch_extent_bad()
554 if (stale && KEY_DIRTY(k)) { in bch_extent_bad()
555 bch_extent_to_text(buf, sizeof(buf), k); in bch_extent_bad()
568 bch_extent_bad_expensive(b, k, i)) in bch_extent_bad()
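
Lines 54-57 (__ptr_invalid()) and 75-78 (bch_ptr_status()) apply the same per-pointer bounds check: the key's size plus the pointer's offset within its bucket must not exceed the bucket size, i.e. the extent must not run off the end of the bucket it points into. Below is a minimal, self-contained sketch of that check; the toy_* names, the struct layout, and the plain modulo arithmetic are illustrative stand-ins for the kernel's PTR_BUCKET_NR()/bucket_remainder() helpers, and the real condition on line 57 continues past what the listing shows.

/*
 * Minimal sketch of the bounds check visible on lines 54-57 and 75-78
 * above.  toy_* names and the plain modulo are stand-ins for the
 * kernel's bucket_remainder() helper.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_cache {
	uint64_t bucket_size;	/* sectors per bucket */
};

/* Would an extent of key_size sectors, whose pointer lands at offset, spill out of its bucket? */
static bool toy_ptr_spills_bucket(const struct toy_cache *ca,
				  uint64_t offset, uint64_t key_size)
{
	uint64_t r = offset % ca->bucket_size;	/* offset within the bucket */

	return key_size + r > ca->bucket_size;
}

int main(void)
{
	struct toy_cache ca = { .bucket_size = 1024 };

	printf("%d\n", toy_ptr_spills_bucket(&ca, 2048, 512));	/* 0: fits in its bucket */
	printf("%d\n", toy_ptr_spills_bucket(&ca, 2560, 768));	/* 1: 512 + 768 > 1024 */
	return 0;
}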
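
Several of the checks above also rely on bcache's extent-key geometry: the offset field of an extent bkey names the sector where the extent ends, so the start is offset minus size. That is why line 104 prints "inode:start len size", why line 487 rejects keys with KEY_SIZE(k) > KEY_OFFSET(k) (the start would underflow), and why the overlap tests on lines 342 and 349 compare one key's START_KEY() against the other key. The sketch below illustrates that geometry with hypothetical toy_* helpers in place of the kernel's KEY_START()/KEY_OFFSET()/KEY_SIZE() macros.

/*
 * Sketch of the extent-key geometry: offset stores the *end* sector,
 * so start = offset - size.  toy_* names are illustrative stand-ins,
 * not the kernel's KEY_*() macros.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_extent {
	uint64_t inode;
	uint64_t offset;	/* end of the extent, in sectors */
	uint64_t size;		/* length of the extent, in sectors */
};

static uint64_t toy_start(const struct toy_extent *k)
{
	return k->offset - k->size;		/* KEY_START() */
}

/* Mirrors the checks on lines 484 and 487: zero-sized or underflowing keys are invalid. */
static bool toy_extent_invalid(const struct toy_extent *k)
{
	return k->size == 0 || k->size > k->offset;
}

/* Two extents on the same inode overlap iff each one starts before the other ends. */
static bool toy_overlap(const struct toy_extent *a, const struct toy_extent *b)
{
	return a->inode == b->inode &&
	       toy_start(a) < b->offset &&
	       toy_start(b) < a->offset;
}

int main(void)
{
	struct toy_extent a = { .inode = 1, .offset = 128, .size = 64 };	/* sectors [64, 128) */
	struct toy_extent b = { .inode = 1, .offset = 160, .size = 48 };	/* sectors [112, 160) */

	printf("invalid(a)=%d overlap(a,b)=%d\n",
	       toy_extent_invalid(&a), toy_overlap(&a, &b));	/* prints: invalid(a)=0 overlap(a,b)=1 */
	return 0;
}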