extents.c (a85e968e66a175c86d0410719ea84a5bd0f1d070) -> extents.c (dc9d98d621bdce0552997200ce855659875a5c9f)
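Unchanged lines appear once below; lines added in the new revision are prefixed with +, removed lines with -. From the fragments visible here, the change adds bch_ptr_status(), bch_extent_to_text() and bch_bkey_dump() to extents.c, switches key-formatting call sites from bch_bkey_to_text() to bch_extent_to_text(), and wires .key_to_text and .key_dump into both bch_btree_keys_ops and bch_extent_keys_ops.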
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;

--- 48 unchanged lines hidden ---

			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}

+/* Common among btree and extent ptrs */
+
+static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+{
+	unsigned i;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (ptr_available(c, k, i)) {
+			struct cache *ca = PTR_CACHE(c, k, i);
+			size_t bucket = PTR_BUCKET_NR(c, k, i);
+			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+			if (KEY_SIZE(k) + r > c->sb.bucket_size)
+				return "bad, length too big";
+			if (bucket <  ca->sb.first_bucket)
+				return "bad, short offset";
+			if (bucket >= ca->sb.nbuckets)
+				return "bad, offset past end of device";
+			if (ptr_stale(c, k, i))
+				return "stale";
+		}
+
+	if (!bkey_cmp(k, &ZERO_KEY))
+		return "bad, null key";
+	if (!KEY_PTRS(k))
+		return "bad, no pointers";
+	if (!KEY_SIZE(k))
+		return "zeroed key";
+	return "";
+}
+
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
+{
+	unsigned i = 0;
+	char *out = buf, *end = buf + size;
+
+#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))
+
+	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
+
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		if (i)
+			p(", ");
+
+		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
+			p("check dev");
+		else
+			p("%llu:%llu gen %llu", PTR_DEV(k, i),
+			  PTR_OFFSET(k, i), PTR_GEN(k, i));
+	}
+
+	p("]");
+
+	if (KEY_DIRTY(k))
+		p(" dirty");
+	if (KEY_CSUM(k))
+		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
+#undef p
+}
+
+static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+{
+	struct btree *b = container_of(keys, struct btree, keys);
+	unsigned j;
+	char buf[80];
+
+	bch_extent_to_text(buf, sizeof(buf), k);
+	printk(" %s", buf);
+
+	for (j = 0; j < KEY_PTRS(k); j++) {
+		size_t n = PTR_BUCKET_NR(b->c, k, j);
+		printk(" bucket %zu", n);
+
+		if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+			printk(" prio %i",
+			       PTR_BUCKET(b->c, k, j)->prio);
+	}
+
+	printk(" %s\n", bch_ptr_status(b->c, k));
+}
+
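A minimal usage sketch for the helpers above (hypothetical values throughout: c is some struct cache_set and k a valid extent key; the rendered string follows the format strings in bch_extent_to_text()):

	char buf[80];

	bch_extent_to_text(buf, sizeof(buf), k);
	printk("%s %s\n", buf, bch_ptr_status(c, k));
	/*
	 * A dirty extent for inode 5 starting at sector 120, 8 sectors long,
	 * with one pointer to device 0 at offset 1024, generation 3, would
	 * render along the lines of:
	 *
	 *	5:120 len 8 -> [0:1024 gen 3] dirty
	 */
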
/* Btree ptrs */

bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
-	bch_bkey_to_text(buf, sizeof(buf), k);
+	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	return __bch_btree_ptr_invalid(b->c, k);

--- 18 unchanged lines hidden ---

			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
-	bch_bkey_to_text(buf, sizeof(buf), k);
+	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
	return true;
}

static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)

--- 17 unchanged lines hidden ---


	return false;
}

const struct btree_keys_ops bch_btree_keys_ops = {
	.sort_cmp	= bch_key_sort_cmp,
	.key_invalid	= bch_btree_ptr_invalid,
	.key_bad	= bch_btree_ptr_bad,
+	.key_to_text	= bch_extent_to_text,
+	.key_dump	= bch_bkey_dump,
};

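A sketch of how the two new callbacks are reached (hypothetical caller; assumes the ops pointer that bch_btree_keys_init() stores in struct btree_keys, with b some struct btree and k a key in it):

	struct btree_keys *bk = &b->keys;
	char buf[80];

	bk->ops->key_to_text(buf, sizeof(buf), k);	/* -> bch_extent_to_text() */
	bk->ops->key_dump(bk, k);			/* -> bch_bkey_dump() */
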
/* Extents */

/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *

--- 66 unchanged lines hidden ---

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(b->c, k))
		goto bad;

	return false;
bad:
-	bch_bkey_to_text(buf, sizeof(buf), k);
+	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k));
	return true;
}

static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);

--- 10 unchanged lines hidden ---

			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
-	bch_bkey_to_text(buf, sizeof(buf), k);
+	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
	return true;
}

static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)

--- 84 unchanged lines hidden ---

}

const struct btree_keys_ops bch_extent_keys_ops = {
	.sort_cmp	= bch_extent_sort_cmp,
	.sort_fixup	= bch_extent_sort_fixup,
	.key_invalid	= bch_extent_invalid,
	.key_bad	= bch_extent_bad,
	.key_merge	= bch_extent_merge,
+	.key_to_text	= bch_extent_to_text,
+	.key_dump	= bch_bkey_dump,
	.is_extents	= true,
};