xref: /openbmc/linux/drivers/md/bcache/debug.c (revision c052dd9a26f60bcf70c0c3fcc08e07abb60295cd)
/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *debug;

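/*
 * Validate the pointers in @k against cache set @c and return a short
 * human-readable status string; the empty string means the key looks sane.
 */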
const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size)
				return "bad, length too big";
			if (bucket < ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}

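/*
 * Format @k into @buf as "<inode>:<start> len <size> -> [<dev>:<offset> gen
 * <gen>, ...]" plus dirty/csum annotations; returns the number of characters
 * written, bounded by @size.
 */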
int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (i)
			p(", ");

		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
			p("check dev");
		else
			p("%llu:%llu gen %llu", PTR_DEV(k, i),
			  PTR_OFFSET(k, i), PTR_GEN(k, i));
	}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
	return out - buf;
}

#ifdef CONFIG_BCACHE_DEBUG

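/*
 * Print every key in bset @i to the console, along with its buckets,
 * bucket priorities and pointer status, flagging any key that sorts
 * before its predecessor ("Key skipped backwards").
 */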
static void dump_bset(struct btree *b, struct bset *i, unsigned set)
{
	struct bkey *k, *next;
	unsigned j;
	char buf[80];

	for (k = i->start; k < bset_bkey_last(i); k = next) {
		next = bkey_next(k);

		bch_bkey_to_text(buf, sizeof(buf), k);
		printk(KERN_ERR "b %u k %zi/%u: %s", set,
		       (uint64_t *) k - i->d, i->keys, buf);

		for (j = 0; j < KEY_PTRS(k); j++) {
			size_t n = PTR_BUCKET_NR(b->c, k, j);

			printk(KERN_CONT " bucket %zu", n);
			if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
				printk(KERN_CONT " prio %i",
				       PTR_BUCKET(b->c, k, j)->prio);
		}

		printk(KERN_CONT " %s\n", bch_ptr_status(b->c, k));

		if (next < bset_bkey_last(i) &&
		    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
			printk(KERN_ERR "Key skipped backwards\n");
	}
}

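/* Dump every bset in btree node @b to the console, under console_lock. */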
static void bch_dump_bucket(struct btree *b)
{
	unsigned i;

	console_lock();
	for (i = 0; i <= b->keys.nsets; i++)
		dump_bset(b, b->keys.set[i].data,
			  bset_block_offset(b, b->keys.set[i].data));
	console_unlock();
}

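/*
 * Walk the bsets in the on-disk image of a btree node, starting at @start:
 * iteration stops at the end of the node (key size, in sectors) or at the
 * first bset whose sequence number no longer matches @start's, i.e. the
 * first block that was never written.
 */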
#define for_each_written_bset(b, start, i)				\
	for (i = (start);						\
	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
	     i->seq == (start)->seq;					\
	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\
		 block_bytes(b->c))

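/*
 * Re-read btree node @b from disk into the cache set's verify node, sort it,
 * and compare the result against the in-memory bset; on any mismatch, dump
 * both copies plus the raw on-disk blocks and panic.
 */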
void bch_btree_verify(struct btree *b)
{
	struct btree *v = b->c->verify_data;
	struct bset *ondisk, *sorted, *inmemory;
	struct bio *bio;

	if (!b->c->verify || !b->c->verify_ondisk)
		return;

	down(&b->io_mutex);
	mutex_lock(&b->c->verify_lock);

	ondisk = b->c->verify_ondisk;
	sorted = b->c->verify_data->keys.set->data;
	inmemory = b->keys.set->data;

	bkey_copy(&v->key, &b->key);
	v->written = 0;
	v->level = b->level;
	v->keys.ops = b->keys.ops;

	bio = bch_bbio_alloc(b->c);
	bio->bi_bdev		= PTR_CACHE(b->c, &b->key, 0)->bdev;
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9;
	bch_bio_map(bio, sorted);

	submit_bio_wait(REQ_META|READ_SYNC, bio);
	bch_bbio_free(bio, b->c);

	memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);

	bch_btree_node_read_done(v);
	sorted = v->keys.set->data;

	if (inmemory->keys != sorted->keys ||
	    memcmp(inmemory->start,
		   sorted->start,
		   (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
		struct bset *i;
		unsigned j;

		console_lock();

		printk(KERN_ERR "*** in memory:\n");
		dump_bset(b, inmemory, 0);

		printk(KERN_ERR "*** read back in:\n");
		dump_bset(v, sorted, 0);

		for_each_written_bset(b, ondisk, i) {
			unsigned block = ((void *) i - (void *) ondisk) /
				block_bytes(b->c);

			printk(KERN_ERR "*** on disk block %u:\n", block);
			dump_bset(b, i, block);
		}

		printk(KERN_ERR "*** block %zu not written\n",
		       ((void *) i - (void *) ondisk) / block_bytes(b->c));

		for (j = 0; j < inmemory->keys; j++)
			if (inmemory->d[j] != sorted->d[j])
				break;

		printk(KERN_ERR "b->written %u\n", b->written);

		console_unlock();
		panic("verify failed at %u\n", j);
	}

	mutex_unlock(&b->c->verify_lock);
	up(&b->io_mutex);
}

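/*
 * Read verify: clone @bio, read the same region into freshly allocated
 * pages, and memcmp the result against @bio's own pages segment by segment,
 * raising a cache set error on any mismatch.
 */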
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
	char name[BDEVNAME_SIZE];
	struct bio *check;
	struct bio_vec bv, *bv2;
	struct bvec_iter iter;
	int i;

	check = bio_clone(bio, GFP_NOIO);
	if (!check)
		return;

	if (bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	submit_bio_wait(READ_SYNC, check);

	bio_for_each_segment(bv, bio, iter) {
		void *p1 = kmap_atomic(bv.bv_page);
		void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);

		cache_set_err_on(memcmp(p1 + bv.bv_offset,
					p2 + bv.bv_offset,
					bv.bv_len),
				 dc->disk.c,
				 "verify failed at dev %s sector %llu",
				 bdevname(dc->bdev, name),
				 (uint64_t) bio->bi_iter.bi_sector);

		kunmap_atomic(p1);
	}

	bio_for_each_segment_all(bv2, check, i)
		__free_page(bv2->bv_page);
out_put:
	bio_put(check);
}

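/* Sum KEY_SIZE() over all keys in a leaf node; non-leaf nodes return 0. */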
int __bch_count_data(struct btree *b)
{
	unsigned ret = 0;
	struct btree_iter iter;
	struct bkey *k;

	if (!b->level)
		for_each_key(&b->keys, k, &iter)
			ret += KEY_SIZE(k);
	return ret;
}

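/*
 * Check the sort order and invariants of @b's keys: leaf nodes must not
 * contain out-of-order or overlapping keys, non-leaf nodes must not contain
 * duplicates, and no key may sort after the node's own key. Any violation
 * dumps the node and panics.
 */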
void __bch_check_keys(struct btree *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;
	const char *err;

	for_each_key(&b->keys, k, &iter) {
		if (!b->level) {
			err = "Keys out of order";
			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
				goto bug;

			if (bch_ptr_invalid(&b->keys, k))
				continue;

			err = "Overlapping keys";
			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
				goto bug;
		} else {
			if (bch_ptr_bad(&b->keys, k))
				continue;

			err = "Duplicate keys";
			if (p && !bkey_cmp(p, k))
				goto bug;
		}
		p = k;
	}

	err = "Key larger than btree node key";
	if (p && bkey_cmp(p, &b->key) > 0)
		goto bug;

	return;
bug:
	bch_dump_bucket(b);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	panic("bcache error: %s\n", err);
}

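/*
 * Iterator self-check, currently compiled out: would panic if the iterator
 * ever returned a key that sorts after its successor.
 */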
void bch_btree_iter_next_check(struct btree_iter *iter)
{
#if 0
	struct bkey *k = iter->data->k, *next = bkey_next(k);

	if (next < iter->data->end &&
	    bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
		bch_dump_bucket(iter->b);
		panic("Key skipped backwards\n");
	}
#endif
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: cache set refcounting */

struct dump_iterator {
	char			buf[PAGE_SIZE];
	size_t			bytes;
	struct cache_set	*c;
	struct keybuf		keys;
};

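/* Keybuf predicate that accepts every key, so the dump walks them all. */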
static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
	return true;
}

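/*
 * debugfs read: drain any text buffered in the iterator to userspace, then
 * refill the buffer one key at a time via bch_keybuf_next_rescan() until
 * either the user buffer or the keyspace is exhausted.
 */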
static ssize_t bch_dump_read(struct file *file, char __user *buf,
			     size_t size, loff_t *ppos)
{
	struct dump_iterator *i = file->private_data;
	ssize_t ret = 0;
	char kbuf[80];

	while (size) {
		struct keybuf_key *w;
		size_t bytes = min(i->bytes, size);

		if (copy_to_user(buf, i->buf, bytes))
			return -EFAULT;

		ret	 += bytes;
		buf	 += bytes;
		size	 -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);

		if (i->bytes)
			break;

		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
		if (!w)
			break;

		bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
		bch_keybuf_del(&i->keys, w);
	}

	return ret;
}

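/*
 * debugfs open: allocate a dump_iterator positioned at the start of the
 * keyspace for this cache set.
 */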
static int bch_dump_open(struct inode *inode, struct file *file)
{
	struct cache_set *c = inode->i_private;
	struct dump_iterator *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->c = c;
	bch_keybuf_init(&i->keys);
	i->keys.last_scanned = KEY(0, 0, 0);

	return 0;
}

static int bch_dump_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations cache_set_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch_dump_open,
	.read		= bch_dump_read,
	.release	= bch_dump_release
};

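/*
 * Create the per-cache-set debugfs file, typically visible as
 * /sys/kernel/debug/bcache/bcache-<set uuid>, which dumps the extent keys.
 */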
void bch_debug_init_cache_set(struct cache_set *c)
{
	if (!IS_ERR_OR_NULL(debug)) {
		char name[50];

		snprintf(name, sizeof(name), "bcache-%pU", c->sb.set_uuid);
		c->debug = debugfs_create_file(name, 0400, debug, c,
					       &cache_set_debug_ops);
	}
}

#endif

void bch_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(debug))
		debugfs_remove_recursive(debug);
}

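/* Create the top-level bcache debugfs directory at module init. */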
int __init bch_debug_init(struct kobject *kobj)
{
	debug = debugfs_create_dir("bcache", NULL);
	return 0;
}