// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"
#include "../btrfs_inode.h"

#define PROCESS_UNLOCK		(1 << 0)
#define PROCESS_RELEASE		(1 << 1)
#define PROCESS_TEST_LOCKED	(1 << 2)

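/*
 * Walk every page cache page in [start, end] in batches and apply @flags:
 *
 *   PROCESS_TEST_LOCKED: count pages that are *not* locked; the count is
 *                        the return value, so 0 means all pages were locked.
 *   PROCESS_UNLOCK:      unlock any page found locked.
 *   PROCESS_RELEASE:     drop one extra page reference, undoing a get_page().
 *
 * The reference taken by find_get_pages_contig() is always dropped.  The
 * delalloc tests use this both to verify lock state and to clean up.
 */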
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int count = 0;
	int loops = 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (flags & PROCESS_TEST_LOCKED &&
			    !PageLocked(pages[i]))
				count++;
			if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
				unlock_page(pages[i]);
			put_page(pages[i]);
			if (flags & PROCESS_RELEASE)
				put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR
		"stuck in a loop, start %llu, end %llu, nr_pages %lu, ret %d\n",
				start, end, nr_pages, ret);
			break;
		}
	}
	return count;
}

#define STATE_FLAG_STR_LEN			256

#define PRINT_ONE_FLAG(state, dest, cur, name)				\
({									\
	if (state->state & EXTENT_##name)				\
		cur += scnprintf(dest + cur, STATE_FLAG_STR_LEN - cur,	\
				 "%s" #name, cur == 0 ? "" : "|");	\
})

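/*
 * Render state->state as a '|'-separated flag string; a dirty delalloc
 * range, for example, comes out as "DIRTY|DELALLOC".
 */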
static void extent_flag_to_str(const struct extent_state *state, char *dest)
{
	int cur = 0;

	dest[0] = 0;
	PRINT_ONE_FLAG(state, dest, cur, DIRTY);
	PRINT_ONE_FLAG(state, dest, cur, UPTODATE);
	PRINT_ONE_FLAG(state, dest, cur, LOCKED);
	PRINT_ONE_FLAG(state, dest, cur, NEW);
	PRINT_ONE_FLAG(state, dest, cur, DELALLOC);
	PRINT_ONE_FLAG(state, dest, cur, DEFRAG);
	PRINT_ONE_FLAG(state, dest, cur, BOUNDARY);
	PRINT_ONE_FLAG(state, dest, cur, NODATASUM);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_META_RESV);
	PRINT_ONE_FLAG(state, dest, cur, NEED_WAIT);
	PRINT_ONE_FLAG(state, dest, cur, DAMAGED);
	PRINT_ONE_FLAG(state, dest, cur, NORESERVE);
	PRINT_ONE_FLAG(state, dest, cur, QGROUP_RESERVED);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_DATA_RESV);
}

static void dump_extent_io_tree(const struct extent_io_tree *tree)
{
	struct rb_node *node;
	char flags_str[STATE_FLAG_STR_LEN];

	node = rb_first(&tree->state);
	test_msg("io tree content:");
	while (node) {
		struct extent_state *state;

		state = rb_entry(node, struct extent_state, rb_node);
		extent_flag_to_str(state, flags_str);
		test_msg("  start=%llu len=%llu flags=%s", state->start,
			 state->end + 1 - state->start, flags_str);
		node = rb_next(node);
	}
}

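/*
 * The delalloc scenarios below all follow the same pattern (a sketch based
 * on how this test drives the API, not on its kerneldoc):
 *
 *	start = <search offset>;
 *	end = start + PAGE_SIZE - 1;
 *	found = find_lock_delalloc_range(inode, locked_page, &start, &end);
 *
 * On success *start/*end are rewritten to the delalloc range that was
 * found, its pages are locked, and the range is locked in the io tree; the
 * caller must unlock_extent() and unlock the pages afterwards.
 */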
static int test_find_delalloc(u32 sectorsize)
{
	struct inode *inode;
	struct extent_io_tree *tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	/* In this test we need at least 2 file extents at their maximum size */
	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
	u64 total_dirty = 2 * max_bytes;
	u64 start, end, test_start;
	bool found;
	int ret = -EINVAL;

	test_msg("running find delalloc tests");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		return -ENOMEM;
	}
	tmp = &BTRFS_I(inode)->io_tree;

	/*
	 * Pass NULL as we don't have fs_info; tracepoints are not used at
	 * this point anyway.
	 */
	extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST, NULL);

	/*
	 * First go through and create and mark all of our pages dirty.  We
	 * pin everything to make sure our pages don't get evicted and screw
	 * up our test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_err("failed to allocate test page");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 * |---  search  ---|
	 */
	set_extent_delalloc(tmp, 0, sectorsize - 1, 0, NULL);
	start = 0;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("should have found at least one delalloc");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_err("expected start 0 end %u, got start %llu end %llu",
			sectorsize - 1, start, end);
		goto out_bits;
	}
	unlock_extent(tmp, start, end);
	unlock_page(locked_page);
	put_page(locked_page);

	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	set_extent_delalloc(tmp, sectorsize, max_bytes - 1, 0, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("couldn't find delalloc in our range");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
				test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("there were unlocked pages in the range");
		goto out_bits;
	}
	unlock_extent(tmp, start, end);
	/* locked_page was unlocked above */
	put_page(locked_page);

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                    |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping, test_start >>
				     PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (found) {
		test_err("found range when we shouldn't have");
		goto out_bits;
	}
	if (end != test_start + PAGE_SIZE - 1) {
		test_err("did not return the proper end offset");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	set_extent_delalloc(tmp, max_bytes, total_dirty - 1, 0, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	unlock_extent(tmp, start, end);

	/*
	 * Now test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_err("couldn't find our page");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
	 * this changes at any point in the future we will need to fix this
	 * test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != test_start + PAGE_SIZE - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, test_start + PAGE_SIZE - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	ret = 0;
out_bits:
	if (ret)
		dump_extent_io_tree(tmp);
	clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	return ret;
}

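/*
 * Compare every bit of @bitmap against @eb, addressing each bit two ways:
 * once as (byte 0, bit i) and once as (byte i / 8, bit i % 8), so that both
 * the zero-offset and the byte-offset paths of extent_buffer_test_bit() are
 * covered.
 */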
static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb,
			   unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			test_err("bits do not match");
			return -EINVAL;
		}

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			test_err("offset bits do not match");
			return -EINVAL;
		}
	}
	return 0;
}

static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
			     unsigned long len)
{
	unsigned long i, j;
	u32 x;
	int ret;

	memset(bitmap, 0, len);
	memzero_extent_buffer(eb, 0, len);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_err("bitmap was not zeroed");
		return -EINVAL;
	}

	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_err("setting all bits failed");
		return ret;
	}

	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_err("clearing all bits failed");
		return ret;
	}

	/* Straddling pages test */
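	/*
	 * Start the run sizeof(long) / 2 bytes before the first page
	 * boundary so that the sizeof(long) bytes worth of bits straddle
	 * the boundary, exercising the cross-page paths of the extent
	 * buffer bitmap helpers.
	 */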
	if (len > PAGE_SIZE) {
		bitmap_set(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		ret = check_eb_bitmap(bitmap, eb, len);
		if (ret) {
			test_err("setting straddling pages failed");
			return ret;
		}

		bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
		bitmap_clear(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
		extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		ret = check_eb_bitmap(bitmap, eb, len);
		if (ret) {
			test_err("clearing straddling pages failed");
			return ret;
		}
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
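	/*
	 * The recurrence below is the 32-bit linear congruential generator
	 * from Numerical Recipes, x = (1664525 * x + 1013904223) mod 2^32,
	 * i.e. the constants 0x19660d and 0x3c6ef35f.
	 */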
	x = 0;
	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	for (i = 0; i < len * BITS_PER_BYTE / 32; i++) {
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
		for (j = 0; j < 32; j++) {
			if (x & (1U << j)) {
				bitmap_set(bitmap, i * 32 + j, 1);
				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
			}
		}
	}

	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_err("random bit pattern failed");
		return ret;
	}

	return 0;
}

static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	unsigned long *bitmap = NULL;
	struct extent_buffer *eb = NULL;
	int ret;

	test_msg("running extent buffer bitmap tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	bitmap = kmalloc(nodesize, GFP_KERNEL);
	if (!bitmap) {
		test_err("couldn't allocate test bitmap");
		ret = -ENOMEM;
		goto out;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, 0, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb, nodesize);
	if (ret)
		goto out;

	free_extent_buffer(eb);

	/*
	 * Test again for the case where the tree block is sectorsize aligned
	 * but not nodesize aligned.
	 */
	eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb, nodesize);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

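/*
 * A sketch of the contract exercised below: find_first_clear_extent_bit()
 * returns, via @start/@end, the first range at or after the given offset in
 * which none of the given bits are set (the range containing the offset if
 * the bits are already clear there); on an empty tree that is the whole
 * [0, -1] range.
 */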
static int test_find_first_clear_extent_bit(void)
{
	struct extent_io_tree tree;
	u64 start, end;
	int ret = -EINVAL;

	test_msg("running find_first_clear_extent_bit test");

	extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST, NULL);

	/* Test correct handling of an empty tree */
	find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
	if (start != 0 || end != -1) {
		test_err(
	"error getting a range from completely empty tree: start %llu end %llu",
			 start, end);
		goto out;
	}
	/*
	 * Set 1M-4M alloc/discard; 32M-64M is added further down, which
	 * leaves a hole between 4M-32M.
	 */
	set_extent_bits(&tree, SZ_1M, SZ_4M - 1,
			CHUNK_TRIMMED | CHUNK_ALLOCATED);

	find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != 0 || end != SZ_1M - 1) {
		test_err("error finding beginning range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/* Now add 32M-64M so that we have a hole between 4M-32M */
	set_extent_bits(&tree, SZ_32M, SZ_64M - 1,
			CHUNK_TRIMMED | CHUNK_ALLOCATED);

	/* Request the first hole starting at 12M; we should get 4M-32M */
	find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding trimmed range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search in the middle of an allocated range; we should get the next
	 * available one, which happens to be unallocated: 4M-32M.
	 */
	find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding next unalloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Set 64M-72M with only CHUNK_ALLOCATED, then search for the
	 * CHUNK_TRIMMED bit being unset in this range; we should get the
	 * entry in range 64M-72M.
	 */
	set_extent_bits(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED);
	find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
				    CHUNK_TRIMMED);

	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding exact range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search in the middle of a set range whose immediate neighbour
	 * doesn't have the bits set, so the neighbour must be returned.
	 */
	find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
				    CHUNK_TRIMMED);

	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding next alloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search beyond any known range; it shall return the area after the
	 * last known range, with end == -1.
	 */
	find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
	if (start != SZ_64M + SZ_8M || end != -1) {
		test_err(
		"error handling beyond end of range search: start %llu end %llu",
			start, end);
		goto out;
	}

	ret = 0;
out:
	if (ret)
		dump_extent_io_tree(&tree);
	clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED);

	return ret;
}

int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("running extent I/O tests");

	ret = test_find_delalloc(sectorsize);
	if (ret)
		goto out;

	ret = test_find_first_clear_extent_bit();
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
out:
	return ret;
}