xref: /openbmc/linux/fs/btrfs/tests/extent-io-tests.c (revision 60772e48)
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"

#define PROCESS_UNLOCK		(1 << 0)
#define PROCESS_RELEASE		(1 << 1)
#define PROCESS_TEST_LOCKED	(1 << 2)

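/*
 * Walk the pages covering the byte range [start, end] and apply @flags:
 * PROCESS_TEST_LOCKED counts pages that are unexpectedly unlocked,
 * PROCESS_UNLOCK unlocks pages that are still locked, and PROCESS_RELEASE
 * drops the extra page reference the test took when it created the page.
 * Returns the number of pages that failed the PROCESS_TEST_LOCKED check,
 * so 0 means every page in the range was in the expected state.
 */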
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int count = 0;
	int loops = 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (flags & PROCESS_TEST_LOCKED &&
			    !PageLocked(pages[i]))
				count++;
			if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
				unlock_page(pages[i]);
			put_page(pages[i]);
			if (flags & PROCESS_RELEASE)
				put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR
			       "stuck in a loop, start %Lu, end %Lu, nr_pages %lu, ret %d\n",
			       start, end, nr_pages, ret);
			break;
		}
	}
	return count;
}

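/*
 * Exercise find_lock_delalloc_range() against a 256MB test inode: dirty
 * all of its pages, mark various delalloc ranges in a private extent_io
 * tree, and check that each search returns the expected [start, end] and
 * leaves the pages in that range locked.
 */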
static int test_find_delalloc(u32 sectorsize)
{
	struct inode *inode;
	struct extent_io_tree tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	u64 total_dirty = SZ_256M;
	u64 max_bytes = SZ_128M;
	u64 start, end, test_start;
	u64 found;
	int ret = -EINVAL;

	test_msg("Running find delalloc tests\n");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_msg("Failed to allocate test inode\n");
		return -ENOMEM;
	}

	extent_io_tree_init(&tmp, inode);

	/*
	 * First go through and create and mark all of our pages dirty.  We
	 * pin everything to make sure our pages don't get evicted and screw
	 * up our test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_msg("Failed to allocate test page\n");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 * |---  search  ---|
	 */
	set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
	start = 0;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Should have found at least one delalloc\n");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_msg("Expected start 0 end %u, got start %llu end %llu\n",
			sectorsize - 1, start, end);
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);
	unlock_page(locked_page);
	put_page(locked_page);

	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_msg("Couldn't find the locked page\n");
		goto out_bits;
	}
	set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Couldn't find delalloc in our range\n");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu, end %Lu\n",
			 test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_msg("There were unlocked pages in the range\n");
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);
	/* locked_page was unlocked above */
	put_page(locked_page);

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                    |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping, test_start >>
				     PAGE_SHIFT);
	if (!locked_page) {
		test_msg("Couldn't find the locked page\n");
		goto out_bits;
	}
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (found) {
		test_msg("Found range when we shouldn't have\n");
		goto out_bits;
	}
	if (end != (u64)-1) {
		test_msg("Did not return the proper end offset\n");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Didn't find our range\n");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_msg("Pages in range were not all locked\n");
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);

	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_msg("Couldn't find our page\n");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = 0;
	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
	 * this changes at any point in the future we will need to fix this
	 * test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Didn't find our range\n");
		goto out_bits;
	}
	if (start != test_start || end != test_start + PAGE_SIZE - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
			 test_start, test_start + PAGE_SIZE - 1, start,
			 end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_msg("Pages in range were not all locked\n");
		goto out_bits;
	}
	ret = 0;
out_bits:
	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	return ret;
}

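/*
 * Compare every bit of the regular bitmap against the extent buffer, both
 * with a zero start byte and with the byte offset folded into the start
 * argument, so both ways of addressing a bit in the buffer are covered.
 */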
static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb,
			   unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			test_msg("Bits do not match\n");
			return -EINVAL;
		}

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			test_msg("Offset bits do not match\n");
			return -EINVAL;
		}
	}
	return 0;
}

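/*
 * Mirror a sequence of bitmap operations (zeroing, setting and clearing
 * every bit, a set/clear that straddles a page boundary when the buffer
 * spans more than one page, and a pseudo-random pattern) on a plain bitmap
 * and on the extent buffer, and verify after each step that the two match.
 */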
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
			     unsigned long len)
{
	unsigned long i, j;
	u32 x;
	int ret;

	memset(bitmap, 0, len);
	memzero_extent_buffer(eb, 0, len);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Bitmap was not zeroed\n");
		return -EINVAL;
	}

	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_msg("Setting all bits failed\n");
		return ret;
	}

	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_msg("Clearing all bits failed\n");
		return ret;
	}

	/* Straddling pages test */
	if (len > PAGE_SIZE) {
		bitmap_set(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		ret = check_eb_bitmap(bitmap, eb, len);
		if (ret) {
			test_msg("Setting straddling pages failed\n");
			return ret;
		}

		bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
		bitmap_clear(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
		extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		ret = check_eb_bitmap(bitmap, eb, len);
		if (ret) {
			test_msg("Clearing straddling pages failed\n");
			return ret;
		}
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	x = 0;
	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	for (i = 0; i < len * BITS_PER_BYTE / 32; i++) {
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
		for (j = 0; j < 32; j++) {
			if (x & (1U << j)) {
				bitmap_set(bitmap, i * 32 + j, 1);
				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
			}
		}
	}

	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_msg("Random bit pattern failed\n");
		return ret;
	}

	return 0;
}

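/*
 * Run the bitmap checks twice: once with an extent buffer that starts at
 * offset 0 and once with one that is deliberately not page-aligned, since
 * the bitmap helpers have to handle both layouts.
 */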
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	unsigned long len;
	unsigned long *bitmap;
	struct extent_buffer *eb;
	int ret;

	test_msg("Running extent buffer bitmap tests\n");

	/*
	 * In ppc64, sectorsize can be 64K, thus 4 * 64K will be larger than
	 * BTRFS_MAX_METADATA_BLOCKSIZE.
	 */
	len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
		? sectorsize * 4 : sectorsize;

	fs_info = btrfs_alloc_dummy_fs_info(len, len);
	if (!fs_info) {
		test_msg("Couldn't allocate dummy fs_info\n");
		return -ENOMEM;
	}

	bitmap = kmalloc(len, GFP_KERNEL);
	if (!bitmap) {
		test_msg("Couldn't allocate test bitmap\n");
		btrfs_free_dummy_fs_info(fs_info);
		return -ENOMEM;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, 0, len);
	if (!eb) {
		test_msg("Couldn't allocate test extent buffer\n");
		kfree(bitmap);
		btrfs_free_dummy_fs_info(fs_info);
		return -ENOMEM;
	}

	ret = __test_eb_bitmaps(bitmap, eb, len);
	if (ret)
		goto out;

	/* Do it over again with an extent buffer which isn't page-aligned. */
	free_extent_buffer(eb);
	eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
	if (!eb) {
		test_msg("Couldn't allocate test extent buffer\n");
		kfree(bitmap);
		btrfs_free_dummy_fs_info(fs_info);
		return -ENOMEM;
	}

	ret = __test_eb_bitmaps(bitmap, eb, len);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

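/* Entry point for the extent I/O self-tests (CONFIG_BTRFS_FS_RUN_SANITY_TESTS). */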
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("Running extent I/O tests\n");

	ret = test_find_delalloc(sectorsize);
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
out:
	test_msg("Extent I/O tests finished\n");
	return ret;
}