xref: /openbmc/linux/fs/btrfs/tests/extent-io-tests.c (revision f3539c12)
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"

#define PROCESS_UNLOCK		(1 << 0)	/* unlock each page found locked */
#define PROCESS_RELEASE		(1 << 1)	/* drop an extra reference on each page */
#define PROCESS_TEST_LOCKED	(1 << 2)	/* count pages that are not locked */

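/*
 * Walk the page cache for the byte range [start, end] and apply the requested
 * PROCESS_* actions to every page found.  The return value is the number of
 * pages that failed the PROCESS_TEST_LOCKED check (pages we expected to be
 * locked but were not), so a non-zero return means the locking test failed.
 */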
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int count = 0;
	int loops = 0;

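	/*
	 * Pull pages from the mapping in batches of up to ARRAY_SIZE(pages)
	 * so we never hold more than a handful of page references at once.
	 */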
	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (flags & PROCESS_TEST_LOCKED &&
			    !PageLocked(pages[i]))
				count++;
			if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
				unlock_page(pages[i]);
			put_page(pages[i]);
			if (flags & PROCESS_RELEASE)
				put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR "stuck in a loop, start %llu, end %llu, nr_pages %lu, ret %d\n",
			       start, end, nr_pages, ret);
			break;
		}
	}
	return count;
}

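/*
 * Exercise find_lock_delalloc_range() against 256MB of pinned dirty pages,
 * covering searches that start at, inside, and past the delalloc extents we
 * set, plus a case where a page in the middle of the range is no longer
 * dirty.
 */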
static int test_find_delalloc(u32 sectorsize)
{
	struct inode *inode;
	struct extent_io_tree tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	u64 total_dirty = SZ_256M;
	u64 max_bytes = SZ_128M;
	u64 start, end, test_start;
	u64 found;
	int ret = -EINVAL;

	test_msg("Running find delalloc tests\n");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_msg("Failed to allocate test inode\n");
		return -ENOMEM;
	}

	extent_io_tree_init(&tmp, &inode->i_data);

	/*
	 * First, create and dirty all of our pages.  We pin everything so the
	 * pages can't be evicted out from under us and screw up the test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_msg("Failed to allocate test page\n");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 * |---  search  ---|
	 */
	set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
	start = 0;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Should have found at least one delalloc\n");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_msg("Expected start 0 end %u, got start %llu end %llu\n",
			sectorsize - 1, start, end);
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);
	unlock_page(locked_page);
	put_page(locked_page);

	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_msg("Couldn't find the locked page\n");
		goto out_bits;
	}
	set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Couldn't find delalloc in our range\n");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_msg("Expected start %llu end %llu, got start %llu, end %llu\n",
			 test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_msg("There were unlocked pages in the range\n");
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);
	/* locked_page was unlocked above */
	put_page(locked_page);

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                    |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping, test_start >>
				     PAGE_SHIFT);
	if (!locked_page) {
		test_msg("Couldn't find the locked page\n");
		goto out_bits;
	}
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (found) {
		test_msg("Found range when we shouldn't have\n");
		goto out_bits;
	}
	if (end != (u64)-1) {
		test_msg("Did not return the proper end offset\n");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We reuse test_start from the previous scenario since it already
	 * falls inside this delalloc range.
	 */
	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Didn't find our range\n");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_msg("Expected start %llu end %llu, got start %llu end %llu\n",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_msg("Pages in range were not all locked\n");
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);

	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_msg("Couldn't find our page\n");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = 0;
	/*
	 * Currently, if we fail to find dirty pages in the delalloc range, we
	 * adjust max_bytes down to PAGE_SIZE and then re-search.  If this
	 * behaviour changes at any point in the future, this test's
	 * expectations will need updating.
	 */
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Didn't find our range\n");
		goto out_bits;
	}
	if (start != test_start || end != test_start + PAGE_SIZE - 1) {
		test_msg("Expected start %llu end %llu, got start %llu end %llu\n",
			 test_start, test_start + PAGE_SIZE - 1, start,
			 end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_msg("Pages in range were not all locked\n");
		goto out_bits;
	}
	ret = 0;
out_bits:
	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	return ret;
}

/**
 * test_bit_in_byte - Determine whether a bit is set in a byte
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit_in_byte(int nr, const u8 *addr)
{
	return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
}

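/*
 * Apply the same set/clear/test operations to an extent buffer and to a plain
 * memory bitmap of the same size, and verify that the two always end up with
 * identical contents.
 */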
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
			     unsigned long len)
{
	unsigned long i, x;

	memset(bitmap, 0, len);
	memset_extent_buffer(eb, 0, 0, len);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Bitmap was not zeroed\n");
		return -EINVAL;
	}

	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Setting all bits failed\n");
		return -EINVAL;
	}

	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Clearing all bits failed\n");
		return -EINVAL;
	}

	/*
	 * Test a run of bits that straddles a page boundary inside the extent
	 * buffer: it starts sizeof(long) / 2 bytes before the end of the
	 * first page and spans sizeof(long) bytes.
	 */
	if (len > PAGE_SIZE) {
		bitmap_set(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
			test_msg("Setting straddling pages failed\n");
			return -EINVAL;
		}

		bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
		bitmap_clear(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
		extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
			test_msg("Clearing straddling pages failed\n");
			return -EINVAL;
		}
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	x = 0;
	for (i = 0; i < len / sizeof(long); i++) {
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffUL;
		bitmap[i] = x;
	}
	write_extent_buffer(eb, bitmap, 0, len);

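	/*
	 * Check every bit through both addressing modes of
	 * extent_buffer_test_bit(): as an absolute bit number from the start
	 * of the buffer, and as a byte offset plus a bit within that byte.
	 */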
	for (i = 0; i < len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit_in_byte(i, (u8 *)bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			test_msg("Testing bit pattern failed\n");
			return -EINVAL;
		}

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			test_msg("Testing bit pattern with offset failed\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	unsigned long len;
	unsigned long *bitmap;
	struct extent_buffer *eb;
	int ret;

	test_msg("Running extent buffer bitmap tests\n");

	/*
	 * On ppc64 the sectorsize can be 64K, in which case 4 * 64K would be
	 * larger than BTRFS_MAX_METADATA_BLOCKSIZE, so fall back to a single
	 * sector there.
	 */
	len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
		? sectorsize * 4 : sectorsize;

	bitmap = kmalloc(len, GFP_KERNEL);
	if (!bitmap) {
		test_msg("Couldn't allocate test bitmap\n");
		return -ENOMEM;
	}

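	/* First pass: an extent buffer that starts page aligned. */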
	eb = __alloc_dummy_extent_buffer(NULL, 0, len);
	if (!eb) {
		test_msg("Couldn't allocate test extent buffer\n");
		kfree(bitmap);
		return -ENOMEM;
	}

	ret = __test_eb_bitmaps(bitmap, eb, len);
	if (ret)
		goto out;

	/* Do it over again with an extent buffer which isn't page-aligned. */
	free_extent_buffer(eb);
	eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
	if (!eb) {
		test_msg("Couldn't allocate test extent buffer\n");
		kfree(bitmap);
		return -ENOMEM;
	}

	ret = __test_eb_bitmaps(bitmap, eb, len);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	return ret;
}

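/* Entry point for the extent I/O self-tests. */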
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("Running extent I/O tests\n");

	ret = test_find_delalloc(sectorsize);
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
out:
	test_msg("Extent I/O tests finished\n");
	return ret;
}
433