// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"

#define PROCESS_UNLOCK		(1 << 0)
#define PROCESS_RELEASE		(1 << 1)
#define PROCESS_TEST_LOCKED	(1 << 2)

static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int count = 0;
	int loops = 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (flags & PROCESS_TEST_LOCKED &&
			    !PageLocked(pages[i]))
				count++;
			if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
				unlock_page(pages[i]);
			put_page(pages[i]);
			if (flags & PROCESS_RELEASE)
				put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR
		"stuck in a loop, start %llu, end %llu, nr_pages %lu, ret %d\n",
				start, end, nr_pages, ret);
			break;
		}
	}
	return count;
}

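/*
 * Exercise find_lock_delalloc_range() against several delalloc/search range
 * layouts: a search that exactly matches a delalloc extent, one that starts
 * in the middle of an extent, one that starts past the end of any extent,
 * and one that runs into a page which is no longer dirty.
 */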
static int test_find_delalloc(u32 sectorsize)
{
	struct inode *inode;
	struct extent_io_tree tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	/* In this test we need at least 2 file extents at their maximum size */
	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
	u64 total_dirty = 2 * max_bytes;
	u64 start, end, test_start;
	bool found;
	int ret = -EINVAL;

	test_msg("running find delalloc tests");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		return -ENOMEM;
	}

	/*
	 * Passing NULL as we don't have fs_info; tracepoints are not used at
	 * this point anyway.
	 */
	extent_io_tree_init(NULL, &tmp, IO_TREE_SELFTEST, NULL);

	/*
	 * First go through and create and mark all of our pages dirty; we pin
	 * everything to make sure our pages don't get evicted and screw up our
	 * test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_err("failed to allocate test page");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 * |---  search  ---|
	 */
	set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
	start = 0;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end);
	if (!found) {
		test_err("should have found at least one delalloc");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_err("expected start 0 end %u, got start %llu end %llu",
			 sectorsize - 1, start, end);
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);
	unlock_page(locked_page);
	put_page(locked_page);

	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end);
	if (!found) {
		test_err("couldn't find delalloc in our range");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("there were unlocked pages in the range");
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);
	/* locked_page was unlocked above */
	put_page(locked_page);

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                     |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end);
	if (found) {
		test_err("found range when we shouldn't have");
		goto out_bits;
	}
	if (end != (u64)-1) {
		test_err("did not return the proper end offset");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);

	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_err("couldn't find our page");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = 0;
	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
	 * this changes at any point in the future we will need to fix this
	 * test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != test_start + PAGE_SIZE - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, test_start + PAGE_SIZE - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	ret = 0;
out_bits:
	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	return ret;
}

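/*
 * Compare every bit of the in-memory @bitmap with the corresponding bit in
 * the extent buffer, addressed both as a plain bit offset and as a byte
 * offset plus bit offset, to make sure the two addressing schemes agree.
 */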
static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb,
			   unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			test_err("bits do not match");
			return -EINVAL;
		}

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			test_err("offset bits do not match");
			return -EINVAL;
		}
	}
	return 0;
}

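/*
 * Run the extent buffer bitmap helpers through a series of patterns (all
 * bits set, all bits cleared, a range straddling a page boundary and a
 * pseudo-random pattern) and verify the extent buffer matches the reference
 * bitmap after each step.
 */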
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
			     unsigned long len)
{
	unsigned long i, j;
	u32 x;
	int ret;

	memset(bitmap, 0, len);
	memzero_extent_buffer(eb, 0, len);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_err("bitmap was not zeroed");
		return -EINVAL;
	}

	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_err("setting all bits failed");
		return ret;
	}

	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_err("clearing all bits failed");
		return ret;
	}

	/* Straddling pages test */
	if (len > PAGE_SIZE) {
		bitmap_set(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					 sizeof(long) * BITS_PER_BYTE);
		ret = check_eb_bitmap(bitmap, eb, len);
		if (ret) {
			test_err("setting straddling pages failed");
			return ret;
		}

		bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
		bitmap_clear(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
		extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					   sizeof(long) * BITS_PER_BYTE);
		ret = check_eb_bitmap(bitmap, eb, len);
		if (ret) {
			test_err("clearing straddling pages failed");
			return ret;
		}
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	x = 0;
	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	for (i = 0; i < len * BITS_PER_BYTE / 32; i++) {
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
		for (j = 0; j < 32; j++) {
			if (x & (1U << j)) {
				bitmap_set(bitmap, i * 32 + j, 1);
				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
			}
		}
	}

	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_err("random bit pattern failed");
		return ret;
	}

	return 0;
}

static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	unsigned long len;
	unsigned long *bitmap = NULL;
	struct extent_buffer *eb = NULL;
	int ret;

	test_msg("running extent buffer bitmap tests");

	/*
	 * In ppc64, sectorsize can be 64K, thus 4 * 64K will be larger than
	 * BTRFS_MAX_METADATA_BLOCKSIZE.
	 */
	len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
		? sectorsize * 4 : sectorsize;

	fs_info = btrfs_alloc_dummy_fs_info(len, len);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	bitmap = kmalloc(len, GFP_KERNEL);
	if (!bitmap) {
		test_err("couldn't allocate test bitmap");
		ret = -ENOMEM;
		goto out;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, 0, len);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb, len);
	if (ret)
		goto out;

	/* Do it over again with an extent buffer which isn't page-aligned. */
	free_extent_buffer(eb);
	eb = __alloc_dummy_extent_buffer(fs_info, nodesize / 2, len);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb, len);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("running extent I/O tests");

	ret = test_find_delalloc(sectorsize);
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
out:
	return ret;
}