// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Oracle. All rights reserved.
 */

#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"

static void free_extent_map_tree(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	struct rb_node *node;

	while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
		node = rb_first_cached(&em_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		remove_extent_mapping(em_tree, em);

#ifdef CONFIG_BTRFS_DEBUG
		if (refcount_read(&em->refs) != 1) {
			test_err(
"em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d",
				 em->start, em->len, em->block_start,
				 em->block_len, refcount_read(&em->refs));

			refcount_set(&em->refs, 1);
		}
#endif
		free_extent_map(em);
	}
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet. There is a file
 * extent [0, 16K), followed by another file extent [16K, 20K). Two dio reads
 * enter btrfs_get_extent() concurrently: t1 is reading [8K, 16K), t2 is
 * reading [0, 8K).
 *
 *     t1                            t2
 *  btrfs_get_extent()              btrfs_get_extent()
 *    -> lookup_extent_mapping()      -> lookup_extent_mapping()
 *    -> add_extent_mapping(0, 16K)
 *    -> return em
 *                                    -> add_extent_mapping(0, 16K)
 *                                    -> #handle -EEXIST
 */
static int test_case_1(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	u64 start = 0;
	u64 len = SZ_8K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 16K)");
		goto out;
	}
	free_extent_map(em);

	/* Add [16K, 20K) following [0, 16K) */
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	em->start = SZ_16K;
	em->len = SZ_4K;
	em->block_start = SZ_32K; /* avoid merging */
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [16K, 20K)");
		goto out;
	}
	free_extent_map(em);
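
	/*
	 * Now play the part of t2 from the scenario above: its dio read of
	 * [0, 8K) races against the [0, 16K) em that t1 already inserted,
	 * so the add below has to cope with the underlying -EEXIST.
	 */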
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [0, 8K), should return [0, 16K) instead. */
	em->start = start;
	em->len = len;
	em->block_start = start;
	em->block_len = len;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case1 [%llu %llu]: ret %d", start, start + len, ret);
		goto out;
	}
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_16K ||
	     em->block_start != 0 || em->block_len != SZ_16K)) {
		test_err(
"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

/*
 * Test scenario:
 *
 * Reading an inline extent ends up with -EEXIST, i.e. read an inline extent,
 * discard the page cache and read it again.
 */
static int test_case_2(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 1K)");
		goto out;
	}
	free_extent_map(em);

	/* Add [4K, 8K) following [0, 1K) */
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [4K, 8K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case2 [0 1K]: ret %d", ret);
		goto out;
	}
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_1K ||
	     em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1)) {
		test_err(
"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu)",
			 ret, em->start, em->len, em->block_start,
			 em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}
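
/*
 * Helper for test_case_3 below: the [4K, 8K) em stands in for what t1's
 * buffered write already added, while the btrfs_add_extent_mapping() call
 * plays t2, which read the whole [0, 16K) file extent from disk but only
 * needs the 4K range starting at @start.
 */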
static int __test_case_3(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [4K, 8K) */
	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [4K, 8K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case3 [0x%llx 0x%llx): ret %d",
			 start, start + len, ret);
		goto out;
	}
	/*
	 * Since bytes within em are contiguous, em->block_start is identical to
	 * em->start.
	 */
	if (em &&
	    (start < em->start || start + len > extent_map_end(em) ||
	     em->start != em->block_start || em->len != em->block_len)) {
		test_err(
"case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 16K), two jobs are running concurrently
 * against it, t1 is buffered writing to [4K, 8K) and t2 is doing dio
 * read from [0, 4K) or [8K, 12K) or [12K, 16K).
 *
 * t1 goes ahead of t2 and adds em [4K, 8K) into tree.
 *
 *         t1                       t2
 *  cow_file_range()            btrfs_get_extent()
 *                               -> lookup_extent_mapping()
 *   -> add_extent_mapping()
 *                               -> add_extent_mapping()
 */
static int test_case_3(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	int ret;

	ret = __test_case_3(fs_info, em_tree, 0);
	if (ret)
		return ret;
	ret = __test_case_3(fs_info, em_tree, SZ_8K);
	if (ret)
		return ret;
	ret = __test_case_3(fs_info, em_tree, (12 * SZ_1K));

	return ret;
}

static int __test_case_4(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0K, 8K) */
	em->start = 0;
	em->len = SZ_8K;
	em->block_start = 0;
	em->block_len = SZ_8K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 8K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [8K, 32K) */
	em->start = SZ_8K;
	em->len = 24 * SZ_1K;
	em->block_start = SZ_16K; /* avoid merging */
	em->block_len = 24 * SZ_1K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [8K, 32K)");
		goto out;
	}
	free_extent_map(em);
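
	/*
	 * Now play the part of t2: it still holds the stale on-disk view
	 * [0, 32K), which collides with the two pieces t1 left in the tree,
	 * and it only cares about the 4K range starting at @start.
	 */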
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
	/* Add [0K, 32K) */
	em->start = 0;
	em->len = SZ_32K;
	em->block_start = 0;
	em->block_len = SZ_32K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case4 [0x%llx 0x%llx): ret %d",
			 start, len, ret);
		goto out;
	}
	if (em && (start < em->start || start + len > extent_map_end(em))) {
		test_err(
"case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
			 start, len, ret, em->start, em->len, em->block_start,
			 em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 32K), two jobs are running concurrently
 * against it, t1 is doing dio write to [8K, 32K) and t2 is doing dio
 * read from [0, 4K) or [4K, 8K).
 *
 * t1 goes ahead of t2 and splits em [0, 32K) to em [0K, 8K) and [8K, 32K).
 *
 *         t1                                t2
 *  btrfs_get_blocks_direct()          btrfs_get_blocks_direct()
 *   -> btrfs_get_extent()              -> btrfs_get_extent()
 *       -> lookup_extent_mapping()
 *       -> add_extent_mapping()            -> lookup_extent_mapping()
 *          # load [0, 32K)
 *   -> btrfs_new_extent_direct()
 *       -> btrfs_drop_extent_cache()
 *          # split [0, 32K)
 *       -> add_extent_mapping()
 *          # add [8K, 32K)
 *                                          -> add_extent_mapping()
 *                                             # handle -EEXIST when adding
 *                                             # [0, 32K)
 */
static int test_case_4(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	int ret;

	ret = __test_case_4(fs_info, em_tree, 0);
	if (ret)
		return ret;
	ret = __test_case_4(fs_info, em_tree, SZ_4K);

	return ret;
}

int btrfs_test_extent_map(void)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct extent_map_tree *em_tree;
	int ret = 0;

	test_msg("running extent_map tests");

	/*
	 * Note: the fs_info is not set up completely, we only need
	 * fs_info::fsid for the tracepoint.
	 */
	fs_info = btrfs_alloc_dummy_fs_info(PAGE_SIZE, PAGE_SIZE);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
	if (!em_tree) {
		ret = -ENOMEM;
		goto out;
	}

	extent_map_tree_init(em_tree);

	ret = test_case_1(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_2(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_3(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_4(fs_info, em_tree);

out:
	kfree(em_tree);
	btrfs_free_dummy_fs_info(fs_info);

	return ret;
}