// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Facebook. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "free-space-tree.h"
#include "transaction.h"

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_block_group_cache *block_group,
					struct btrfs_path *path);

void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
{
	u32 bitmap_range;
	size_t bitmap_size;
	u64 num_bitmaps, total_bitmap_size;

	/*
	 * We convert to bitmaps when the disk space required for using extents
	 * exceeds that required for using bitmaps.
	 */
	bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
			      bitmap_range);
	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
	total_bitmap_size = num_bitmaps * bitmap_size;
	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
					    sizeof(struct btrfs_item));

	/*
	 * We allow for a small buffer between the high threshold and low
	 * threshold to avoid thrashing back and forth between the two formats.
	 */
	if (cache->bitmap_high_thresh > 100)
		cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100;
	else
		cache->bitmap_low_thresh = 0;
}
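
/*
 * Illustrative numbers only (not taken from this file): with a 4KiB
 * sectorsize, each bitmap item covers sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS
 * bytes of the block group (2048 bits of 4KiB each, i.e. 8MiB) using a
 * 256-byte item payload. A hypothetical 1GiB block group therefore needs 128
 * bitmap items, and the high threshold works out to roughly
 * 128 * (sizeof(struct btrfs_item) + 256) / sizeof(struct btrfs_item)
 * free space extents (on the order of ~1400 with a 25-byte item header), with
 * the low threshold 100 below that.
 */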

static int add_new_free_space_info(struct btrfs_trans_handle *trans,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_path *path)
{
	struct btrfs_root *root = trans->fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	info = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_free_space_info);
	btrfs_set_free_space_extent_count(leaf, info, 0);
	btrfs_set_free_space_flags(leaf, info, 0);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

EXPORT_FOR_TESTS
struct btrfs_free_space_info *search_free_space_info(
		struct btrfs_trans_handle *trans,
		struct btrfs_block_group_cache *block_group,
		struct btrfs_path *path, int cow)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret != 0) {
		btrfs_warn(fs_info, "missing free space info for %llu",
			   block_group->key.objectid);
		ASSERT(0);
		return ERR_PTR(-ENOENT);
	}

	return btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_free_space_info);
}

/*
 * btrfs_search_slot() but we're looking for the greatest key less than the
 * passed key.
 */
static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key, struct btrfs_path *p,
				  int ins_len, int cow)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		ASSERT(0);
		return -EIO;
	}

	if (p->slots[0] == 0) {
		ASSERT(0);
		return -EIO;
	}
	p->slots[0]--;

	return 0;
}
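
/*
 * A note on the search convention used throughout this file: callers of
 * btrfs_search_prev_slot() build a key with type = (u8)-1 and
 * offset = (u64)-1, so a search for objectid X positions the path just past
 * every real item keyed at X, and the slot decrement leaves it at the last
 * item with objectid <= X (a free space info, extent, or bitmap item).
 */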

static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
{
	return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
}

static unsigned long *alloc_bitmap(u32 bitmap_size)
{
	unsigned long *ret;
	unsigned int nofs_flag;
	u32 bitmap_rounded_size = round_up(bitmap_size, sizeof(unsigned long));

	/*
	 * GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse
	 * into the filesystem as the free space bitmap can be modified in the
	 * critical section of a transaction commit.
	 *
	 * TODO: push the memalloc_nofs_{save,restore}() to the caller where we
	 * know that recursion is unsafe.
	 */
	nofs_flag = memalloc_nofs_save();
	ret = kvzalloc(bitmap_rounded_size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	return ret;
}

static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
{
	u8 *p = ((u8 *)map) + BIT_BYTE(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);

	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}
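
/*
 * A small worked example of le_bitmap_set() (values are hypothetical,
 * assuming the usual byte-mask helpers): setting start = 5, len = 6 on a
 * zeroed map first ORs BITMAP_FIRST_BYTE_MASK(5) = 0xe0 into byte 0 to finish
 * bits 5-7, then leaves the loop with len = 3 and ORs
 * BITMAP_LAST_BYTE_MASK(11) = 0x07 into byte 1 (bits 8-10), producing a
 * byte-wise little-endian run of six set bits.
 */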

EXPORT_FOR_TESTS
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	char *bitmap_cursor;
	u64 start, end;
	u64 bitmap_range, i;
	u32 bitmap_size, flags, expected_extent_count;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     fs_info->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
				u64 first, last;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				first = div_u64(found_key.objectid - start,
						fs_info->sectorsize);
				last = div_u64(found_key.objectid + found_key.offset - start,
					       fs_info->sectorsize);
				le_bitmap_set(bitmap, first, last - first);

				extent_count++;
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	bitmap_cursor = (char *)bitmap;
	bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	i = start;
	while (i < end) {
		unsigned long ptr;
		u64 extent_size;
		u32 data_size;

		extent_size = min(end - i, bitmap_range);
		data_size = free_space_bitmap_size(extent_size,
						   fs_info->sectorsize);

		key.objectid = i;
		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
		key.offset = extent_size;

		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      data_size);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, bitmap_cursor, ptr,
				    data_size);
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(path);

		i += extent_size;
		bitmap_cursor += data_size;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}
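
/*
 * Illustrative on-disk layout (offsets hypothetical): a 256MiB block group at
 * objectid 1G stored in extent format might contain
 *
 *   (1G, FREE_SPACE_INFO, 256M)       extent_count = 2, flags = 0
 *   (1G + 8M, FREE_SPACE_EXTENT, 4M)
 *   (1G + 32M, FREE_SPACE_EXTENT, 16M)
 *
 * After conversion the extent keys are replaced by a run of
 * (1G, FREE_SPACE_BITMAP, 8M), (1G + 8M, FREE_SPACE_BITMAP, 8M), ... items
 * (assuming a 4KiB sectorsize, so each item spans 8MiB), whose payload bytes
 * have the bits for those same two ranges set, and the info item gains the
 * BTRFS_FREE_SPACE_USING_BITMAPS flag.
 */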

EXPORT_FOR_TESTS
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	u64 start, end;
	u32 bitmap_size, flags, expected_extent_count;
	unsigned long nrbits, start_bit, end_bit;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     fs_info->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				unsigned long ptr;
				char *bitmap_cursor;
				u32 bitmap_pos, data_size;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				bitmap_pos = div_u64(found_key.objectid - start,
						     fs_info->sectorsize *
						     BITS_PER_BYTE);
				bitmap_cursor = ((char *)bitmap) + bitmap_pos;
				data_size = free_space_bitmap_size(found_key.offset,
								   fs_info->sectorsize);

				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
				read_extent_buffer(leaf, bitmap_cursor, ptr,
						   data_size);

				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	nrbits = div_u64(block_group->key.offset, block_group->fs_info->sectorsize);
	start_bit = find_next_bit_le(bitmap, nrbits, 0);

	while (start_bit < nrbits) {
		end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit);
		ASSERT(start_bit < end_bit);

		key.objectid = start + start_bit * block_group->fs_info->sectorsize;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize;

		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		btrfs_release_path(path);

		extent_count++;

		start_bit = find_next_bit_le(bitmap, nrbits, end_bit);
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}
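
/*
 * The run extraction above in miniature (hypothetical 8-sector block group):
 * a bitmap of 0b00111100 makes find_next_bit_le() return 2 and
 * find_next_zero_bit_le() return 6, so one FREE_SPACE_EXTENT key is inserted
 * at start + 2 * sectorsize with key.offset = 4 * sectorsize; the next
 * find_next_bit_le() call returns nrbits and the loop ends.
 */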

static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
					  struct btrfs_block_group_cache *block_group,
					  struct btrfs_path *path,
					  int new_extents)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	u32 extent_count;
	int ret = 0;

	if (new_extents == 0)
		return 0;

	info = search_free_space_info(trans, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	flags = btrfs_free_space_flags(path->nodes[0], info);
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);

	extent_count += new_extents;
	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
	    extent_count > block_group->bitmap_high_thresh) {
		ret = convert_free_space_to_bitmaps(trans, block_group, path);
	} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
		   extent_count < block_group->bitmap_low_thresh) {
		ret = convert_free_space_to_extents(trans, block_group, path);
	}

out:
	return ret;
}

EXPORT_FOR_TESTS
int free_space_test_bit(struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path, u64 offset)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 found_start, found_end;
	unsigned long ptr, i;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(offset >= found_start && offset < found_end);

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	i = div_u64(offset - found_start,
		    block_group->fs_info->sectorsize);
	return !!extent_buffer_test_bit(leaf, ptr, i);
}

static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
				struct btrfs_path *path, u64 *start, u64 *size,
				int bit)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 end = *start + *size;
	u64 found_start, found_end;
	unsigned long ptr, first, last;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(*start >= found_start && *start < found_end);
	ASSERT(end > found_start);

	if (end > found_end)
		end = found_end;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	first = div_u64(*start - found_start, fs_info->sectorsize);
	last = div_u64(end - found_start, fs_info->sectorsize);
	if (bit)
		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
	else
		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
	btrfs_mark_buffer_dirty(leaf);

	*size -= end - *start;
	*start = end;
}
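
/*
 * Cursor handling in free_space_set_bits(), with hypothetical numbers: if
 * each bitmap item covers 8MiB and a caller passes *start at 6MiB into an
 * item with *size = 4MiB, only the 2MiB that fits in the current item is
 * modified; *start is advanced to the item's end and *size drops to 2MiB, so
 * modify_free_space_bitmap() steps to the next bitmap item and calls in here
 * again for the remainder.
 */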

/*
 * We can't use btrfs_next_item() in modify_free_space_bitmap() because
 * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy
 * tree walking in btrfs_next_leaf() anyways because we know exactly what we're
 * looking for.
 */
static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct btrfs_path *p)
{
	struct btrfs_key key;

	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
		p->slots[0]++;
		return 0;
	}

	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
	btrfs_release_path(p);

	key.objectid += key.offset;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
}

/*
 * If remove is 1, then we are removing free space, thus clearing bits in the
 * bitmap. If remove is 0, then we are adding free space, thus setting bits in
 * the bitmap.
 */
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size, int remove)
{
	struct btrfs_root *root = block_group->fs_info->free_space_root;
	struct btrfs_key key;
	u64 end = start + size;
	u64 cur_start, cur_size;
	int prev_bit, next_bit;
	int new_extents;
	int ret;

	/*
	 * Read the bit for the block immediately before the extent of space if
	 * that block is within the block group.
	 */
	if (start > block_group->key.objectid) {
		u64 prev_block = start - block_group->fs_info->sectorsize;

		key.objectid = prev_block;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = free_space_test_bit(block_group, path, prev_block);

		/* The previous block may have been in the previous bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (start >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}
	} else {
		key.objectid = start;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = -1;
	}

	/*
	 * Iterate over all of the bitmaps overlapped by the extent of space,
	 * clearing/setting bits as required.
	 */
	cur_start = start;
	cur_size = size;
	while (1) {
		free_space_set_bits(block_group, path, &cur_start, &cur_size,
				    !remove);
		if (cur_size == 0)
			break;
		ret = free_space_next_bitmap(trans, root, path);
		if (ret)
			goto out;
	}

	/*
	 * Read the bit for the block immediately after the extent of space if
	 * that block is within the block group.
	 */
	if (end < block_group->key.objectid + block_group->key.offset) {
		/* The next block may be in the next bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (end >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}

		next_bit = free_space_test_bit(block_group, path, end);
	} else {
		next_bit = -1;
	}

	if (remove) {
		new_extents = -1;
		if (prev_bit == 1) {
			/* Leftover on the left. */
			new_extents++;
		}
		if (next_bit == 1) {
			/* Leftover on the right. */
			new_extents++;
		}
	} else {
		new_extents = 1;
		if (prev_bit == 1) {
			/* Merging with neighbor on the left. */
			new_extents--;
		}
		if (next_bit == 1) {
			/* Merging with neighbor on the right. */
			new_extents--;
		}
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, block_group, path,
					     new_extents);

out:
	return ret;
}
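
/*
 * The new_extents bookkeeping above, with hypothetical numbers: removing the
 * middle 1MiB of a 3MiB free run leaves prev_bit == 1 and next_bit == 1, so
 * new_extents goes from -1 to +1 (one run became two). Adding 1MiB of free
 * space that touches a free run only on its left (prev_bit == 1,
 * next_bit == 0) leaves new_extents at 0: the existing run simply grew.
 */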

static int remove_free_space_extent(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size)
{
	struct btrfs_root *root = trans->fs_info->free_space_root;
	struct btrfs_key key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = -1;
	int ret;

	key.objectid = start;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(start >= found_start && end <= found_end);

	/*
	 * Okay, now that we've found the free space extent which contains the
	 * free space that we are removing, there are four cases:
	 *
	 * 1. We're using the whole extent: delete the key we found and
	 *    decrement the free space extent count.
	 * 2. We are using part of the extent starting at the beginning: delete
	 *    the key we found and insert a new key representing the leftover at
	 *    the end. There is no net change in the number of extents.
	 * 3. We are using part of the extent ending at the end: delete the key
	 *    we found and insert a new key representing the leftover at the
	 *    beginning. There is no net change in the number of extents.
	 * 4. We are using part of the extent in the middle: delete the key we
	 *    found and insert two new keys representing the leftovers on each
	 *    side. Where we used to have one extent, we now have two, so
	 *    increment the extent count. We may need to convert the block group
	 *    to bitmaps as a result.
	 */

	/* Delete the existing key (cases 1-4). */
	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/* Add a key for leftovers at the beginning (cases 3 and 4). */
	if (start > found_start) {
		key.objectid = found_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = start - found_start;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	/* Add a key for leftovers at the end (cases 2 and 4). */
	if (end < found_end) {
		key.objectid = end;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = found_end - end;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, block_group, path,
					     new_extents);

out:
	return ret;
}
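
/*
 * Case 4 with made-up numbers: removing [12M, 13M) from a free space extent
 * keyed (8M, FREE_SPACE_EXTENT, 8M) deletes that key and inserts
 * (8M, FREE_SPACE_EXTENT, 4M) and (13M, FREE_SPACE_EXTENT, 3M), and
 * new_extents ends up at +1.
 */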

EXPORT_FOR_TESTS
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, block_group, path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, block_group, path,
						start, size, 1);
	} else {
		return remove_free_space_extent(trans, block_group, path,
						start, size);
	}
}

int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(trans->fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __remove_from_free_space_tree(trans, block_group, path, start,
					    size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

static int add_free_space_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_path *path,
				 u64 start, u64 size)
{
	struct btrfs_root *root = trans->fs_info->free_space_root;
	struct btrfs_key key, new_key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = 1;
	int ret;

	/*
	 * We are adding a new extent of free space, but we need to merge
	 * extents. There are four cases here:
	 *
	 * 1. The new extent does not have any immediate neighbors to merge
	 *    with: add the new key and increment the free space extent count.
	 *    We may need to convert the block group to bitmaps as a result.
	 * 2. The new extent has an immediate neighbor before it: remove the
	 *    previous key and insert a new key combining both of them. There
	 *    is no net change in the number of extents.
	 * 3. The new extent has an immediate neighbor after it: remove the
	 *    next key and insert a new key combining both of them. There is
	 *    no net change in the number of extents.
	 * 4. The new extent has immediate neighbors on both sides: remove
	 *    both of the keys and insert a new key combining all of them.
	 *    Where we used to have two extents, we now have one, so decrement
	 *    the extent count.
	 */

	new_key.objectid = start;
	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
	new_key.offset = size;

	/* Search for a neighbor on the left. */
	if (start == block_group->key.objectid)
		goto right;
	key.objectid = start - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto right;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT(found_start < start && found_end <= start);

	/*
	 * Delete the neighbor on the left and absorb it into the new key
	 * (cases 2 and 4).
	 */
	if (found_end == start) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.objectid = found_start;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

right:
	/* Search for a neighbor on the right. */
	if (end == block_group->key.objectid + block_group->key.offset)
		goto insert;
	key.objectid = end;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto insert;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT((found_start < start && found_end <= start) ||
	       (found_start >= end && found_end > end));

	/*
	 * Delete the neighbor on the right and absorb it into the new key
	 * (cases 3 and 4).
	 */
	if (found_start == end) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

insert:
	/* Insert the new key (cases 1-4). */
	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
	if (ret)
		goto out;

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, block_group, path,
					     new_extents);

out:
	return ret;
}
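
/*
 * Merge case 4 with made-up numbers: freeing [12M, 13M) between existing keys
 * (8M, FREE_SPACE_EXTENT, 4M) and (13M, FREE_SPACE_EXTENT, 3M) deletes both
 * neighbors and inserts the combined key (8M, FREE_SPACE_EXTENT, 8M), with
 * new_extents ending up at -1.
 */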

EXPORT_FOR_TESTS
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, block_group, path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, block_group, path,
						start, size, 0);
	} else {
		return add_free_space_extent(trans, block_group, path, start,
					     size);
	}
}

int add_to_free_space_tree(struct btrfs_trans_handle *trans,
			   u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(trans->fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __add_to_free_space_tree(trans, block_group, path, start, size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Populate the free space tree by walking the extent tree. Operations on the
 * extent tree that happen as a result of writes to the free space tree will go
 * through the normal add/remove hooks.
 */
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group)
{
	struct btrfs_root *extent_root = trans->fs_info->extent_root;
	struct btrfs_path *path, *path2;
	struct btrfs_key key;
	u64 start, end;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	path2 = btrfs_alloc_path();
	if (!path2) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	ret = add_new_free_space_info(trans, block_group, path2);
	if (ret)
		goto out;

	mutex_lock(&block_group->free_space_lock);

	/*
	 * Iterate through all of the extent and metadata items in this block
	 * group, adding the free space between them and the free space at the
	 * end. Note that EXTENT_ITEM and METADATA_ITEM are less than
	 * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
	 * contained in.
	 */
	key.objectid = block_group->key.objectid;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
	if (ret < 0)
		goto out_locked;
	ASSERT(ret == 0);

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;
	while (1) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			if (key.objectid >= end)
				break;

			if (start < key.objectid) {
				ret = __add_to_free_space_tree(trans,
							       block_group,
							       path2, start,
							       key.objectid -
							       start);
				if (ret)
					goto out_locked;
			}
			start = key.objectid;
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				start += trans->fs_info->nodesize;
			else
				start += key.offset;
		} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (key.objectid != block_group->key.objectid)
				break;
		}

		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			goto out_locked;
		if (ret)
			break;
	}
	if (start < end) {
		ret = __add_to_free_space_tree(trans, block_group, path2,
					       start, end - start);
		if (ret)
			goto out_locked;
	}

	ret = 0;
out_locked:
	mutex_unlock(&block_group->free_space_lock);
out:
	btrfs_free_path(path2);
	btrfs_free_path(path);
	return ret;
}
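
/*
 * Key-ordering example for the walk above (all offsets hypothetical): for a
 * 256MiB block group at objectid 1G the extent tree may hold
 *
 *   (1G, EXTENT_ITEM, 1M)           <- extent at the very start of the group
 *   (1G, BLOCK_GROUP_ITEM, 256M)
 *   (1G + 16M, METADATA_ITEM, 0)
 *
 * EXTENT_ITEM (168) and METADATA_ITEM (169) sort before BLOCK_GROUP_ITEM
 * (192) at the same objectid, so the search starts at
 * (1G, BTRFS_EXTENT_ITEM_KEY, 0) and the loop simply skips this group's
 * BLOCK_GROUP_ITEM key when it reaches it.
 */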

int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *free_space_root;
	struct btrfs_block_group_cache *block_group;
	struct rb_node *node;
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	free_space_root = btrfs_create_tree(trans,
					    BTRFS_FREE_SPACE_TREE_OBJECTID);
	if (IS_ERR(free_space_root)) {
		ret = PTR_ERR(free_space_root);
		goto abort;
	}
	fs_info->free_space_root = free_space_root;

	node = rb_first(&fs_info->block_group_cache_tree);
	while (node) {
		block_group = rb_entry(node, struct btrfs_block_group_cache,
				       cache_node);
		ret = populate_free_space_tree(trans, block_group);
		if (ret)
			goto abort;
		node = rb_next(node);
	}

	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);

	return btrfs_commit_transaction(trans);

abort:
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}

static int clear_free_space_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int nr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;

		nr = btrfs_header_nritems(path->nodes[0]);
		if (!nr)
			break;

		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *free_space_root = fs_info->free_space_root;
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	fs_info->free_space_root = NULL;

	ret = clear_free_space_tree(trans, free_space_root);
	if (ret)
		goto abort;

	ret = btrfs_del_root(trans, &free_space_root->root_key);
	if (ret)
		goto abort;

	list_del(&free_space_root->dirty_list);

	btrfs_tree_lock(free_space_root->node);
	btrfs_clean_tree_block(free_space_root->node);
	btrfs_tree_unlock(free_space_root->node);
	btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
			      0, 1);

	free_extent_buffer(free_space_root->node);
	free_extent_buffer(free_space_root->commit_root);
	kfree(free_space_root);

	return btrfs_commit_transaction(trans);

abort:
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_block_group_cache *block_group,
					struct btrfs_path *path)
{
	int ret;

	block_group->needs_free_space = 0;

	ret = add_new_free_space_info(trans, block_group, path);
	if (ret)
		return ret;

	return __add_to_free_space_tree(trans, block_group, path,
					block_group->key.objectid,
					block_group->key.offset);
}

int add_block_group_free_space(struct btrfs_trans_handle *trans,
			       struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path = NULL;
	int ret = 0;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	mutex_lock(&block_group->free_space_lock);
	if (!block_group->needs_free_space)
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = __add_block_group_free_space(trans, block_group, path);

out:
	btrfs_free_path(path);
	mutex_unlock(&block_group->free_space_lock);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

int remove_block_group_free_space(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_root *root = trans->fs_info->free_space_root;
	struct btrfs_path *path;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	u64 start, end;
	int done = 0, nr;
	int ret;

	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
		return 0;

	if (block_group->needs_free_space) {
		/* We never added this block group to the free space tree. */
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				nr++;
				path->slots[0]--;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY ||
				   found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int prev_bit = 0, bit;
	/* Initialize to silence GCC. */
	u64 extent_start = 0;
	u64 end, offset;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = fs_info->free_space_root;

	end = block_group->key.objectid + block_group->key.offset;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		offset = key.objectid;
		while (offset < key.objectid + key.offset) {
			bit = free_space_test_bit(block_group, path, offset);
			if (prev_bit == 0 && bit == 1) {
				extent_start = offset;
			} else if (prev_bit == 1 && bit == 0) {
				total_found += add_new_free_space(block_group,
								  extent_start,
								  offset);
				if (total_found > CACHING_CTL_WAKE_UP) {
					total_found = 0;
					wake_up(&caching_ctl->wait);
				}
				extent_count++;
			}
			prev_bit = bit;
			offset += fs_info->sectorsize;
		}
	}
	if (prev_bit == 1) {
		total_found += add_new_free_space(block_group, extent_start,
						  end);
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}
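
/*
 * The caching scan above turns bit runs back into in-memory free space. With
 * hypothetical 4KiB sectors, a bitmap item whose first byte is 0b00011000
 * produces one add_new_free_space() call covering two sectors (the 0 -> 1
 * transition records extent_start, the 1 -> 0 transition emits the range);
 * a run that is still set when the last bitmap ends is emitted after the loop.
 */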

static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 end;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = fs_info->free_space_root;

	end = block_group->key.objectid + block_group->key.offset;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		total_found += add_new_free_space(block_group, key.objectid,
						  key.objectid + key.offset);
		if (total_found > CACHING_CTL_WAKE_UP) {
			total_found = 0;
			wake_up(&caching_ctl->wait);
		}
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}

int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_space_info *info;
	struct btrfs_path *path;
	u32 extent_count, flags;
	int ret;

	block_group = caching_ctl->block_group;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Just like caching_thread() doesn't want to deadlock on the extent
	 * tree, we don't want to deadlock on the free space tree.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	info = search_free_space_info(NULL, block_group, path, 0);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
	flags = btrfs_free_space_flags(path->nodes[0], info);

	/*
	 * We left path pointing to the free space info item, so now
	 * load_free_space_foo can just iterate through the free space tree
	 * from there.
	 */
	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
		ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
	else
		ret = load_free_space_extents(caching_ctl, path, extent_count);

out:
	btrfs_free_path(path);
	return ret;
}