// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Facebook. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "free-space-tree.h"
#include "transaction.h"
#include "block-group.h"

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_block_group *block_group,
					struct btrfs_path *path);

static struct btrfs_root *btrfs_free_space_root(
				struct btrfs_block_group *block_group)
{
	struct btrfs_key key = {
		.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};

	if (btrfs_fs_incompat(block_group->fs_info, EXTENT_TREE_V2))
		key.offset = block_group->global_root_id;
	return btrfs_global_root(block_group->fs_info, &key);
}

void set_free_space_tree_thresholds(struct btrfs_block_group *cache)
{
	u32 bitmap_range;
	size_t bitmap_size;
	u64 num_bitmaps, total_bitmap_size;

	if (WARN_ON(cache->length == 0))
		btrfs_warn(cache->fs_info, "block group %llu length is zero",
			   cache->start);

	/*
	 * We convert to bitmaps when the disk space required for using extents
	 * exceeds that required for using bitmaps.
	 */
	bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	num_bitmaps = div_u64(cache->length + bitmap_range - 1, bitmap_range);
	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
	total_bitmap_size = num_bitmaps * bitmap_size;
	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
					    sizeof(struct btrfs_item));

	/*
	 * We allow for a small buffer between the high threshold and low
	 * threshold to avoid thrashing back and forth between the two formats.
	 */
	if (cache->bitmap_high_thresh > 100)
		cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100;
	else
		cache->bitmap_low_thresh = 0;
}

static int add_new_free_space_info(struct btrfs_trans_handle *trans,
				   struct btrfs_block_group *block_group,
				   struct btrfs_path *path)
{
	struct btrfs_root *root = btrfs_free_space_root(block_group);
	struct btrfs_free_space_info *info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = block_group->start;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->length;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	info = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_free_space_info);
	btrfs_set_free_space_extent_count(leaf, info, 0);
	btrfs_set_free_space_flags(leaf, info, 0);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * Look up the FREE_SPACE_INFO item of a block group. On success the path is
 * left pointing at the item and a pointer into the leaf is returned; on
 * failure an ERR_PTR() is returned.
 */
EXPORT_FOR_TESTS
struct btrfs_free_space_info *search_free_space_info(
		struct btrfs_trans_handle *trans,
		struct btrfs_block_group *block_group,
		struct btrfs_path *path, int cow)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *root = btrfs_free_space_root(block_group);
	struct btrfs_key key;
	int ret;

	key.objectid = block_group->start;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret != 0) {
		btrfs_warn(fs_info, "missing free space info for %llu",
			   block_group->start);
		ASSERT(0);
		return ERR_PTR(-ENOENT);
	}

	return btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_free_space_info);
}

/*
 * btrfs_search_slot() but we're looking for the greatest key less than the
 * passed key.
 */
static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key, struct btrfs_path *p,
				  int ins_len, int cow)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		ASSERT(0);
		return -EIO;
	}

	if (p->slots[0] == 0) {
		ASSERT(0);
		return -EIO;
	}
	p->slots[0]--;

	return 0;
}

static inline u32 free_space_bitmap_size(const struct btrfs_fs_info *fs_info,
					 u64 size)
{
	return DIV_ROUND_UP(size >> fs_info->sectorsize_bits, BITS_PER_BYTE);
}

static unsigned long *alloc_bitmap(u32 bitmap_size)
{
	unsigned long *ret;
	unsigned int nofs_flag;
	u32 bitmap_rounded_size = round_up(bitmap_size, sizeof(unsigned long));

	/*
	 * GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse
	 * into the filesystem as the free space bitmap can be modified in the
	 * critical section of a transaction commit.
	 *
	 * TODO: push the memalloc_nofs_{save,restore}() to the caller where we
	 * know that recursion is unsafe.
	 */
	nofs_flag = memalloc_nofs_save();
	ret = kvzalloc(bitmap_rounded_size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	return ret;
}

static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
{
	u8 *p = ((u8 *)map) + BIT_BYTE(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);

	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}

/*
 * Convert a block group's free space from individual extent items to bitmaps:
 * the existing FREE_SPACE_EXTENT keys are accumulated into an in-memory
 * bitmap, deleted, and then written back out as FREE_SPACE_BITMAP items.
 */
EXPORT_FOR_TESTS
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = btrfs_free_space_root(block_group);
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	char *bitmap_cursor;
	u64 start, end;
	u64 bitmap_range, i;
	u32 bitmap_size, flags, expected_extent_count;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(fs_info, block_group->length);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->start;
	end = block_group->start + block_group->length;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->start);
				ASSERT(found_key.offset == block_group->length);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
				u64 first, last;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				first = div_u64(found_key.objectid - start,
						fs_info->sectorsize);
				last = div_u64(found_key.objectid + found_key.offset - start,
					       fs_info->sectorsize);
				le_bitmap_set(bitmap, first, last - first);

				extent_count++;
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->start, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	bitmap_cursor = (char *)bitmap;
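	/*
	 * Write the accumulated in-memory bitmap back out as on-disk bitmap
	 * items, one FREE_SPACE_BITMAP key per BTRFS_FREE_SPACE_BITMAP_BITS
	 * worth of sectors.
	 */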
	bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	i = start;
	while (i < end) {
		unsigned long ptr;
		u64 extent_size;
		u32 data_size;

		extent_size = min(end - i, bitmap_range);
		data_size = free_space_bitmap_size(fs_info, extent_size);

		key.objectid = i;
		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
		key.offset = extent_size;

		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      data_size);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, bitmap_cursor, ptr,
				    data_size);
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(path);

		i += extent_size;
		bitmap_cursor += data_size;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Convert a block group's free space from bitmap items back to individual
 * extent items: the FREE_SPACE_BITMAP items are read into an in-memory
 * bitmap, deleted, and then rewritten as FREE_SPACE_EXTENT keys.
 */
EXPORT_FOR_TESTS
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = btrfs_free_space_root(block_group);
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	u64 start, end;
	u32 bitmap_size, flags, expected_extent_count;
	unsigned long nrbits, start_bit, end_bit;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(fs_info, block_group->length);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->start;
	end = block_group->start + block_group->length;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->start);
				ASSERT(found_key.offset == block_group->length);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				unsigned long ptr;
				char *bitmap_cursor;
				u32 bitmap_pos, data_size;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				bitmap_pos = div_u64(found_key.objectid - start,
						     fs_info->sectorsize *
						     BITS_PER_BYTE);
				bitmap_cursor = ((char *)bitmap) + bitmap_pos;
				data_size = free_space_bitmap_size(fs_info,
								   found_key.offset);

				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
				read_extent_buffer(leaf, bitmap_cursor, ptr,
						   data_size);

				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
	start_bit = find_next_bit_le(bitmap, nrbits, 0);

	while (start_bit < nrbits) {
		end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit);
		ASSERT(start_bit < end_bit);

		key.objectid = start + start_bit * block_group->fs_info->sectorsize;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize;

		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		btrfs_release_path(path);

		extent_count++;

		start_bit = find_next_bit_le(bitmap, nrbits, end_bit);
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->start, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Add new_extents (which may be negative) to the block group's free space
 * extent count and convert between the extent and bitmap formats if the
 * count crosses one of the thresholds.
 */
static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
					  struct btrfs_block_group *block_group,
					  struct btrfs_path *path,
					  int new_extents)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	u32 extent_count;
	int ret = 0;

	if (new_extents == 0)
		return 0;

	info = search_free_space_info(trans, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	flags = btrfs_free_space_flags(path->nodes[0], info);
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);

	extent_count += new_extents;
	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
	    extent_count > block_group->bitmap_high_thresh) {
		ret = convert_free_space_to_bitmaps(trans, block_group, path);
	} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
		   extent_count < block_group->bitmap_low_thresh) {
		ret = convert_free_space_to_extents(trans, block_group, path);
	}

out:
	return ret;
}

EXPORT_FOR_TESTS
int free_space_test_bit(struct btrfs_block_group *block_group,
			struct btrfs_path *path, u64 offset)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 found_start, found_end;
	unsigned long ptr, i;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(offset >= found_start && offset < found_end);

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	i = div_u64(offset - found_start,
		    block_group->fs_info->sectorsize);
	return !!extent_buffer_test_bit(leaf, ptr, i);
}

/*
 * Set or clear the bits for the range [*start, *start + *size) within the
 * bitmap item that the path currently points to. On return, *start and *size
 * are advanced past the portion of the range covered by this bitmap.
 */
static void free_space_set_bits(struct btrfs_block_group *block_group,
				struct btrfs_path *path, u64 *start, u64 *size,
				int bit)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 end = *start + *size;
	u64 found_start, found_end;
	unsigned long ptr, first, last;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(*start >= found_start && *start < found_end);
	ASSERT(end > found_start);

	if (end > found_end)
		end = found_end;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	first = (*start - found_start) >> fs_info->sectorsize_bits;
	last = (end - found_start) >> fs_info->sectorsize_bits;
	if (bit)
		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
	else
		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
	btrfs_mark_buffer_dirty(leaf);

	*size -= end - *start;
	*start = end;
}

/*
 * We can't use btrfs_next_item() in modify_free_space_bitmap() because
 * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy
 * tree walking in btrfs_next_leaf() anyway because we know exactly what we're
 * looking for.
 */
static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct btrfs_path *p)
{
	struct btrfs_key key;

	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
		p->slots[0]++;
		return 0;
	}

	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
	btrfs_release_path(p);

	key.objectid += key.offset;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
}

/*
 * If remove is 1, then we are removing free space, thus clearing bits in the
 * bitmap. If remove is 0, then we are adding free space, thus setting bits in
 * the bitmap.
 */
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size, int remove)
{
	struct btrfs_root *root = btrfs_free_space_root(block_group);
	struct btrfs_key key;
	u64 end = start + size;
	u64 cur_start, cur_size;
	int prev_bit, next_bit;
	int new_extents;
	int ret;

	/*
	 * Read the bit for the block immediately before the extent of space if
	 * that block is within the block group.
	 */
	if (start > block_group->start) {
		u64 prev_block = start - block_group->fs_info->sectorsize;

		key.objectid = prev_block;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = free_space_test_bit(block_group, path, prev_block);

		/* The previous block may have been in the previous bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (start >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}
	} else {
		key.objectid = start;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = -1;
	}

	/*
	 * Iterate over all of the bitmaps overlapped by the extent of space,
	 * clearing/setting bits as required.
	 */
	cur_start = start;
	cur_size = size;
	while (1) {
		free_space_set_bits(block_group, path, &cur_start, &cur_size,
				    !remove);
		if (cur_size == 0)
			break;
		ret = free_space_next_bitmap(trans, root, path);
		if (ret)
			goto out;
	}

	/*
	 * Read the bit for the block immediately after the extent of space if
	 * that block is within the block group.
	 */
	if (end < block_group->start + block_group->length) {
		/* The next block may be in the next bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (end >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}

		next_bit = free_space_test_bit(block_group, path, end);
	} else {
		next_bit = -1;
	}

	if (remove) {
		new_extents = -1;
		if (prev_bit == 1) {
			/* Leftover on the left. */
			new_extents++;
		}
		if (next_bit == 1) {
			/* Leftover on the right. */
			new_extents++;
		}
	} else {
		new_extents = 1;
		if (prev_bit == 1) {
			/* Merging with neighbor on the left. */
			new_extents--;
		}
		if (next_bit == 1) {
			/* Merging with neighbor on the right. */
			new_extents--;
		}
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, block_group, path,
					     new_extents);

out:
	return ret;
}

static int remove_free_space_extent(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size)
{
	struct btrfs_root *root = btrfs_free_space_root(block_group);
	struct btrfs_key key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = -1;
	int ret;

	key.objectid = start;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(start >= found_start && end <= found_end);

	/*
	 * Okay, now that we've found the free space extent which contains the
	 * free space that we are removing, there are four cases:
	 *
	 * 1. We're using the whole extent: delete the key we found and
	 *    decrement the free space extent count.
	 * 2. We are using part of the extent starting at the beginning: delete
	 *    the key we found and insert a new key representing the leftover at
	 *    the end. There is no net change in the number of extents.
	 * 3. We are using part of the extent ending at the end: delete the key
	 *    we found and insert a new key representing the leftover at the
	 *    beginning. There is no net change in the number of extents.
	 * 4. We are using part of the extent in the middle: delete the key we
	 *    found and insert two new keys representing the leftovers on each
	 *    side. Where we used to have one extent, we now have two, so
	 *    increment the extent count. We may need to convert the block
	 *    group to bitmaps as a result.
	 */

	/* Delete the existing key (cases 1-4). */
	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/* Add a key for leftovers at the beginning (cases 3 and 4). */
	if (start > found_start) {
		key.objectid = found_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = start - found_start;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	/* Add a key for leftovers at the end (cases 2 and 4). */
	if (end < found_end) {
		key.objectid = end;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = found_end - end;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, block_group, path,
					     new_extents);

out:
	return ret;
}

EXPORT_FOR_TESTS
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group *block_group,
				  struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, block_group, path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, block_group, path,
						start, size, 1);
	} else {
		return remove_free_space_extent(trans, block_group, path,
						start, size);
	}
}

int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				u64 start, u64 size)
{
	struct btrfs_block_group *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(trans->fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __remove_from_free_space_tree(trans, block_group, path, start,
					    size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

static int add_free_space_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *block_group,
				 struct btrfs_path *path,
				 u64 start, u64 size)
{
	struct btrfs_root *root = btrfs_free_space_root(block_group);
	struct btrfs_key key, new_key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = 1;
	int ret;

	/*
	 * We are adding a new extent of free space, but we need to merge
	 * extents. There are four cases here:
	 *
	 * 1. The new extent does not have any immediate neighbors to merge
	 *    with: add the new key and increment the free space extent count.
	 *    We may need to convert the block group to bitmaps as a result.
	 * 2. The new extent has an immediate neighbor before it: remove the
	 *    previous key and insert a new key combining both of them. There
	 *    is no net change in the number of extents.
	 * 3. The new extent has an immediate neighbor after it: remove the
	 *    next key and insert a new key combining both of them. There is no
	 *    net change in the number of extents.
	 * 4. The new extent has immediate neighbors on both sides: remove both
	 *    of the keys and insert a new key combining all of them. Where we
	 *    used to have two extents, we now have one, so decrement the
	 *    extent count.
	 */

	new_key.objectid = start;
	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
	new_key.offset = size;

	/* Search for a neighbor on the left. */
	if (start == block_group->start)
		goto right;
	key.objectid = start - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto right;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->start &&
	       found_end > block_group->start);
	ASSERT(found_start < start && found_end <= start);

	/*
	 * Delete the neighbor on the left and absorb it into the new key
	 * (cases 2 and 4).
	 */
	if (found_end == start) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.objectid = found_start;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

right:
	/* Search for a neighbor on the right. */
	if (end == block_group->start + block_group->length)
		goto insert;
	key.objectid = end;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto insert;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->start &&
	       found_end > block_group->start);
	ASSERT((found_start < start && found_end <= start) ||
	       (found_start >= end && found_end > end));

	/*
	 * Delete the neighbor on the right and absorb it into the new key
	 * (cases 3 and 4).
	 */
	if (found_start == end) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

insert:
	/* Insert the new key (cases 1-4). */
	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
	if (ret)
		goto out;

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, block_group, path,
					     new_extents);

out:
	return ret;
}

EXPORT_FOR_TESTS
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_block_group *block_group,
			     struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, block_group, path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, block_group, path,
						start, size, 0);
	} else {
		return add_free_space_extent(trans, block_group, path, start,
					     size);
	}
}

int add_to_free_space_tree(struct btrfs_trans_handle *trans,
			   u64 start, u64 size)
{
	struct btrfs_block_group *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(trans->fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __add_to_free_space_tree(trans, block_group, path, start, size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Populate the free space tree by walking the extent tree. Operations on the
 * extent tree that happen as a result of writes to the free space tree will go
 * through the normal add/remove hooks.
 */
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group *block_group)
{
	struct btrfs_root *extent_root;
	struct btrfs_path *path, *path2;
	struct btrfs_key key;
	u64 start, end;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	path2 = btrfs_alloc_path();
	if (!path2) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	ret = add_new_free_space_info(trans, block_group, path2);
	if (ret)
		goto out;

	mutex_lock(&block_group->free_space_lock);

	/*
	 * Iterate through all of the extent and metadata items in this block
	 * group, adding the free space between them and the free space at the
	 * end. Note that EXTENT_ITEM and METADATA_ITEM are less than
	 * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
	 * contained in.
	 */
	key.objectid = block_group->start;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	extent_root = btrfs_extent_root(trans->fs_info, key.objectid);
	ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
	if (ret < 0)
		goto out_locked;
	ASSERT(ret == 0);

	start = block_group->start;
	end = block_group->start + block_group->length;
	while (1) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			if (key.objectid >= end)
				break;

			if (start < key.objectid) {
				ret = __add_to_free_space_tree(trans,
							       block_group,
							       path2, start,
							       key.objectid -
							       start);
				if (ret)
					goto out_locked;
			}
			start = key.objectid;
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				start += trans->fs_info->nodesize;
			else
				start += key.offset;
		} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (key.objectid != block_group->start)
				break;
		}

		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			goto out_locked;
		if (ret)
			break;
	}
	if (start < end) {
		ret = __add_to_free_space_tree(trans, block_group, path2,
					       start, end - start);
		if (ret)
			goto out_locked;
	}

	ret = 0;
out_locked:
	mutex_unlock(&block_group->free_space_lock);
out:
	btrfs_free_path(path2);
	btrfs_free_path(path);
	return ret;
}

int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *free_space_root;
	struct btrfs_block_group *block_group;
	struct rb_node *node;
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
	free_space_root = btrfs_create_tree(trans,
					    BTRFS_FREE_SPACE_TREE_OBJECTID);
	if (IS_ERR(free_space_root)) {
		ret = PTR_ERR(free_space_root);
		goto abort;
	}
	ret = btrfs_global_root_insert(free_space_root);
	if (ret) {
		btrfs_put_root(free_space_root);
		goto abort;
	}

	node = rb_first_cached(&fs_info->block_group_cache_tree);
	while (node) {
		block_group = rb_entry(node, struct btrfs_block_group,
				       cache_node);
		ret = populate_free_space_tree(trans, block_group);
		if (ret)
			goto abort;
		node = rb_next(node);
	}

	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	ret = btrfs_commit_transaction(trans);

	/*
	 * Now that we've committed the transaction any reading of our commit
	 * root will be safe, so we can cache from the free space tree now.
	 */
	clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
	return ret;

abort:
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}

static int clear_free_space_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int nr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;

		nr = btrfs_header_nritems(path->nodes[0]);
		if (!nr)
			break;

		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_key key = {
		.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};
	struct btrfs_root *free_space_root = btrfs_global_root(fs_info, &key);
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);

	ret = clear_free_space_tree(trans, free_space_root);
	if (ret)
		goto abort;

	ret = btrfs_del_root(trans, &free_space_root->root_key);
	if (ret)
		goto abort;

	btrfs_global_root_delete(free_space_root);
	list_del(&free_space_root->dirty_list);

	btrfs_tree_lock(free_space_root->node);
	btrfs_clean_tree_block(free_space_root->node);
	btrfs_tree_unlock(free_space_root->node);
	btrfs_free_tree_block(trans, btrfs_root_id(free_space_root),
			      free_space_root->node, 0, 1);

	btrfs_put_root(free_space_root);

	return btrfs_commit_transaction(trans);

abort:
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_block_group *block_group,
					struct btrfs_path *path)
{
	int ret;

	block_group->needs_free_space = 0;

	ret = add_new_free_space_info(trans, block_group, path);
	if (ret)
		return ret;

	return __add_to_free_space_tree(trans, block_group, path,
					block_group->start,
					block_group->length);
}

int add_block_group_free_space(struct btrfs_trans_handle *trans,
			       struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path = NULL;
	int ret = 0;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	mutex_lock(&block_group->free_space_lock);
	if (!block_group->needs_free_space)
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = __add_block_group_free_space(trans, block_group, path);

out:
	btrfs_free_path(path);
	mutex_unlock(&block_group->free_space_lock);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

int remove_block_group_free_space(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group *block_group)
{
	struct btrfs_root *root = btrfs_free_space_root(block_group);
	struct btrfs_path *path;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	u64 start, end;
	int done = 0, nr;
	int ret;

	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
		return 0;

	if (block_group->needs_free_space) {
		/* We never added this block group to the free space tree. */
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->start;
	end = block_group->start + block_group->length;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->start);
				ASSERT(found_key.offset == block_group->length);
				done = 1;
				nr++;
				path->slots[0]--;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY ||
				   found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int prev_bit = 0, bit;
	/* Initialize to silence GCC. */
	u64 extent_start = 0;
	u64 end, offset;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = btrfs_free_space_root(block_group);

	end = block_group->start + block_group->length;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		offset = key.objectid;
		while (offset < key.objectid + key.offset) {
			bit = free_space_test_bit(block_group, path, offset);
			if (prev_bit == 0 && bit == 1) {
				extent_start = offset;
			} else if (prev_bit == 1 && bit == 0) {
				total_found += add_new_free_space(block_group,
								  extent_start,
								  offset);
				if (total_found > CACHING_CTL_WAKE_UP) {
					total_found = 0;
					wake_up(&caching_ctl->wait);
				}
				extent_count++;
			}
			prev_bit = bit;
			offset += fs_info->sectorsize;
		}
	}
	if (prev_bit == 1) {
		total_found += add_new_free_space(block_group, extent_start,
						  end);
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->start, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}

static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 end;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = btrfs_free_space_root(block_group);

	end = block_group->start + block_group->length;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		total_found += add_new_free_space(block_group, key.objectid,
						  key.objectid + key.offset);
		if (total_found > CACHING_CTL_WAKE_UP) {
			total_found = 0;
			wake_up(&caching_ctl->wait);
		}
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->start, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}

int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group;
	struct btrfs_free_space_info *info;
	struct btrfs_path *path;
	u32 extent_count, flags;
	int ret;

	block_group = caching_ctl->block_group;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Just like caching_thread() doesn't want to deadlock on the extent
	 * tree, we don't want to deadlock on the free space tree.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	info = search_free_space_info(NULL, block_group, path, 0);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
	flags = btrfs_free_space_flags(path->nodes[0], info);

	/*
	 * We left path pointing to the free space info item, so now
	 * load_free_space_foo can just iterate through the free space tree
	 * from there.
	 */
	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
		ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
	else
		ret = load_free_space_extents(caching_ctl, path, extent_count);

out:
	btrfs_free_path(path);
	return ret;
}