// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Facebook. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "free-space-tree.h"
#include "transaction.h"

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_block_group_cache *block_group,
					struct btrfs_path *path);

void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
{
	u32 bitmap_range;
	size_t bitmap_size;
	u64 num_bitmaps, total_bitmap_size;

	/*
	 * We convert to bitmaps when the disk space required for using extents
	 * exceeds that required for using bitmaps.
	 */
	bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
			      bitmap_range);
	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
	total_bitmap_size = num_bitmaps * bitmap_size;
	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
					    sizeof(struct btrfs_item));

	/*
	 * We allow for a small buffer between the high threshold and low
	 * threshold to avoid thrashing back and forth between the two formats.
	 */
	if (cache->bitmap_high_thresh > 100)
		cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100;
	else
		cache->bitmap_low_thresh = 0;
}

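/*
 * A rough illustration of the threshold math above, assuming a 4KiB
 * sectorsize, a 256 byte BTRFS_FREE_SPACE_BITMAP_SIZE and a 25 byte
 * struct btrfs_item: each bitmap item then covers 4KiB * 2048 = 8MiB, so a
 * 1GiB block group needs 128 bitmap items, or 128 * (25 + 256) bytes of leaf
 * space. Dividing that by the 25 bytes a plain extent item costs gives a
 * high threshold of roughly 1400 extents.
 */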

static int add_new_free_space_info(struct btrfs_trans_handle *trans,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_path *path)
{
	struct btrfs_root *root = trans->fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	info = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_free_space_info);
	btrfs_set_free_space_extent_count(leaf, info, 0);
	btrfs_set_free_space_flags(leaf, info, 0);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

struct btrfs_free_space_info *
search_free_space_info(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info,
		       struct btrfs_block_group_cache *block_group,
		       struct btrfs_path *path, int cow)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret != 0) {
		btrfs_warn(fs_info, "missing free space info for %llu",
			   block_group->key.objectid);
		ASSERT(0);
		return ERR_PTR(-ENOENT);
	}

	return btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_free_space_info);
}

/*
 * btrfs_search_slot() but we're looking for the greatest key less than the
 * passed key.
 */
static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key, struct btrfs_path *p,
				  int ins_len, int cow)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		ASSERT(0);
		return -EIO;
	}

	if (p->slots[0] == 0) {
		ASSERT(0);
		return -EIO;
	}
	p->slots[0]--;

	return 0;
}

static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
{
	return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
}

static unsigned long *alloc_bitmap(u32 bitmap_size)
{
	unsigned long *ret;
	unsigned int nofs_flag;
	u32 bitmap_rounded_size = round_up(bitmap_size, sizeof(unsigned long));

	/*
	 * GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse
	 * into the filesystem as the free space bitmap can be modified in the
	 * critical section of a transaction commit.
	 *
	 * TODO: push the memalloc_nofs_{save,restore}() to the caller where we
	 * know that recursion is unsafe.
	 */
	nofs_flag = memalloc_nofs_save();
	ret = kvzalloc(bitmap_rounded_size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	return ret;
}

static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
{
	u8 *p = ((u8 *)map) + BIT_BYTE(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);

	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}

int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	char *bitmap_cursor;
	u64 start, end;
	u64 bitmap_range, i;
	u32 bitmap_size, flags, expected_extent_count;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     fs_info->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

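	/*
	 * Walk backwards from the end of the block group, recording each free
	 * space extent item in the in-memory bitmap and deleting the items in
	 * per-leaf batches, stopping at the block group's free space info
	 * item.
	 */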
	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
				u64 first, last;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				first = div_u64(found_key.objectid - start,
						fs_info->sectorsize);
				last = div_u64(found_key.objectid + found_key.offset - start,
					       fs_info->sectorsize);
				le_bitmap_set(bitmap, first, last - first);

				extent_count++;
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	bitmap_cursor = (char *)bitmap;
	bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	i = start;
	while (i < end) {
		unsigned long ptr;
		u64 extent_size;
		u32 data_size;

		extent_size = min(end - i, bitmap_range);
		data_size = free_space_bitmap_size(extent_size,
						   fs_info->sectorsize);

		key.objectid = i;
		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
		key.offset = extent_size;

		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      data_size);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, bitmap_cursor, ptr,
				    data_size);
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(path);

		i += extent_size;
		bitmap_cursor += data_size;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	u64 start, end;
	u32 bitmap_size, flags, expected_extent_count;
	unsigned long nrbits, start_bit, end_bit;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     fs_info->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				unsigned long ptr;
				char *bitmap_cursor;
				u32 bitmap_pos, data_size;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				bitmap_pos = div_u64(found_key.objectid - start,
						     fs_info->sectorsize *
						     BITS_PER_BYTE);
				bitmap_cursor = ((char *)bitmap) + bitmap_pos;
				data_size = free_space_bitmap_size(found_key.offset,
								   fs_info->sectorsize);

				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
				read_extent_buffer(leaf, bitmap_cursor, ptr,
						   data_size);

				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

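	/*
	 * Walk the assembled bitmap and insert a free space extent item for
	 * every contiguous run of set bits.
	 */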
	nrbits = div_u64(block_group->key.offset, block_group->fs_info->sectorsize);
	start_bit = find_next_bit_le(bitmap, nrbits, 0);

	while (start_bit < nrbits) {
		end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit);
		ASSERT(start_bit < end_bit);

		key.objectid = start + start_bit * block_group->fs_info->sectorsize;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize;

		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		btrfs_release_path(path);

		extent_count++;

		start_bit = find_next_bit_le(bitmap, nrbits, end_bit);
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
					  struct btrfs_block_group_cache *block_group,
					  struct btrfs_path *path,
					  int new_extents)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	u32 extent_count;
	int ret = 0;

	if (new_extents == 0)
		return 0;

	info = search_free_space_info(trans, trans->fs_info, block_group, path,
				      1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	flags = btrfs_free_space_flags(path->nodes[0], info);
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);

	extent_count += new_extents;
	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
	    extent_count > block_group->bitmap_high_thresh) {
		ret = convert_free_space_to_bitmaps(trans, block_group, path);
	} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
		   extent_count < block_group->bitmap_low_thresh) {
		ret = convert_free_space_to_extents(trans, block_group, path);
	}

out:
	return ret;
}

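/*
 * Return the value of the bit covering offset in the bitmap item that the
 * path currently points to. The caller must already have searched to the
 * bitmap item covering offset.
 */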
int free_space_test_bit(struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path, u64 offset)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 found_start, found_end;
	unsigned long ptr, i;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(offset >= found_start && offset < found_end);

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	i = div_u64(offset - found_start,
		    block_group->fs_info->sectorsize);
	return !!extent_buffer_test_bit(leaf, ptr, i);
}

static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
				struct btrfs_path *path, u64 *start, u64 *size,
				int bit)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 end = *start + *size;
	u64 found_start, found_end;
	unsigned long ptr, first, last;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(*start >= found_start && *start < found_end);
	ASSERT(end > found_start);

	if (end > found_end)
		end = found_end;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	first = div_u64(*start - found_start, fs_info->sectorsize);
	last = div_u64(end - found_start, fs_info->sectorsize);
	if (bit)
		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
	else
		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
	btrfs_mark_buffer_dirty(leaf);

	*size -= end - *start;
	*start = end;
}

/*
 * We can't use btrfs_next_item() in modify_free_space_bitmap() because
 * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy
 * tree walking in btrfs_next_leaf() anyways because we know exactly what we're
 * looking for.
 */
static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct btrfs_path *p)
{
	struct btrfs_key key;

	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
		p->slots[0]++;
		return 0;
	}

	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
	btrfs_release_path(p);

	key.objectid += key.offset;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
}

/*
 * If remove is 1, then we are removing free space, thus clearing bits in the
 * bitmap. If remove is 0, then we are adding free space, thus setting bits in
 * the bitmap.
 */
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size, int remove)
{
	struct btrfs_root *root = block_group->fs_info->free_space_root;
	struct btrfs_key key;
	u64 end = start + size;
	u64 cur_start, cur_size;
	int prev_bit, next_bit;
	int new_extents;
	int ret;

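	/*
	 * The plan: flip the bits covering [start, start + size) and then
	 * look at the bit just before and just after the range to work out
	 * how the number of free space extents changed, so that the count in
	 * the free space info item can be updated (and the block group
	 * converted between formats if a threshold is crossed).
	 */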

	/*
	 * Read the bit for the block immediately before the extent of space if
	 * that block is within the block group.
	 */
	if (start > block_group->key.objectid) {
		u64 prev_block = start - block_group->fs_info->sectorsize;

		key.objectid = prev_block;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = free_space_test_bit(block_group, path, prev_block);

		/* The previous block may have been in the previous bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (start >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}
	} else {
		key.objectid = start;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = -1;
	}

	/*
	 * Iterate over all of the bitmaps overlapped by the extent of space,
	 * clearing/setting bits as required.
	 */
	cur_start = start;
	cur_size = size;
	while (1) {
		free_space_set_bits(block_group, path, &cur_start, &cur_size,
				    !remove);
		if (cur_size == 0)
			break;
		ret = free_space_next_bitmap(trans, root, path);
		if (ret)
			goto out;
	}

	/*
	 * Read the bit for the block immediately after the extent of space if
	 * that block is within the block group.
	 */
	if (end < block_group->key.objectid + block_group->key.offset) {
		/* The next block may be in the next bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (end >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}

		next_bit = free_space_test_bit(block_group, path, end);
	} else {
		next_bit = -1;
	}

	if (remove) {
		new_extents = -1;
		if (prev_bit == 1) {
			/* Leftover on the left. */
			new_extents++;
		}
		if (next_bit == 1) {
			/* Leftover on the right. */
			new_extents++;
		}
	} else {
		new_extents = 1;
		if (prev_bit == 1) {
			/* Merging with neighbor on the left. */
			new_extents--;
		}
		if (next_bit == 1) {
			/* Merging with neighbor on the right. */
			new_extents--;
		}
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, block_group, path,
					     new_extents);

out:
	return ret;
}

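/*
 * Remove the free space [start, start + size) from a block group that is
 * tracking free space with plain extent items, splitting the containing
 * extent item as needed.
 */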
static int remove_free_space_extent(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size)
{
	struct btrfs_root *root = trans->fs_info->free_space_root;
	struct btrfs_key key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = -1;
	int ret;

	key.objectid = start;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(start >= found_start && end <= found_end);

	/*
	 * Okay, now that we've found the free space extent which contains the
	 * free space that we are removing, there are four cases:
	 *
	 * 1. We're using the whole extent: delete the key we found and
	 * decrement the free space extent count.
	 * 2. We are using part of the extent starting at the beginning: delete
	 * the key we found and insert a new key representing the leftover at
	 * the end. There is no net change in the number of extents.
	 * 3. We are using part of the extent ending at the end: delete the key
	 * we found and insert a new key representing the leftover at the
	 * beginning. There is no net change in the number of extents.
	 * 4. We are using part of the extent in the middle: delete the key we
	 * found and insert two new keys representing the leftovers on each
	 * side. Where we used to have one extent, we now have two, so increment
	 * the extent count. We may need to convert the block group to bitmaps
	 * as a result.
	 */

	/* Delete the existing key (cases 1-4). */
	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/* Add a key for leftovers at the beginning (cases 3 and 4). */
	if (start > found_start) {
		key.objectid = found_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = start - found_start;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	/* Add a key for leftovers at the end (cases 2 and 4). */
	if (end < found_end) {
		key.objectid = end;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = found_end - end;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, block_group, path,
					     new_extents);

out:
	return ret;
}

int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, block_group, path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, trans->fs_info, block_group, path,
				      0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, block_group, path,
						start, size, 1);
	} else {
		return remove_free_space_extent(trans, block_group, path,
						start, size);
	}
}

int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(trans->fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __remove_from_free_space_tree(trans, block_group, path, start,
					    size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

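/*
 * Add the free space [start, start + size) to a block group that is tracking
 * free space with plain extent items, merging it with any adjacent free
 * space extents.
 */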
static int add_free_space_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_path *path,
				 u64 start, u64 size)
{
	struct btrfs_root *root = trans->fs_info->free_space_root;
	struct btrfs_key key, new_key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = 1;
	int ret;

	/*
	 * We are adding a new extent of free space, but we need to merge
	 * extents. There are four cases here:
	 *
	 * 1. The new extent does not have any immediate neighbors to merge
	 * with: add the new key and increment the free space extent count. We
	 * may need to convert the block group to bitmaps as a result.
	 * 2. The new extent has an immediate neighbor before it: remove the
	 * previous key and insert a new key combining both of them. There is no
	 * net change in the number of extents.
	 * 3. The new extent has an immediate neighbor after it: remove the next
	 * key and insert a new key combining both of them. There is no net
	 * change in the number of extents.
	 * 4. The new extent has immediate neighbors on both sides: remove both
	 * of the keys and insert a new key combining all of them. Where we used
	 * to have two extents, we now have one, so decrement the extent count.
	 */

	new_key.objectid = start;
	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
	new_key.offset = size;

	/* Search for a neighbor on the left. */
	if (start == block_group->key.objectid)
		goto right;
	key.objectid = start - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto right;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT(found_start < start && found_end <= start);

	/*
	 * Delete the neighbor on the left and absorb it into the new key (cases
	 * 2 and 4).
	 */
	if (found_end == start) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.objectid = found_start;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

right:
	/* Search for a neighbor on the right. */
	if (end == block_group->key.objectid + block_group->key.offset)
		goto insert;
	key.objectid = end;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto insert;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT((found_start < start && found_end <= start) ||
	       (found_start >= end && found_end > end));

	/*
	 * Delete the neighbor on the right and absorb it into the new key
	 * (cases 3 and 4).
	 */
	if (found_start == end) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

insert:
	/* Insert the new key (cases 1-4). */
	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
	if (ret)
		goto out;

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, block_group, path,
					     new_extents);

out:
	return ret;
}

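/*
 * Add free space to a block group that is already tracked in the free space
 * tree, dispatching to the bitmap or extent representation based on the
 * format flag in the free space info item. Callers hold
 * block_group->free_space_lock.
 */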
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, block_group, path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, block_group, path,
						start, size, 0);
	} else {
		return add_free_space_extent(trans, block_group, path, start,
					     size);
	}
}

int add_to_free_space_tree(struct btrfs_trans_handle *trans,
			   u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(trans->fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __add_to_free_space_tree(trans, block_group, path, start, size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Populate the free space tree by walking the extent tree. Operations on the
 * extent tree that happen as a result of writes to the free space tree will go
 * through the normal add/remove hooks.
 */
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group)
{
	struct btrfs_root *extent_root = trans->fs_info->extent_root;
	struct btrfs_path *path, *path2;
	struct btrfs_key key;
	u64 start, end;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	path2 = btrfs_alloc_path();
	if (!path2) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	ret = add_new_free_space_info(trans, block_group, path2);
	if (ret)
		goto out;

	mutex_lock(&block_group->free_space_lock);

	/*
	 * Iterate through all of the extent and metadata items in this block
	 * group, adding the free space between them and the free space at the
	 * end. Note that EXTENT_ITEM and METADATA_ITEM are less than
	 * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
	 * contained in.
	 */
	key.objectid = block_group->key.objectid;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
	if (ret < 0)
		goto out_locked;
	ASSERT(ret == 0);

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;
	while (1) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			if (key.objectid >= end)
				break;

			if (start < key.objectid) {
				ret = __add_to_free_space_tree(trans,
							       block_group,
							       path2, start,
							       key.objectid -
							       start);
				if (ret)
					goto out_locked;
			}
			start = key.objectid;
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				start += trans->fs_info->nodesize;
			else
				start += key.offset;
		} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (key.objectid != block_group->key.objectid)
				break;
		}

		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			goto out_locked;
		if (ret)
			break;
	}
	if (start < end) {
		ret = __add_to_free_space_tree(trans, block_group, path2,
					       start, end - start);
		if (ret)
			goto out_locked;
	}

	ret = 0;
out_locked:
	mutex_unlock(&block_group->free_space_lock);
out:
	btrfs_free_path(path2);
	btrfs_free_path(path);
	return ret;
}

int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *free_space_root;
	struct btrfs_block_group_cache *block_group;
	struct rb_node *node;
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	free_space_root = btrfs_create_tree(trans, fs_info,
					    BTRFS_FREE_SPACE_TREE_OBJECTID);
	if (IS_ERR(free_space_root)) {
		ret = PTR_ERR(free_space_root);
		goto abort;
	}
	fs_info->free_space_root = free_space_root;

	node = rb_first(&fs_info->block_group_cache_tree);
	while (node) {
		block_group = rb_entry(node, struct btrfs_block_group_cache,
				       cache_node);
		ret = populate_free_space_tree(trans, block_group);
		if (ret)
			goto abort;
		node = rb_next(node);
	}

	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);

	return btrfs_commit_transaction(trans);

abort:
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}

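/*
 * Empty the free space tree by deleting its items a full leaf at a time;
 * used when the tree is being torn down before its root is freed.
 */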
static int clear_free_space_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int nr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;

		nr = btrfs_header_nritems(path->nodes[0]);
		if (!nr)
			break;

		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *free_space_root = fs_info->free_space_root;
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	fs_info->free_space_root = NULL;

	ret = clear_free_space_tree(trans, free_space_root);
	if (ret)
		goto abort;

	ret = btrfs_del_root(trans, &free_space_root->root_key);
	if (ret)
		goto abort;

	list_del(&free_space_root->dirty_list);

	btrfs_tree_lock(free_space_root->node);
	clean_tree_block(fs_info, free_space_root->node);
	btrfs_tree_unlock(free_space_root->node);
	btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
			      0, 1);

	free_extent_buffer(free_space_root->node);
	free_extent_buffer(free_space_root->commit_root);
	kfree(free_space_root);

	return btrfs_commit_transaction(trans);

abort:
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_block_group_cache *block_group,
					struct btrfs_path *path)
{
	int ret;

	block_group->needs_free_space = 0;

	ret = add_new_free_space_info(trans, block_group, path);
	if (ret)
		return ret;

	return __add_to_free_space_tree(trans, block_group, path,
					block_group->key.objectid,
					block_group->key.offset);
}

int add_block_group_free_space(struct btrfs_trans_handle *trans,
			       struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path = NULL;
	int ret = 0;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	mutex_lock(&block_group->free_space_lock);
	if (!block_group->needs_free_space)
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = __add_block_group_free_space(trans, block_group, path);

out:
	btrfs_free_path(path);
	mutex_unlock(&block_group->free_space_lock);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

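/*
 * Delete all of the free space tree items for a block group that is being
 * removed: the free space info item and every extent or bitmap item within
 * the block group's range.
 */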
int remove_block_group_free_space(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_root *root = trans->fs_info->free_space_root;
	struct btrfs_path *path;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	u64 start, end;
	int done = 0, nr;
	int ret;

	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
		return 0;

	if (block_group->needs_free_space) {
		/* We never added this block group to the free space tree. */
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				nr++;
				path->slots[0]--;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY ||
				   found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

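/*
 * Populate the in-memory free space cache for a block group from its
 * BTRFS_FREE_SPACE_BITMAP_KEY items, waking up anyone waiting on the caching
 * control as runs of free space are found.
 */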
static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int prev_bit = 0, bit;
	/* Initialize to silence GCC. */
	u64 extent_start = 0;
	u64 end, offset;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = fs_info->free_space_root;

	end = block_group->key.objectid + block_group->key.offset;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		offset = key.objectid;
		while (offset < key.objectid + key.offset) {
			bit = free_space_test_bit(block_group, path, offset);
			if (prev_bit == 0 && bit == 1) {
				extent_start = offset;
			} else if (prev_bit == 1 && bit == 0) {
				total_found += add_new_free_space(block_group,
								  extent_start,
								  offset);
				if (total_found > CACHING_CTL_WAKE_UP) {
					total_found = 0;
					wake_up(&caching_ctl->wait);
				}
				extent_count++;
			}
			prev_bit = bit;
			offset += fs_info->sectorsize;
		}
	}
	if (prev_bit == 1) {
		total_found += add_new_free_space(block_group, extent_start,
						  end);
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}

static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 end;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = fs_info->free_space_root;

	end = block_group->key.objectid + block_group->key.offset;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		total_found += add_new_free_space(block_group, key.objectid,
						  key.objectid + key.offset);
		if (total_found > CACHING_CTL_WAKE_UP) {
			total_found = 0;
			wake_up(&caching_ctl->wait);
		}
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}

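/*
 * Load the free space for a block group from the free space tree as part of
 * caching it, picking the bitmap or extent loader based on the format
 * recorded in the free space info item.
 */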
int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_free_space_info *info;
	struct btrfs_path *path;
	u32 extent_count, flags;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Just like caching_thread() doesn't want to deadlock on the extent
	 * tree, we don't want to deadlock on the free space tree.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
	flags = btrfs_free_space_flags(path->nodes[0], info);

	/*
	 * We left path pointing to the free space info item, so now
	 * load_free_space_foo can just iterate through the free space tree from
	 * there.
	 */
	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
		ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
	else
		ret = load_free_space_extents(caching_ctl, path, extent_count);

out:
	btrfs_free_path(path);
	return ret;
}