/*
 * Copyright (C) 2015 Facebook. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "free-space-tree.h"
#include "transaction.h"

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *block_group,
					struct btrfs_path *path);

void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
{
	u32 bitmap_range;
	size_t bitmap_size;
	u64 num_bitmaps, total_bitmap_size;

	/*
	 * We convert to bitmaps when the disk space required for using extents
	 * exceeds that required for using bitmaps.
	 */
	bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
			      bitmap_range);
	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
	total_bitmap_size = num_bitmaps * bitmap_size;
	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
					    sizeof(struct btrfs_item));

	/*
	 * We allow for a small buffer between the high threshold and low
	 * threshold to avoid thrashing back and forth between the two formats.
	 */
	if (cache->bitmap_high_thresh > 100)
		cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100;
	else
		cache->bitmap_low_thresh = 0;
}
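
/*
 * A rough worked example of the threshold above (illustrative only; the exact
 * numbers depend on sizeof(struct btrfs_item), BTRFS_FREE_SPACE_BITMAP_SIZE,
 * and the sectorsize): each free space extent costs one item header and no
 * item data, while representing the whole block group as bitmaps costs
 * num_bitmaps * (item header + BTRFS_FREE_SPACE_BITMAP_SIZE) bytes of leaf
 * space. bitmap_high_thresh is therefore roughly the extent count at which
 * the extent representation starts taking more leaf space than the bitmaps
 * would, which is when converting pays off.
 */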

static int add_new_free_space_info(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	info = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_free_space_info);
	btrfs_set_free_space_extent_count(leaf, info, 0);
	btrfs_set_free_space_flags(leaf, info, 0);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

struct btrfs_free_space_info *
search_free_space_info(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info,
		       struct btrfs_block_group_cache *block_group,
		       struct btrfs_path *path, int cow)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret != 0) {
		btrfs_warn(fs_info, "missing free space info for %llu",
			   block_group->key.objectid);
		ASSERT(0);
		return ERR_PTR(-ENOENT);
	}

	return btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_free_space_info);
}

/*
 * btrfs_search_slot() but we're looking for the greatest key less than the
 * passed key.
 */
static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key, struct btrfs_path *p,
				  int ins_len, int cow)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		ASSERT(0);
		return -EIO;
	}

	if (p->slots[0] == 0) {
		ASSERT(0);
		return -EIO;
	}
	p->slots[0]--;

	return 0;
}

static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
{
	return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
}

static u8 *alloc_bitmap(u32 bitmap_size)
{
	u8 *ret;
	unsigned int nofs_flag;

	/*
	 * GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse
	 * into the filesystem as the free space bitmap can be modified in the
	 * critical section of a transaction commit.
	 *
	 * TODO: push the memalloc_nofs_{save,restore}() to the caller where we
	 * know that recursion is unsafe.
	 */
	nofs_flag = memalloc_nofs_save();
	ret = kvzalloc(bitmap_size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	return ret;
}
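
/*
 * Convert a block group's free space from extent items to bitmap items: walk
 * the existing FREE_SPACE_EXTENT items backwards from the end of the block
 * group, recording them in an in-memory bitmap while deleting them, then set
 * BTRFS_FREE_SPACE_USING_BITMAPS in the free space info item and write the
 * bitmap back out as FREE_SPACE_BITMAP items.
 */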

int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	u8 *bitmap, *bitmap_cursor;
	u64 start, end;
	u64 bitmap_range, i;
	u32 bitmap_size, flags, expected_extent_count;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     fs_info->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
				u64 first, last;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				first = div_u64(found_key.objectid - start,
						fs_info->sectorsize);
				last = div_u64(found_key.objectid + found_key.offset - start,
					       fs_info->sectorsize);
				le_bitmap_set(bitmap, first, last - first);

				extent_count++;
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	bitmap_cursor = bitmap;
	bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	i = start;
	while (i < end) {
		unsigned long ptr;
		u64 extent_size;
		u32 data_size;

		extent_size = min(end - i, bitmap_range);
		data_size = free_space_bitmap_size(extent_size,
						   fs_info->sectorsize);

		key.objectid = i;
		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
		key.offset = extent_size;

		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      data_size);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, bitmap_cursor, ptr,
				    data_size);
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(path);

		i += extent_size;
		bitmap_cursor += data_size;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}
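
/*
 * The inverse of convert_free_space_to_bitmaps(): read the FREE_SPACE_BITMAP
 * items into an in-memory bitmap while deleting them, clear
 * BTRFS_FREE_SPACE_USING_BITMAPS in the free space info item, and insert one
 * FREE_SPACE_EXTENT item for each contiguous run of set bits.
 */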

int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	u8 *bitmap;
	u64 start, end;
	/* Initialize to silence GCC. */
	u64 extent_start = 0;
	u64 offset;
	u32 bitmap_size, flags, expected_extent_count;
	int prev_bit = 0, bit, bitnr;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     fs_info->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				unsigned long ptr;
				u8 *bitmap_cursor;
				u32 bitmap_pos, data_size;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				bitmap_pos = div_u64(found_key.objectid - start,
						     fs_info->sectorsize *
						     BITS_PER_BYTE);
				bitmap_cursor = bitmap + bitmap_pos;
				data_size = free_space_bitmap_size(found_key.offset,
								   fs_info->sectorsize);

				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
				read_extent_buffer(leaf, bitmap_cursor, ptr,
						   data_size);

				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	offset = start;
	bitnr = 0;
	while (offset < end) {
		bit = !!le_test_bit(bitnr, bitmap);
		if (prev_bit == 0 && bit == 1) {
			extent_start = offset;
		} else if (prev_bit == 1 && bit == 0) {
			key.objectid = extent_start;
			key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
			key.offset = offset - extent_start;

			ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
			if (ret)
				goto out;
			btrfs_release_path(path);

			extent_count++;
		}
		prev_bit = bit;
		offset += fs_info->sectorsize;
		bitnr++;
	}
	if (prev_bit == 1) {
		key.objectid = extent_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = end - extent_start;

		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		btrfs_release_path(path);

		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}
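
/*
 * Apply a delta to the extent count stored in the free space info item and,
 * if the new count crosses bitmap_high_thresh or bitmap_low_thresh, convert
 * the block group to the other format.
 */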

static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  struct btrfs_block_group_cache *block_group,
					  struct btrfs_path *path,
					  int new_extents)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	u32 extent_count;
	int ret = 0;

	if (new_extents == 0)
		return 0;

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	flags = btrfs_free_space_flags(path->nodes[0], info);
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);

	extent_count += new_extents;
	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
	    extent_count > block_group->bitmap_high_thresh) {
		ret = convert_free_space_to_bitmaps(trans, fs_info, block_group,
						    path);
	} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
		   extent_count < block_group->bitmap_low_thresh) {
		ret = convert_free_space_to_extents(trans, fs_info, block_group,
						    path);
	}

out:
	return ret;
}

int free_space_test_bit(struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path, u64 offset)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 found_start, found_end;
	unsigned long ptr, i;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(offset >= found_start && offset < found_end);

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	i = div_u64(offset - found_start,
		    block_group->fs_info->sectorsize);
	return !!extent_buffer_test_bit(leaf, ptr, i);
}
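
/*
 * Set or clear the bits covering [*start, *start + *size) within the bitmap
 * item that the path currently points to. The range is clamped to the end of
 * this bitmap item; *start and *size are advanced so the caller can continue
 * with the next bitmap item for whatever is left over.
 */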

static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
				struct btrfs_path *path, u64 *start, u64 *size,
				int bit)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 end = *start + *size;
	u64 found_start, found_end;
	unsigned long ptr, first, last;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(*start >= found_start && *start < found_end);
	ASSERT(end > found_start);

	if (end > found_end)
		end = found_end;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	first = div_u64(*start - found_start, fs_info->sectorsize);
	last = div_u64(end - found_start, fs_info->sectorsize);
	if (bit)
		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
	else
		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
	btrfs_mark_buffer_dirty(leaf);

	*size -= end - *start;
	*start = end;
}

/*
 * We can't use btrfs_next_item() in modify_free_space_bitmap() because
 * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy
 * tree walking in btrfs_next_leaf() anyways because we know exactly what we're
 * looking for.
 */
static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct btrfs_path *p)
{
	struct btrfs_key key;

	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
		p->slots[0]++;
		return 0;
	}

	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
	btrfs_release_path(p);

	key.objectid += key.offset;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
}

/*
 * If remove is 1, then we are removing free space, thus clearing bits in the
 * bitmap. If remove is 0, then we are adding free space, thus setting bits in
 * the bitmap.
 */
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size, int remove)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	u64 end = start + size;
	u64 cur_start, cur_size;
	int prev_bit, next_bit;
	int new_extents;
	int ret;

	/*
	 * Read the bit for the block immediately before the extent of space if
	 * that block is within the block group.
	 */
	if (start > block_group->key.objectid) {
		u64 prev_block = start - block_group->fs_info->sectorsize;

		key.objectid = prev_block;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = free_space_test_bit(block_group, path, prev_block);

		/* The previous block may have been in the previous bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (start >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}
	} else {
		key.objectid = start;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = -1;
	}

	/*
	 * Iterate over all of the bitmaps overlapped by the extent of space,
	 * clearing/setting bits as required.
	 */
	cur_start = start;
	cur_size = size;
	while (1) {
		free_space_set_bits(block_group, path, &cur_start, &cur_size,
				    !remove);
		if (cur_size == 0)
			break;
		ret = free_space_next_bitmap(trans, root, path);
		if (ret)
			goto out;
	}

	/*
	 * Read the bit for the block immediately after the extent of space if
	 * that block is within the block group.
	 */
	if (end < block_group->key.objectid + block_group->key.offset) {
		/* The next block may be in the next bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (end >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}

		next_bit = free_space_test_bit(block_group, path, end);
	} else {
		next_bit = -1;
	}

	if (remove) {
		new_extents = -1;
		if (prev_bit == 1) {
			/* Leftover on the left. */
			new_extents++;
		}
		if (next_bit == 1) {
			/* Leftover on the right. */
			new_extents++;
		}
	} else {
		new_extents = 1;
		if (prev_bit == 1) {
			/* Merging with neighbor on the left. */
			new_extents--;
		}
		if (next_bit == 1) {
			/* Merging with neighbor on the right. */
			new_extents--;
		}
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}

static int remove_free_space_extent(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = -1;
	int ret;

	key.objectid = start;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(start >= found_start && end <= found_end);

	/*
	 * Okay, now that we've found the free space extent which contains the
	 * free space that we are removing, there are four cases:
	 *
	 * 1. We're using the whole extent: delete the key we found and
	 * decrement the free space extent count.
	 * 2. We are using part of the extent starting at the beginning: delete
	 * the key we found and insert a new key representing the leftover at
	 * the end. There is no net change in the number of extents.
	 * 3. We are using part of the extent ending at the end: delete the key
	 * we found and insert a new key representing the leftover at the
	 * beginning. There is no net change in the number of extents.
	 * 4. We are using part of the extent in the middle: delete the key we
	 * found and insert two new keys representing the leftovers on each
	 * side. Where we used to have one extent, we now have two, so increment
	 * the extent count. We may need to convert the block group to bitmaps
	 * as a result.
	 */

	/* Delete the existing key (cases 1-4). */
	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/* Add a key for leftovers at the beginning (cases 3 and 4). */
	if (start > found_start) {
		key.objectid = found_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = start - found_start;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	/* Add a key for leftovers at the end (cases 2 and 4). */
	if (end < found_end) {
		key.objectid = end;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = found_end - end;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}

int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, fs_info, block_group,
						   path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, fs_info, block_group,
						path, start, size, 1);
	} else {
		return remove_free_space_extent(trans, fs_info, block_group,
						path, start, size);
	}
}

int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __remove_from_free_space_tree(trans, fs_info, block_group, path,
					    start, size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

static int add_free_space_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_path *path,
				 u64 start, u64 size)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key, new_key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = 1;
	int ret;

	/*
	 * We are adding a new extent of free space, but we need to merge
	 * extents. There are four cases here:
	 *
	 * 1. The new extent does not have any immediate neighbors to merge
	 * with: add the new key and increment the free space extent count. We
	 * may need to convert the block group to bitmaps as a result.
	 * 2. The new extent has an immediate neighbor before it: remove the
	 * previous key and insert a new key combining both of them. There is no
	 * net change in the number of extents.
	 * 3. The new extent has an immediate neighbor after it: remove the next
	 * key and insert a new key combining both of them. There is no net
	 * change in the number of extents.
	 * 4. The new extent has immediate neighbors on both sides: remove both
	 * of the keys and insert a new key combining all of them. Where we used
	 * to have two extents, we now have one, so decrement the extent count.
	 */
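
	/*
	 * For example (hypothetical offsets): if the tree already contains a
	 * free space extent for [16M, 20M) and we add [20M, 24M), case 2
	 * applies: the existing key is deleted and a single key covering
	 * [16M, 24M) is inserted, so the extent count does not change.
	 */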

	new_key.objectid = start;
	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
	new_key.offset = size;

	/* Search for a neighbor on the left. */
	if (start == block_group->key.objectid)
		goto right;
	key.objectid = start - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto right;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT(found_start < start && found_end <= start);

	/*
	 * Delete the neighbor on the left and absorb it into the new key (cases
	 * 2 and 4).
	 */
	if (found_end == start) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.objectid = found_start;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

right:
	/* Search for a neighbor on the right. */
	if (end == block_group->key.objectid + block_group->key.offset)
		goto insert;
	key.objectid = end;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto insert;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT((found_start < start && found_end <= start) ||
	       (found_start >= end && found_end > end));

	/*
	 * Delete the neighbor on the right and absorb it into the new key
	 * (cases 3 and 4).
	 */
	if (found_start == end) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

insert:
	/* Insert the new key (cases 1-4). */
	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
	if (ret)
		goto out;

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}

int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, fs_info, block_group,
						   path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, fs_info, block_group,
						path, start, size, 0);
	} else {
		return add_free_space_extent(trans, fs_info, block_group, path,
					     start, size);
	}
}

int add_to_free_space_tree(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *fs_info,
			   u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __add_to_free_space_tree(trans, fs_info, block_group, path, start,
				       size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Populate the free space tree by walking the extent tree. Operations on the
 * extent tree that happen as a result of writes to the free space tree will go
 * through the normal add/remove hooks.
 */
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path, *path2;
	struct btrfs_key key;
	u64 start, end;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	path2 = btrfs_alloc_path();
	if (!path2) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	ret = add_new_free_space_info(trans, fs_info, block_group, path2);
	if (ret)
		goto out;

	mutex_lock(&block_group->free_space_lock);

	/*
	 * Iterate through all of the extent and metadata items in this block
	 * group, adding the free space between them and the free space at the
	 * end. Note that EXTENT_ITEM and METADATA_ITEM are less than
	 * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
	 * contained in.
	 */
	key.objectid = block_group->key.objectid;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
	if (ret < 0)
		goto out_locked;
	ASSERT(ret == 0);

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;
	while (1) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			if (key.objectid >= end)
				break;

			if (start < key.objectid) {
				ret = __add_to_free_space_tree(trans, fs_info,
							       block_group,
							       path2, start,
							       key.objectid -
							       start);
				if (ret)
					goto out_locked;
			}
			start = key.objectid;
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				start += fs_info->nodesize;
			else
				start += key.offset;
		} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (key.objectid != block_group->key.objectid)
				break;
		}

		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			goto out_locked;
		if (ret)
			break;
	}
	if (start < end) {
		ret = __add_to_free_space_tree(trans, fs_info, block_group,
					       path2, start, end - start);
		if (ret)
			goto out_locked;
	}

	ret = 0;
out_locked:
	mutex_unlock(&block_group->free_space_lock);
out:
	btrfs_free_path(path2);
	btrfs_free_path(path);
	return ret;
}

int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *free_space_root;
	struct btrfs_block_group_cache *block_group;
	struct rb_node *node;
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	free_space_root = btrfs_create_tree(trans, fs_info,
					    BTRFS_FREE_SPACE_TREE_OBJECTID);
	if (IS_ERR(free_space_root)) {
		ret = PTR_ERR(free_space_root);
		goto abort;
	}
	fs_info->free_space_root = free_space_root;

	node = rb_first(&fs_info->block_group_cache_tree);
	while (node) {
		block_group = rb_entry(node, struct btrfs_block_group_cache,
				       cache_node);
		ret = populate_free_space_tree(trans, fs_info, block_group);
		if (ret)
			goto abort;
		node = rb_next(node);
	}

	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);

	return btrfs_commit_transaction(trans);

abort:
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}
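
/*
 * Delete every item in the given free space root, one leaf at a time. Helper
 * for btrfs_clear_free_space_tree().
 */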

static int clear_free_space_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int nr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;

		nr = btrfs_header_nritems(path->nodes[0]);
		if (!nr)
			break;

		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *free_space_root = fs_info->free_space_root;
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	fs_info->free_space_root = NULL;

	ret = clear_free_space_tree(trans, free_space_root);
	if (ret)
		goto abort;

	ret = btrfs_del_root(trans, tree_root, &free_space_root->root_key);
	if (ret)
		goto abort;

	list_del(&free_space_root->dirty_list);

	btrfs_tree_lock(free_space_root->node);
	clean_tree_block(fs_info, free_space_root->node);
	btrfs_tree_unlock(free_space_root->node);
	btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
			      0, 1);

	free_extent_buffer(free_space_root->node);
	free_extent_buffer(free_space_root->commit_root);
	kfree(free_space_root);

	return btrfs_commit_transaction(trans);

abort:
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *block_group,
					struct btrfs_path *path)
{
	u64 start, end;
	int ret;

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	block_group->needs_free_space = 0;

	ret = add_new_free_space_info(trans, fs_info, block_group, path);
	if (ret)
		return ret;

	return __add_to_free_space_tree(trans, fs_info, block_group, path,
					block_group->key.objectid,
					block_group->key.offset);
}

int add_block_group_free_space(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info,
			       struct btrfs_block_group_cache *block_group)
{
	struct btrfs_path *path = NULL;
	int ret = 0;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	mutex_lock(&block_group->free_space_lock);
	if (!block_group->needs_free_space)
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = __add_block_group_free_space(trans, fs_info, block_group, path);

out:
	btrfs_free_path(path);
	mutex_unlock(&block_group->free_space_lock);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}
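
/*
 * Remove everything the free space tree tracks for a block group that is
 * being deleted: its FREE_SPACE_INFO item and all of its FREE_SPACE_EXTENT or
 * FREE_SPACE_BITMAP items.
 */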

int remove_block_group_free_space(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_path *path;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	u64 start, end;
	int done = 0, nr;
	int ret;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	if (block_group->needs_free_space) {
		/* We never added this block group to the free space tree. */
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				nr++;
				path->slots[0]--;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY ||
				   found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}
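
/*
 * Walk the FREE_SPACE_BITMAP items for the block group being cached and feed
 * each contiguous run of set bits to add_new_free_space(), waking up waiters
 * on the caching control as progress is made.
 */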

static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int prev_bit = 0, bit;
	/* Initialize to silence GCC. */
	u64 extent_start = 0;
	u64 end, offset;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = fs_info->free_space_root;

	end = block_group->key.objectid + block_group->key.offset;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		offset = key.objectid;
		while (offset < key.objectid + key.offset) {
			bit = free_space_test_bit(block_group, path, offset);
			if (prev_bit == 0 && bit == 1) {
				extent_start = offset;
			} else if (prev_bit == 1 && bit == 0) {
				total_found += add_new_free_space(block_group,
								  fs_info,
								  extent_start,
								  offset);
				if (total_found > CACHING_CTL_WAKE_UP) {
					total_found = 0;
					wake_up(&caching_ctl->wait);
				}
				extent_count++;
			}
			prev_bit = bit;
			offset += fs_info->sectorsize;
		}
	}
	if (prev_bit == 1) {
		total_found += add_new_free_space(block_group, fs_info,
						  extent_start, end);
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}

static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 end;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = fs_info->free_space_root;

	end = block_group->key.objectid + block_group->key.offset;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		total_found += add_new_free_space(block_group, fs_info,
						  key.objectid,
						  key.objectid + key.offset);
		if (total_found > CACHING_CTL_WAKE_UP) {
			total_found = 0;
			wake_up(&caching_ctl->wait);
		}
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}
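
/*
 * Entry point for the caching thread: read the free space info item for the
 * block group being cached and hand the path off to load_free_space_bitmaps()
 * or load_free_space_extents() depending on which format the block group is
 * using.
 */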

int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_free_space_info *info;
	struct btrfs_path *path;
	u32 extent_count, flags;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Just like caching_thread() doesn't want to deadlock on the extent
	 * tree, we don't want to deadlock on the free space tree.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
	flags = btrfs_free_space_flags(path->nodes[0], info);

	/*
	 * We left path pointing to the free space info item, so now
	 * load_free_space_foo can just iterate through the free space tree from
	 * there.
	 */
	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
		ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
	else
		ret = load_free_space_extents(caching_ctl, path, extent_count);

out:
	btrfs_free_path(path);
	return ret;
}