/*
 * Copyright (C) 2015 Facebook. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "free-space-tree.h"
#include "transaction.h"

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *block_group,
					struct btrfs_path *path);

/*
 * Compute the extent-count thresholds at which this block group's free space
 * representation is converted between the extent and bitmap formats.  Results
 * are stored in cache->bitmap_high_thresh / cache->bitmap_low_thresh.
 */
void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
{
	u32 bitmap_range;
	size_t bitmap_size;
	u64 num_bitmaps, total_bitmap_size;

	/*
	 * We convert to bitmaps when the disk space required for using extents
	 * exceeds that required for using bitmaps.
	 */
	bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
			      bitmap_range);
	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
	total_bitmap_size = num_bitmaps * bitmap_size;
	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
					    sizeof(struct btrfs_item));

	/*
	 * We allow for a small buffer between the high threshold and low
	 * threshold to avoid thrashing back and forth between the two formats.
	 */
	if (cache->bitmap_high_thresh > 100)
		cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100;
	else
		cache->bitmap_low_thresh = 0;
}

/*
 * Insert a fresh FREE_SPACE_INFO item for @block_group into the free space
 * tree, with an extent count of zero and no flags set.  The path is released
 * before returning.  Returns 0 on success or a negative errno.
 */
static int add_new_free_space_info(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	info = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_free_space_info);
	btrfs_set_free_space_extent_count(leaf, info, 0);
	btrfs_set_free_space_flags(leaf, info, 0);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * Look up the FREE_SPACE_INFO item for @block_group.  On success, returns a
 * pointer into the leaf (path is left pointing at the item; caller releases
 * it).  A missing item is treated as corruption: it warns, asserts, and
 * returns ERR_PTR(-ENOENT).  @cow is passed through to btrfs_search_slot().
 */
struct btrfs_free_space_info *
search_free_space_info(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info,
		       struct btrfs_block_group_cache *block_group,
		       struct btrfs_path *path, int cow)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret != 0) {
		btrfs_warn(fs_info, "missing free space info for %llu",
			   block_group->key.objectid);
		ASSERT(0);
		return ERR_PTR(-ENOENT);
	}

	return btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_free_space_info);
}

/*
 * btrfs_search_slot() but we're looking for the greatest key less than the
 * passed key.
 */
static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key, struct btrfs_path *p,
				  int ins_len, int cow)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
	if (ret < 0)
		return ret;

	/* An exact match is unexpected: callers pass sentinel keys. */
	if (ret == 0) {
		ASSERT(0);
		return -EIO;
	}

	/* Slot 0 means there is no key before the search key in this leaf. */
	if (p->slots[0] == 0) {
		ASSERT(0);
		return -EIO;
	}
	p->slots[0]--;

	return 0;
}

/* Bytes of bitmap needed to cover @size bytes at @sectorsize granularity. */
static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
{
	return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
}

/*
 * Allocate a zeroed bitmap of @bitmap_size bytes.  Free with kvfree().
 */
static u8 *alloc_bitmap(u32 bitmap_size)
{
	void *mem;

	/*
	 * The allocation size varies, observed numbers were < 4K up to 16K.
	 * Using vmalloc unconditionally would be too heavy, we'll try
	 * contiguous allocations first.
	 */
	if (bitmap_size <= PAGE_SIZE)
		return kzalloc(bitmap_size, GFP_NOFS);

	mem = kzalloc(bitmap_size, GFP_NOFS | __GFP_NOWARN);
	if (mem)
		return mem;

	return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO,
			 PAGE_KERNEL);
}

/*
 * Convert @block_group's free space records from FREE_SPACE_EXTENT items to
 * FREE_SPACE_BITMAP items: accumulate all extent items into an in-memory
 * bitmap, delete them, set BTRFS_FREE_SPACE_USING_BITMAPS in the info item,
 * then write the bitmap back as one item per bitmap_range.  Aborts the
 * transaction on failure.
 */
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	u8 *bitmap, *bitmap_cursor;
	u64 start, end;
	u64 bitmap_range, i;
	u32 bitmap_size, flags, expected_extent_count;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     fs_info->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	/* Sentinel key: search backwards from the end of the block group. */
	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		/* Walk the leaf backwards, batching deletions per leaf. */
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
				u64 first, last;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				first = div_u64(found_key.objectid - start,
						fs_info->sectorsize);
				last = div_u64(found_key.objectid + found_key.offset - start,
					       fs_info->sectorsize);
				le_bitmap_set(bitmap, first, last - first);

				extent_count++;
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* Cross-check against the count recorded in the info item. */
	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	/* Write the in-memory bitmap back, one item per bitmap_range. */
	bitmap_cursor = bitmap;
	bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	i = start;
	while (i < end) {
		unsigned long ptr;
		u64 extent_size;
		u32 data_size;

		extent_size = min(end - i, bitmap_range);
		data_size = free_space_bitmap_size(extent_size,
						   fs_info->sectorsize);

		key.objectid = i;
		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
		key.offset = extent_size;

		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      data_size);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, bitmap_cursor, ptr,
				    data_size);
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(path);

		i += extent_size;
		bitmap_cursor += data_size;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Inverse of convert_free_space_to_bitmaps(): read all FREE_SPACE_BITMAP
 * items into an in-memory bitmap, delete them, clear the USING_BITMAPS flag,
 * then scan the bitmap for runs of set bits and insert one
 * FREE_SPACE_EXTENT item per run.  Aborts the transaction on failure.
 */
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	u8 *bitmap;
	u64 start, end;
	/* Initialize to silence GCC. */
	u64 extent_start = 0;
	u64 offset;
	u32 bitmap_size, flags, expected_extent_count;
	int prev_bit = 0, bit, bitnr;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     fs_info->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	/* Sentinel key: search backwards from the end of the block group. */
	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		/* Walk the leaf backwards, batching deletions per leaf. */
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				unsigned long ptr;
				u8 *bitmap_cursor;
				u32 bitmap_pos, data_size;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				bitmap_pos = div_u64(found_key.objectid - start,
						     fs_info->sectorsize *
						     BITS_PER_BYTE);
				bitmap_cursor = bitmap + bitmap_pos;
				data_size = free_space_bitmap_size(found_key.offset,
								   fs_info->sectorsize);

				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
				read_extent_buffer(leaf, bitmap_cursor, ptr,
						   data_size);

				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * Scan for 0->1 / 1->0 transitions; each maximal run of set bits
	 * becomes one free space extent item.
	 */
	offset = start;
	bitnr = 0;
	while (offset < end) {
		bit = !!le_test_bit(bitnr, bitmap);
		if (prev_bit == 0 && bit == 1) {
			extent_start = offset;
		} else if (prev_bit == 1 && bit == 0) {
			key.objectid = extent_start;
			key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
			key.offset = offset - extent_start;

			ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
			if (ret)
				goto out;
			btrfs_release_path(path);

			extent_count++;
		}
		prev_bit = bit;
		offset += fs_info->sectorsize;
		bitnr++;
	}
	/* A run of set bits may extend to the end of the block group. */
	if (prev_bit == 1) {
		key.objectid = extent_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = end - extent_start;

		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		btrfs_release_path(path);

		extent_count++;
	}

	/* Cross-check against the count recorded in the info item. */
	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Adjust the extent count in the FREE_SPACE_INFO item by @new_extents (may be
 * negative), then convert the block group between formats if the new count
 * crossed bitmap_high_thresh (extents -> bitmaps) or bitmap_low_thresh
 * (bitmaps -> extents).
 */
static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  struct btrfs_block_group_cache *block_group,
					  struct btrfs_path *path,
					  int new_extents)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	u32 extent_count;
	int ret = 0;

	if (new_extents == 0)
		return 0;

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	flags = btrfs_free_space_flags(path->nodes[0], info);
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);

	extent_count += new_extents;
	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
	    extent_count > block_group->bitmap_high_thresh) {
		ret = convert_free_space_to_bitmaps(trans, fs_info, block_group,
						    path);
	} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
		   extent_count < block_group->bitmap_low_thresh) {
		ret = convert_free_space_to_extents(trans, fs_info, block_group,
						    path);
	}

out:
	return ret;
}

/*
 * Return the bit (0 or 1) for byte @offset in the bitmap item that @path
 * currently points at.  @offset must fall inside that item's range.
 */
int free_space_test_bit(struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path, u64 offset)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 found_start, found_end;
	unsigned long ptr, i;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(offset >= found_start && offset < found_end);

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	i = div_u64(offset - found_start,
		    block_group->fs_info->sectorsize);
	return !!extent_buffer_test_bit(leaf, ptr, i);
}

/*
 * Set (@bit != 0) or clear bits for [*start, *start + *size) within the
 * bitmap item that @path points at, clamped to that item's range.  *start and
 * *size are advanced past the portion handled, so the caller can loop over
 * consecutive bitmap items until *size reaches 0.
 */
static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
				struct btrfs_path *path, u64 *start, u64 *size,
				int bit)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 end = *start + *size;
	u64 found_start, found_end;
	unsigned long ptr, first, last;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(*start >= found_start && *start < found_end);
	ASSERT(end > found_start);

	if (end > found_end)
		end = found_end;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	first = div_u64(*start - found_start, fs_info->sectorsize);
	last = div_u64(end - found_start, fs_info->sectorsize);
	if (bit)
		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
	else
		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
	btrfs_mark_buffer_dirty(leaf);

	*size -= end - *start;
	*start = end;
}

/*
 * We can't use btrfs_next_item() in modify_free_space_bitmap() because
 * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy
 * tree walking in btrfs_next_leaf() anyways because we know exactly what we're
 * looking for.
 */
static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct btrfs_path *p)
{
	struct btrfs_key key;

	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
		p->slots[0]++;
		return 0;
	}

	/* End of leaf: re-search for the first key past this bitmap. */
	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
	btrfs_release_path(p);

	key.objectid += key.offset;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
}

/*
 * If remove is 1, then we are removing free space, thus clearing bits in the
 * bitmap. If remove is 0, then we are adding free space, thus setting bits in
 * the bitmap.  The bits of the blocks immediately before and after the range
 * are sampled so the net change in the number of free space extents can be
 * derived (merge/split accounting) and applied via
 * update_free_space_extent_count().
 */
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size, int remove)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	u64 end = start + size;
	u64 cur_start, cur_size;
	int prev_bit, next_bit;
	int new_extents;
	int ret;

	/*
	 * Read the bit for the block immediately before the extent of space if
	 * that block is within the block group.
	 */
	if (start > block_group->key.objectid) {
		u64 prev_block = start - block_group->fs_info->sectorsize;

		key.objectid = prev_block;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = free_space_test_bit(block_group, path, prev_block);

		/* The previous block may have been in the previous bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (start >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}
	} else {
		key.objectid = start;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		/* Range starts at the block group boundary: no left neighbor. */
		prev_bit = -1;
	}

	/*
	 * Iterate over all of the bitmaps overlapped by the extent of space,
	 * clearing/setting bits as required.
	 */
	cur_start = start;
	cur_size = size;
	while (1) {
		free_space_set_bits(block_group, path, &cur_start, &cur_size,
				    !remove);
		if (cur_size == 0)
			break;
		ret = free_space_next_bitmap(trans, root, path);
		if (ret)
			goto out;
	}

	/*
	 * Read the bit for the block immediately after the extent of space if
	 * that block is within the block group.
	 */
	if (end < block_group->key.objectid + block_group->key.offset) {
		/* The next block may be in the next bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (end >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}

		next_bit = free_space_test_bit(block_group, path, end);
	} else {
		/* Range ends at the block group boundary: no right neighbor. */
		next_bit = -1;
	}

	if (remove) {
		new_extents = -1;
		if (prev_bit == 1) {
			/* Leftover on the left. */
			new_extents++;
		}
		if (next_bit == 1) {
			/* Leftover on the right. */
			new_extents++;
		}
	} else {
		new_extents = 1;
		if (prev_bit == 1) {
			/* Merging with neighbor on the left. */
			new_extents--;
		}
		if (next_bit == 1) {
			/* Merging with neighbor on the right. */
			new_extents--;
		}
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}

/*
 * Remove [start, start + size) from the free space tree when the block group
 * is in extent format: find the containing FREE_SPACE_EXTENT item, delete it,
 * and re-insert items for any leftover space on either side, updating the
 * extent count accordingly.
 */
static int remove_free_space_extent(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = -1;
	int ret;

	key.objectid = start;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(start >= found_start && end <= found_end);

	/*
	 * Okay, now that we've found the free space extent which contains the
	 * free space that we are removing, there are four cases:
	 *
	 * 1. We're using the whole extent: delete the key we found and
	 * decrement the free space extent count.
	 * 2. We are using part of the extent starting at the beginning: delete
	 * the key we found and insert a new key representing the leftover at
	 * the end. There is no net change in the number of extents.
	 * 3. We are using part of the extent ending at the end: delete the key
	 * we found and insert a new key representing the leftover at the
	 * beginning. There is no net change in the number of extents.
	 * 4. We are using part of the extent in the middle: delete the key we
	 * found and insert two new keys representing the leftovers on each
	 * side. Where we used to have one extent, we now have two, so increment
	 * the extent count. We may need to convert the block group to bitmaps
	 * as a result.
	 */

	/* Delete the existing key (cases 1-4). */
	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/* Add a key for leftovers at the beginning (cases 3 and 4). */
	if (start > found_start) {
		key.objectid = found_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = start - found_start;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	/* Add a key for leftovers at the end (cases 2 and 4). */
	if (end < found_end) {
		key.objectid = end;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = found_end - end;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}

/*
 * Remove [start, start + size) from the free space tree, dispatching to the
 * bitmap or extent implementation depending on the block group's current
 * format.  Caller must hold block_group->free_space_lock.
 */
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	/* Lazily create this block group's free space entries first. */
	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, fs_info, block_group,
						   path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, fs_info, block_group,
						path, start, size, 1);
	} else {
		return remove_free_space_extent(trans, fs_info, block_group,
						path, start, size);
	}
}

/*
 * Public entry point for removing free space: looks up the block group
 * containing @start, takes its free_space_lock, and delegates to
 * __remove_from_free_space_tree().  No-op if the free space tree feature is
 * not enabled.  Aborts the transaction on failure.
 */
int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __remove_from_free_space_tree(trans, fs_info, block_group, path,
					    start, size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Add [start, start + size) to the free space tree when the block group is in
 * extent format, merging with adjacent free space extents on either side and
 * updating the extent count accordingly.
 */
static int add_free_space_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_path *path,
				 u64 start, u64 size)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key, new_key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = 1;
	int ret;

	/*
	 * We are adding a new extent of free space, but we need to merge
	 * extents. There are four cases here:
	 *
	 * 1. The new extent does not have any immediate neighbors to merge
	 * with: add the new key and increment the free space extent count. We
	 * may need to convert the block group to bitmaps as a result.
	 * 2. The new extent has an immediate neighbor before it: remove the
	 * previous key and insert a new key combining both of them. There is no
	 * net change in the number of extents.
	 * 3. The new extent has an immediate neighbor after it: remove the next
	 * key and insert a new key combining both of them. There is no net
	 * change in the number of extents.
	 * 4. The new extent has immediate neighbors on both sides: remove both
	 * of the keys and insert a new key combining all of them. Where we used
	 * to have two extents, we now have one, so decrement the extent count.
	 */

	new_key.objectid = start;
	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
	new_key.offset = size;

	/* Search for a neighbor on the left. */
	if (start == block_group->key.objectid)
		goto right;
	key.objectid = start - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto right;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT(found_start < start && found_end <= start);

	/*
	 * Delete the neighbor on the left and absorb it into the new key (cases
	 * 2 and 4).
	 */
	if (found_end == start) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.objectid = found_start;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

right:
	/* Search for a neighbor on the right. */
	if (end == block_group->key.objectid + block_group->key.offset)
		goto insert;
	key.objectid = end;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto insert;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT((found_start < start && found_end <= start) ||
	       (found_start >= end && found_end > end));

	/*
	 * Delete the neighbor on the right and absorb it into the new key
	 * (cases 3 and 4).
	 */
	if (found_start == end) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

insert:
	/* Insert the new key (cases 1-4). */
	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
	if (ret)
		goto out;

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}

/*
 * Add [start, start + size) to the free space tree, dispatching to the bitmap
 * or extent implementation depending on the block group's current format.
 * Caller must hold block_group->free_space_lock.
 */
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	/* Lazily create this block group's free space entries first. */
	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, fs_info, block_group,
						   path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, fs_info, block_group,
						path, start, size, 0);
	} else {
		return add_free_space_extent(trans, fs_info, block_group, path,
					     start, size);
	}
}

/*
 * Public entry point for adding free space: looks up the block group
 * containing @start, takes its free_space_lock, and delegates to
 * __add_to_free_space_tree().  No-op if the free space tree feature is not
 * enabled.  Aborts the transaction on failure.
 */
int add_to_free_space_tree(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *fs_info,
			   u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __add_to_free_space_tree(trans, fs_info, block_group, path, start,
				       size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Populate the free space tree by walking the extent tree. Operations on the
 * extent tree that happen as a result of writes to the free space tree will go
 * through the normal add/remove hooks.
 */
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path, *path2;
	struct btrfs_key key;
	u64 start, end;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	path2 = btrfs_alloc_path();
	if (!path2) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	ret = add_new_free_space_info(trans, fs_info, block_group, path2);
	if (ret)
		goto out;

	mutex_lock(&block_group->free_space_lock);

	/*
	 * Iterate through all of the extent and metadata items in this block
	 * group, adding the free space between them and the free space at the
	 * end. Note that EXTENT_ITEM and METADATA_ITEM are less than
	 * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
	 * contained in.
	 */
	key.objectid = block_group->key.objectid;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
	if (ret < 0)
		goto out_locked;
	ASSERT(ret == 0);

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;
	while (1) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			if (key.objectid >= end)
				break;

			/* Gap before this allocated extent is free space. */
			if (start < key.objectid) {
				ret = __add_to_free_space_tree(trans, fs_info,
							       block_group,
							       path2, start,
							       key.objectid -
							       start);
				if (ret)
					goto out_locked;
			}
			start = key.objectid;
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				start += fs_info->nodesize;
			else
				start += key.offset;
		} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			if (key.objectid != block_group->key.objectid)
				break;
		}

		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			goto out_locked;
		if (ret)
			break;
	}
	/* Trailing free space after the last allocated extent. */
	if (start < end) {
		ret = __add_to_free_space_tree(trans, fs_info, block_group,
					       path2, start, end - start);
		if (ret)
			goto out_locked;
	}

	ret = 0;
out_locked:
	mutex_unlock(&block_group->free_space_lock);
out:
	btrfs_free_path(path2);
	btrfs_free_path(path);
	return ret;
}

/*
 * Create the free space tree root and populate it for every block group.
 * Sets BTRFS_FS_CREATING_FREE_SPACE_TREE for the duration and the
 * FREE_SPACE_TREE(_VALID) compat_ro bits on success.
 */
int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *free_space_root;
	struct btrfs_block_group_cache *block_group;
	struct rb_node *node;
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	free_space_root = btrfs_create_tree(trans, fs_info,
					    BTRFS_FREE_SPACE_TREE_OBJECTID);
	if (IS_ERR(free_space_root)) {
		ret = PTR_ERR(free_space_root);
		goto abort;
	}
	fs_info->free_space_root = free_space_root;

	node = rb_first(&fs_info->block_group_cache_tree);
	while (node) {
		block_group = rb_entry(node, struct btrfs_block_group_cache,
				       cache_node);
		ret = populate_free_space_tree(trans, fs_info, block_group);
		if (ret)
			goto abort;
		node = rb_next(node);
	}

	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	return 0;

abort:
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}

/*
 * Delete every item in @root, a whole leaf at a time, by repeatedly searching
 * from key (0, 0, 0) and batch-deleting the leaf's items.
 */
static int clear_free_space_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int nr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;

		nr = btrfs_header_nritems(path->nodes[0]);
		if (!nr)
			break;

		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Tear down the free space tree: clear the compat_ro bits, empty the tree,
 * delete its root item, and free the root node and root structure.
 */
int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *free_space_root = fs_info->free_space_root;
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	fs_info->free_space_root = NULL;

	ret = clear_free_space_tree(trans, free_space_root);
	if (ret)
		goto abort;

	ret = btrfs_del_root(trans, tree_root, &free_space_root->root_key);
	if (ret)
		goto abort;

	list_del(&free_space_root->dirty_list);

	btrfs_tree_lock(free_space_root->node);
	clean_tree_block(trans, fs_info, free_space_root->node);
	btrfs_tree_unlock(free_space_root->node);
	btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
			      0, 1);

	free_extent_buffer(free_space_root->node);
	free_extent_buffer(free_space_root->commit_root);
	kfree(free_space_root);

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	return 0;

abort:
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}

/*
 * Lazily create the free space entries for a new block group: insert its
 * FREE_SPACE_INFO item and mark its whole range as free.  Clears
 * needs_free_space first so the __add_to_free_space_tree() call below does
 * not recurse back into here.
 */
static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *block_group,
					struct btrfs_path *path)
{
	u64 start, end;
	int ret;

	/*
	 * NOTE(review): start/end are computed but never used below — the
	 * add call passes key.objectid/key.offset directly.  Looks like dead
	 * code; confirm before removing.
	 */
	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	block_group->needs_free_space = 0;

	ret = add_new_free_space_info(trans, fs_info, block_group, path);
	if (ret)
		return ret;

	return __add_to_free_space_tree(trans, fs_info, block_group, path,
					block_group->key.objectid,
					block_group->key.offset);
}

int add_block_group_free_space(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info,
			       struct
btrfs_block_group_cache *block_group) 1318 { 1319 struct btrfs_path *path = NULL; 1320 int ret = 0; 1321 1322 if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 1323 return 0; 1324 1325 mutex_lock(&block_group->free_space_lock); 1326 if (!block_group->needs_free_space) 1327 goto out; 1328 1329 path = btrfs_alloc_path(); 1330 if (!path) { 1331 ret = -ENOMEM; 1332 goto out; 1333 } 1334 1335 ret = __add_block_group_free_space(trans, fs_info, block_group, path); 1336 1337 out: 1338 btrfs_free_path(path); 1339 mutex_unlock(&block_group->free_space_lock); 1340 if (ret) 1341 btrfs_abort_transaction(trans, ret); 1342 return ret; 1343 } 1344 1345 int remove_block_group_free_space(struct btrfs_trans_handle *trans, 1346 struct btrfs_fs_info *fs_info, 1347 struct btrfs_block_group_cache *block_group) 1348 { 1349 struct btrfs_root *root = fs_info->free_space_root; 1350 struct btrfs_path *path; 1351 struct btrfs_key key, found_key; 1352 struct extent_buffer *leaf; 1353 u64 start, end; 1354 int done = 0, nr; 1355 int ret; 1356 1357 if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 1358 return 0; 1359 1360 if (block_group->needs_free_space) { 1361 /* We never added this block group to the free space tree. 
 */
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	/* Search backwards from the last possible key in this block group. */
	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		/*
		 * Walk the leaf backwards, counting how many trailing items
		 * belong to this block group; its FREE_SPACE_INFO item is the
		 * first of them and ends the walk.
		 */
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				nr++;
				path->slots[0]--;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY ||
				   found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		/* Delete the counted run of items in one call. */
		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Rebuild the in-memory free space cache for a block group from its
 * FREE_SPACE_BITMAP items. On entry @path points at the block group's
 * FREE_SPACE_INFO item. Returns 0 or a negative errno; -EIO if the
 * reconstructed extent count disagrees with @expected_extent_count.
 */
static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int prev_bit = 0, bit;
	/* Initialize to silence GCC. */
	u64 extent_start = 0;
	u64 end, offset;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = fs_info->free_space_root;

	end = block_group->key.objectid + block_group->key.offset;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		/* The next block group's info item ends this one's bitmaps. */
		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		/*
		 * Scan the bitmap one sector at a time: a 0 -> 1 transition
		 * starts a free extent, a 1 -> 0 transition ends one.
		 */
		offset = key.objectid;
		while (offset < key.objectid + key.offset) {
			bit = free_space_test_bit(block_group, path, offset);
			if (prev_bit == 0 && bit == 1) {
				extent_start = offset;
			} else if (prev_bit == 1 && bit == 0) {
				total_found += add_new_free_space(block_group,
								  fs_info,
								  extent_start,
								  offset);
				/* Periodically wake up waiters on progress. */
				if (total_found > CACHING_CTL_WAKE_UP) {
					total_found = 0;
					wake_up(&caching_ctl->wait);
				}
				extent_count++;
			}
			prev_bit = bit;
			offset += fs_info->sectorsize;
		}
	}
	/* Close out a free extent running to the end of the block group. */
	if (prev_bit == 1) {
		total_found += add_new_free_space(block_group, fs_info,
						  extent_start, end);
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}

/*
 * Rebuild the in-memory free space cache for a block group from its
 * FREE_SPACE_EXTENT items. On entry @path points at the block group's
 * FREE_SPACE_INFO item. Returns 0 or a negative errno; -EIO if the item
 * count disagrees with @expected_extent_count.
 */
static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 end;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = fs_info->free_space_root;

	end = block_group->key.objectid + block_group->key.offset;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		/* The next block group's info item ends this one's extents. */
		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		/* Each item is one free extent: (objectid, length). */
		total_found += add_new_free_space(block_group, fs_info,
						  key.objectid,
						  key.objectid + key.offset);
		/* Periodically wake up waiters on progress. */
		if (total_found > CACHING_CTL_WAKE_UP) {
			total_found = 0;
			wake_up(&caching_ctl->wait);
		}
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}

/*
 * Load a block group's free space from the free space tree into the in-memory
 * cache, dispatching to the bitmap or extent reader depending on the info
 * item's flags.
 */
int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_free_space_info *info;
	struct btrfs_path *path;
	u32 extent_count, flags;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Just like caching_thread() doesn't want to deadlock on the extent
	 * tree, we don't want to deadlock on the free space tree.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
	flags = btrfs_free_space_flags(path->nodes[0], info);

	/*
	 * We left path pointing to the free space info item, so now
	 * load_free_space_foo can just iterate through the free space tree from
	 * there.
	 */
	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
		ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
	else
		ret = load_free_space_extents(caching_ctl, path, extent_count);

out:
	btrfs_free_path(path);
	return ret;
}