// SPDX-License-Identifier: GPL-2.0

#include <linux/sizes.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}
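
/*
 * Illustrative example (a sketch, not part of the original file): while a
 * balance with -dconvert=raid1 is running, a query such as
 * get_restripe_target(fs_info, BTRFS_BLOCK_GROUP_DATA) returns
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 in extended format, which
 * the caller then converts with extended_to_chunk().
 */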

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}
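
/*
 * Example of the reduction above (illustrative, not from the original file):
 * on a 2-device filesystem whose available data profile bits are
 * RAID1 | RAID0 (plus SINGLE), the priority chain RAID6 > RAID5 > RAID10 >
 * RAID1 > RAID0 picks RAID1, so btrfs_get_alloc_profile() returns
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 in chunk format.
 */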

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		/*
		 * If there was a failure to cleanup a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on reserved > 0 in that
		 * case.
		 */
		if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
			WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If not empty, someone is still holding mutex of
		 * full_stripe_lock, which can only be released by caller.
		 * And it will definitely cause use-after-free when caller
		 * tries to release full stripe lock.
		 *
		 * No better way to resolve, but only to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache->physical_map);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;
	bool leftmost = true;

	ASSERT(block_group->length != 0);

	write_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_root.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			write_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color_cached(&block_group->cache_node,
			       &info->block_group_cache_tree, leftmost);

	write_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	read_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_root.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	read_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	read_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		read_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	read_unlock(&fs_info->block_group_cache_lock);
	return cache;
}
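
/*
 * Usage sketch (illustrative only): the lookup/next pair above supports
 * iterating all block groups while handing references along the way:
 *
 *	struct btrfs_block_group *bg;
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg)) {
 *		... use bg; btrfs_next_block_group() drops the previous ref ...
 *	}
 */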

/*
 * Check if we can do a NOCOW write for a given extent.
 *
 * @fs_info:	The filesystem information object.
 * @bytenr:	Logical start address of the extent.
 *
 * Check if we can do a NOCOW write for the given extent, and increments the
 * number of NOCOW writers in the block group that contains the extent, as long
 * as the block group exists and it's currently not in read-only mode.
 *
 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
 * is responsible for calling btrfs_dec_nocow_writers() later.
 *
 * Or NULL if we can not do a NOCOW write
 */
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool can_nocow = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return NULL;

	spin_lock(&bg->lock);
	if (bg->ro)
		can_nocow = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	if (!can_nocow) {
		btrfs_put_block_group(bg);
		return NULL;
	}

	/* No put on block group, done by btrfs_dec_nocow_writers(). */
	return bg;
}

/*
 * Decrement the number of NOCOW writers in a block group.
 *
 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 * and on the block group returned by that call. Typically this is called after
 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 * relocation.
 *
 * After this call, the caller should not use the block group anymore. If it wants
 * to use it, then it should get a reference on it before calling this function.
 */
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);

	/* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
	btrfs_put_block_group(bg);
}
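
/*
 * Pairing sketch (illustrative, not from the original file): a NOCOW write
 * path typically brackets ordered extent creation like this:
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, disk_bytenr);
 *	if (!bg)
 *		fall back to a COW write;
 *	... create the ordered extent for the NOCOW write ...
 *	btrfs_dec_nocow_writers(bg);	(also drops the lookup reference)
 */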

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have had allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}
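
/*
 * Illustrative allocator pattern (a sketch, not part of this file): when an
 * allocation attempt races with caching, a caller can block until enough free
 * space has shown up and then retry, per the comment above:
 *
 *	btrfs_wait_block_group_cache_progress(cache, num_bytes);
 *	if (cache->cached == BTRFS_CACHE_ERROR)
 *		skip this block group;
 *	... retry the allocation ...
 */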

static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
				       struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
}

static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
	ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by btrfs_cache_block_group, since we could have freed
 * extents we need to check the pinned_extents for any extents that can't be
 * used yet since their free space will be released as soon as the transaction
 * commits.
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->excluded_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
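
/*
 * Worked example for add_new_free_space() (illustrative numbers): with an
 * excluded range (e.g. a super block stripe) at [start + 8M, start + 8M + 64K]
 * inside [start, end), the first loop iteration adds [start, start + 8M) as
 * free space and advances start past the excluded range; the tail handling
 * after the loop then adds the remainder up to end. The returned total_added
 * counts only the bytes actually added as free space.
 */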

/*
 * Get an arbitrary extent item index / max_index through the block group
 *
 * @block_group: the block group to sample from
 * @index:       the integral step through the block group to grab from
 * @max_index:   the granularity of the sampling
 * @key:         return value parameter for the item we find
 *
 * Pre-conditions on indices:
 * 0 <= index <= max_index
 * 0 < max_index
 *
 * Returns: 0 on success, 1 if the search didn't yield a useful item, negative
 * error code on error.
 */
static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl,
					  struct btrfs_block_group *block_group,
					  int index, int max_index,
					  struct btrfs_key *found_key)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	u64 search_offset;
	u64 search_end = block_group->start + block_group->length;
	struct btrfs_path *path;
	struct btrfs_key search_key;
	int ret = 0;

	ASSERT(index >= 0);
	ASSERT(index <= max_index);
	ASSERT(max_index > 0);
	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start,
						       BTRFS_SUPER_INFO_OFFSET));

	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	search_offset = index * div_u64(block_group->length, max_index);
	search_key.objectid = block_group->start + search_offset;
	search_key.type = BTRFS_EXTENT_ITEM_KEY;
	search_key.offset = 0;

	btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
		/* Success; sampled an extent item in the block group */
		if (found_key->type == BTRFS_EXTENT_ITEM_KEY &&
		    found_key->objectid >= block_group->start &&
		    found_key->objectid + found_key->offset <= search_end)
			break;

		/* We can't possibly find a valid extent item anymore */
		if (found_key->objectid >= search_end) {
			ret = 1;
			break;
		}
	}

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	btrfs_free_path(path);
	return ret;
}
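
/*
 * Sampling arithmetic, for illustration: with a 1G block group and
 * max_index == 5, search_offset is index * (1G / 5), so the five samples
 * start their searches at roughly 0%, 20%, 40%, 60% and 80% of the block
 * group, each returning the first extent item found at or after that offset.
 */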

/*
 * Best effort attempt to compute a block group's size class while caching it.
 *
 * @block_group: the block group we are caching
 *
 * We cannot infer the size class while adding free space extents, because that
 * logic doesn't care about contiguous file extents (it doesn't differentiate
 * between a 100M extent and 100 contiguous 1M extents). So we need to read the
 * file extent items. Reading all of them is quite wasteful, because usually
 * only a handful are enough to give a good answer. Therefore, we just grab 5 of
 * them at even steps through the block group and pick the smallest size class
 * we see. Since size class is best effort, and not guaranteed in general,
 * inaccuracy is acceptable.
 *
 * To be more explicit about why this algorithm makes sense:
 *
 * If we are caching in a block group from disk, then there are three major cases
 * to consider:
 * 1. the block group is well behaved and all extents in it are the same size
 *    class.
 * 2. the block group is mostly one size class with rare exceptions for last
 *    ditch allocations
 * 3. the block group was populated before size classes and can have a totally
 *    arbitrary mix of size classes.
 *
 * In case 1, looking at any extent in the block group will yield the correct
 * result. For the mixed cases, taking the minimum size class seems like a good
 * approximation, since gaps from frees will be usable to the size class. For
 * 2., a small handful of file extents is likely to yield the right answer. For
 * 3, we can either read every file extent, or admit that this is best effort
 * anyway and try to stay fast.
 *
 * Returns: 0 on success, negative error code on error.
 */
static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl,
				       struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_key key;
	int i;
	u64 min_size = block_group->length;
	enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE;
	int ret;

	if (!btrfs_block_group_should_use_size_class(block_group))
		return 0;

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	for (i = 0; i < 5; ++i) {
		ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key);
		if (ret < 0)
			goto out;
		if (ret > 0)
			continue;
		min_size = min_t(u64, min_size, key.offset);
		size_class = btrfs_calc_block_group_size_class(min_size);
	}
	if (size_class != BTRFS_BG_SZ_NONE) {
		spin_lock(&block_group->lock);
		block_group->size_class = size_class;
		spin_unlock(&block_group->lock);
	}
out:
	return ret;
}
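
/*
 * Example of the min-of-samples rule above (illustrative): if the five
 * samples return extents of 64M, 64M, 16K, 64M and 64M, min_size ends up at
 * 16K and the block group is classified by the smallest size class seen,
 * matching the "taking the minimum size class" reasoning in the comment
 * above.
 */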

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
	extent_root = btrfs_extent_root(fs_info, last);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
					  block_group->start + block_group->length);

out:
	btrfs_free_path(path);
	return ret;
}
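
/*
 * Note on the caching life cycle (a summary of the surrounding code, not new
 * behavior): a block group moves from BTRFS_CACHE_NO through
 * BTRFS_CACHE_STARTED (set in btrfs_cache_block_group(), or again in
 * caching_thread() after a failed space cache load) to BTRFS_CACHE_FINISHED
 * or BTRFS_CACHE_ERROR, which caching_thread() sets once the chosen loader
 * returns.
 */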

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	load_block_group_size_class(caching_ctl, block_group);
	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		ret = load_free_space_cache(block_group);
		if (ret == 1) {
			ret = 0;
			goto done;
		}

		/*
		 * We failed to load the space cache, set ourselves to
		 * CACHE_STARTED and carry on.
		 */
		spin_lock(&block_group->lock);
		block_group->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&block_group->lock);
		wake_up(&caching_ctl->wait);
	}

	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching. Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
done:
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl = NULL;
	int ret = 0;

	/* Allocator for zoned filesystems does not use the cache at all */
	if (btrfs_is_zoned(fs_info))
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	refcount_set(&caching_ctl->count, 2);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		kfree(caching_ctl);

		caching_ctl = cache->caching_ctl;
		if (caching_ctl)
			refcount_inc(&caching_ctl->count);
		spin_unlock(&cache->lock);
		goto out;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	write_lock(&fs_info->block_group_cache_lock);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	write_unlock(&fs_info->block_group_cache_lock);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
	if (wait && caching_ctl)
		ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	if (caching_ctl)
		btrfs_put_caching_control(caching_ctl);

	return ret;
}
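
/*
 * Usage sketch for btrfs_cache_block_group() (illustrative): callers that
 * must have the free space fully loaded pass wait == true and check the
 * return value, while opportunistic callers just kick off the worker:
 *
 *	ret = btrfs_cache_block_group(cache, true);	(blocks until done)
 *	ret = btrfs_cache_block_group(cache, false);	(starts caching async)
 */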

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
			  BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *            in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = btrfs_block_group_root(fs_info);
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}
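
/*
 * For reference (derived from the function above): a block group item is
 * keyed as (objectid = block group start, type = BTRFS_BLOCK_GROUP_ITEM_KEY,
 * offset = block group length), so a 1G block group starting at logical 22G
 * would be deleted via the key (22G, BTRFS_BLOCK_GROUP_ITEM_KEY, 1G).
 */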

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
	if (ret)
		goto out;

	write_lock(&fs_info->block_group_cache_lock);
	rb_erase_cached(&block_group->cache_node,
			&fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	write_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);

	write_lock(&fs_info->block_group_cache_lock);
	caching_ctl = btrfs_get_caching_control(block_group);
	if (!caching_ctl) {
		struct btrfs_caching_control *ctl;

		list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
			if (ctl->block_group == block_group) {
				caching_ctl = ctl;
				refcount_inc(&caching_ctl->count);
				break;
			}
		}
	}
	if (caching_ctl)
		list_del_init(&caching_ctl->list);
	write_unlock(&fs_info->block_group_cache_lock);

	if (caching_ctl) {
		/* Once for the caching bgs list and once for us. */
		btrfs_put_caching_control(caching_ctl);
		btrfs_put_caching_control(caching_ctl);
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length - block_group->zone_unusable);
		WARN_ON(block_group->space_info->bytes_zone_unusable
			< block_group->zone_unusable);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -=
		(block_group->length - block_group->zone_unusable);
	block_group->space_info->bytes_zone_unusable -=
		block_group->zone_unusable;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 */
	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	spin_lock(&block_group->lock);
	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);

	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}

out:
	/* Once for the lookup reference */
	btrfs_put_block_group(block_group);
	if (remove_rsv)
		btrfs_delayed_refs_rsv_release(fs_info, 1);
	btrfs_free_path(path);
	return ret;
}

struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct btrfs_root *root = btrfs_block_group_root(fs_info);
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);

	return btrfs_start_transaction_fallback_global_rsv(root, num_items);
}
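
/*
 * Worked example for the reservation above (illustrative): removing a RAID1
 * block group striped over 2 devices needs num_items = 3 + 2 = 5 metadata
 * units (orphan + block group item + free space item + one device extent
 * item per stripe), while a 4-device RAID10 chunk would need 3 + 4 = 7.
 */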

/*
 * Mark block group @cache read-only, so later write won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->swap_extents) {
		ret = -ETXTBSY;
		goto out;
	}

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->length - cache->reserved - cache->pinned -
		    cache->bytes_super - cache->zone_unusable - cache->used;

	/*
	 * Data never overcommits, even in mixed mode, so do just the straight
	 * check of left over space in how much we have allocated.
	 */
	if (force) {
		ret = 0;
	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
		u64 sinfo_used = btrfs_space_info_used(sinfo, true);

		/*
		 * Here we make sure if we mark this bg RO, we still have enough
		 * free space as buffer.
		 */
		if (sinfo_used + num_bytes <= sinfo->total_bytes)
			ret = 0;
	} else {
		/*
		 * We overcommit metadata, so we need to do the
		 * btrfs_can_overcommit check here, and we need to pass in
		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
		 * leeway to allow us to mark this block group as read only.
		 */
		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
					 BTRFS_RESERVE_NO_FLUSH))
			ret = 0;
	}

	if (!ret) {
		sinfo->bytes_readonly += num_bytes;
		if (btrfs_is_zoned(cache->fs_info)) {
			/* Migrate zone_unusable bytes to readonly */
			sinfo->bytes_readonly += cache->zone_unusable;
			sinfo->bytes_zone_unusable -= cache->zone_unusable;
			cache->zone_unusable = 0;
		}
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
		btrfs_info(cache->fs_info,
			"unable to make block group %llu ro", cache->start);
		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
	}
	return ret;
}
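
/*
 * Example of the num_bytes computation above (illustrative numbers): a 1G
 * block group with 600M used, 100M reserved, 50M pinned, 16M of super stripes
 * and no zone_unusable still has 1G - 600M - 100M - 50M - 16M = 258M of
 * allocatable space, which must be absorbed into bytes_readonly (or fit under
 * the overcommit check) before the flip to read-only succeeds.
 */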

static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	struct btrfs_transaction *prev_trans = NULL;
	const u64 start = bg->start;
	const u64 end = start + bg->length - 1;
	int ret;

	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->list.prev != &fs_info->trans_list) {
		prev_trans = list_last_entry(&trans->transaction->list,
					     struct btrfs_transaction, list);
		refcount_inc(&prev_trans->use_count);
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * Hold the unused_bg_unpin_mutex lock to avoid racing with
	 * btrfs_finish_extent_commit(). If we are at transaction N, another
	 * task might be running finish_extent_commit() for the previous
	 * transaction N - 1, and have seen a range belonging to the block
	 * group in pinned_extents before we were able to clear the whole block
	 * group range from pinned_extents. This means that task can lookup for
	 * the block group after we unpinned it from pinned_extents and removed
	 * it, leading to a BUG_ON() at unpin_extent_range().
	 */
	mutex_lock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans) {
		ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
					EXTENT_DIRTY);
		if (ret)
			goto out;
	}

	ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
				EXTENT_DIRTY);
out:
	mutex_unlock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans)
		btrfs_put_transaction(prev_trans);

	return ret == 0;
}

/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
	int ret = 0;

	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
		return;

	if (btrfs_fs_closing(fs_info))
		return;

	/*
	 * Long running balances can keep us blocked here for eternity, so
	 * simply skip deletion if we're unable to get the mutex.
	 */
	if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
		return;

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		int trimming;

		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);

		space_info = block_group->space_info;

		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

		btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);

		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);

		/*
		 * Async discard moves the final block group discard to be prior
		 * to the unused_bgs code path. Therefore, if it's not fully
		 * trimmed, punt it back to the async discard lists.
		 */
		if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
		    !btrfs_is_free_space_trimmed(block_group)) {
			trace_btrfs_skip_unused_block_group(block_group);
			up_write(&space_info->groups_sem);
			/* Requeue if we failed because of async discard */
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto next;
		}

		spin_lock(&block_group->lock);
		if (block_group->reserved || block_group->pinned ||
		    block_group->used || block_group->ro ||
		    list_is_singular(&block_group->list)) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group.  We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			trace_btrfs_skip_unused_block_group(block_group);
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);

		/* We don't want to force the issue, only flip if it's ok. */
		ret = inc_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		ret = btrfs_zone_finish(block_group);
		if (ret < 0) {
			btrfs_dec_block_group_ro(block_group);
			if (ret == -EAGAIN)
				ret = 0;
			goto next;
		}

		/*
		 * Want to do this before we do anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_start_trans_remove_block_group(fs_info,
							     block_group->start);
		if (IS_ERR(trans)) {
			btrfs_dec_block_group_ro(block_group);
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		if (!clean_pinned_extents(trans, block_group)) {
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}

		/*
		 * At this point, the block_group is read only and should fail
		 * new allocations.  However, btrfs_finish_extent_commit() can
		 * cause this block_group to be placed back on the discard
		 * lists because now the block_group isn't fully discarded.
		 * Bail here and try again later after discarding everything.
		 */
		spin_lock(&fs_info->discard_ctl.lock);
		if (!list_empty(&block_group->discard_list)) {
			spin_unlock(&fs_info->discard_ctl.lock);
			btrfs_dec_block_group_ro(block_group);
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto end_trans;
		}
		spin_unlock(&fs_info->discard_ctl.lock);

		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);

		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
						     -block_group->pinned);
		space_info->bytes_readonly += block_group->pinned;
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/*
		 * The normal path here is an unused block group is passed here,
		 * then trimming is handled in the transaction commit path.
		 * Async discard interposes before this to do the trimming
		 * before coming down the unused block group path as trimming
		 * will no longer be done later in the transaction commit path.
		 */
		if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
			goto flip_async;

		/*
		 * DISCARD can flip during remount. On zoned filesystems, we
		 * need to reset sequential-required zones.
		 */
		trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) ||
				btrfs_is_zoned(fs_info);

		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_freeze_block_group(block_group);

		/*
		 * Btrfs_remove_chunk will abort the transaction if things go
		 * horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, block_group->start);

		if (ret) {
			if (trimming)
				btrfs_unfreeze_block_group(block_group);
			goto end_trans;
		}

		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			spin_lock(&fs_info->unused_bgs_lock);
			/*
			 * A concurrent scrub might have added us to the list
			 * fs_info->unused_bgs, so use a list_move operation
			 * to add the block group to the deleted_bgs list.
			 */
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&fs_info->unused_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans);
next:
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	return;

flip_async:
	btrfs_end_transaction(trans);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_put_block_group(block_group);
	btrfs_discard_punt_unused_bgs_list(fs_info);
}

void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->unused_bgs_lock);
	if (list_empty(&bg->bg_list)) {
		btrfs_get_block_group(bg);
		trace_btrfs_add_unused_block_group(bg);
		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}

/*
 * We want block groups with a low number of used bytes to be in the beginning
 * of the list, so they will get reclaimed first.
 */
static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
			   const struct list_head *b)
{
	const struct btrfs_block_group *bg1, *bg2;

	bg1 = list_entry(a, struct btrfs_block_group, bg_list);
	bg2 = list_entry(b, struct btrfs_block_group, bg_list);

	return bg1->used > bg2->used;
}

static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
{
	if (btrfs_is_zoned(fs_info))
		return btrfs_zoned_should_reclaim(fs_info);
	return true;
}

static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed)
{
	const struct btrfs_space_info *space_info = bg->space_info;
	const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold);
	const u64 new_val = bg->used;
	const u64 old_val = new_val + bytes_freed;
	u64 thresh;

	if (reclaim_thresh == 0)
		return false;

	thresh = mult_perc(bg->length, reclaim_thresh);

	/*
	 * If we were below the threshold before don't reclaim, we are likely a
	 * brand new block group and we don't want to relocate new block groups.
	 */
	if (old_val < thresh)
		return false;
	if (new_val >= thresh)
		return false;
	return true;
}
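
/*
 * Threshold example (illustrative): with bg_reclaim_threshold == 75 and a 1G
 * block group, thresh is 768M. A free that drops bg->used from 800M
 * (old_val >= thresh) to 700M (new_val < thresh) makes this return true; a
 * block group that never reached 768M is treated as brand new and left alone.
 */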

void btrfs_reclaim_bgs_work(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info =
		container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
	struct btrfs_block_group *bg;
	struct btrfs_space_info *space_info;

	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
		return;

	if (btrfs_fs_closing(fs_info))
		return;

	if (!btrfs_should_reclaim(fs_info))
		return;

	sb_start_write(fs_info->sb);

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		sb_end_write(fs_info->sb);
		return;
	}

	/*
	 * Long running balances can keep us blocked here for eternity, so
	 * simply skip reclaim if we're unable to get the mutex.
	 */
	if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
		btrfs_exclop_finish(fs_info);
		sb_end_write(fs_info->sb);
		return;
	}

	spin_lock(&fs_info->unused_bgs_lock);
	/*
	 * Sort happens under lock because we can't simply splice it and sort.
	 * The block groups might still be in use and reachable via bg_list,
	 * and their presence in the reclaim_bgs list must be preserved.
	 */
	list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
	while (!list_empty(&fs_info->reclaim_bgs)) {
		u64 zone_unusable;
		int ret = 0;

		bg = list_first_entry(&fs_info->reclaim_bgs,
				      struct btrfs_block_group,
				      bg_list);
		list_del_init(&bg->bg_list);

		space_info = bg->space_info;
		spin_unlock(&fs_info->unused_bgs_lock);

		/* Don't race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);

		spin_lock(&bg->lock);
		if (bg->reserved || bg->pinned || bg->ro) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group.  We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			spin_unlock(&bg->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		if (bg->used == 0) {
			/*
			 * It is possible that we trigger relocation on a block
			 * group as its extents are deleted and it first goes
			 * below the threshold, then shortly after goes empty.
			 *
			 * In this case, relocating it does delete it, but has
			 * some overhead in relocation specific metadata, looking
			 * for the non-existent extents and running some extra
			 * transactions, which we can avoid by using one of the
			 * other mechanisms for dealing with empty block groups.
			 */
			if (!btrfs_test_opt(fs_info, DISCARD_ASYNC))
				btrfs_mark_bg_unused(bg);
			spin_unlock(&bg->lock);
			up_write(&space_info->groups_sem);
			goto next;

		}
		/*
		 * The block group might no longer meet the reclaim condition by
		 * the time we get around to reclaiming it, so to avoid
		 * reclaiming overly full block_groups, skip reclaiming them.
		 *
		 * Since the decision making process also depends on the amount
		 * being freed, pass in a fake giant value to skip that extra
		 * check, which is more meaningful when adding to the list in
		 * the first place.
		 */
		if (!should_reclaim_block_group(bg, bg->length)) {
			spin_unlock(&bg->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&bg->lock);

		/* Get out fast, in case we're unmounting the filesystem */
		if (btrfs_fs_closing(fs_info)) {
			up_write(&space_info->groups_sem);
			goto next;
		}

		/*
		 * Cache the zone_unusable value before turning the block group
		 * to read only. As soon as the block group is read only its
		 * zone_unusable value gets moved to the block group's read-only
		 * bytes and isn't available for calculations anymore.
		 */
1814 */ 1815 zone_unusable = bg->zone_unusable; 1816 ret = inc_block_group_ro(bg, 0); 1817 up_write(&space_info->groups_sem); 1818 if (ret < 0) 1819 goto next; 1820 1821 btrfs_info(fs_info, 1822 "reclaiming chunk %llu with %llu%% used %llu%% unusable", 1823 bg->start, 1824 div64_u64(bg->used * 100, bg->length), 1825 div64_u64(zone_unusable * 100, bg->length)); 1826 trace_btrfs_reclaim_block_group(bg); 1827 ret = btrfs_relocate_chunk(fs_info, bg->start); 1828 if (ret) { 1829 btrfs_dec_block_group_ro(bg); 1830 btrfs_err(fs_info, "error relocating chunk %llu", 1831 bg->start); 1832 } 1833 1834 next: 1835 btrfs_put_block_group(bg); 1836 spin_lock(&fs_info->unused_bgs_lock); 1837 } 1838 spin_unlock(&fs_info->unused_bgs_lock); 1839 mutex_unlock(&fs_info->reclaim_bgs_lock); 1840 btrfs_exclop_finish(fs_info); 1841 sb_end_write(fs_info->sb); 1842 } 1843 1844 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) 1845 { 1846 spin_lock(&fs_info->unused_bgs_lock); 1847 if (!list_empty(&fs_info->reclaim_bgs)) 1848 queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); 1849 spin_unlock(&fs_info->unused_bgs_lock); 1850 } 1851 1852 void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) 1853 { 1854 struct btrfs_fs_info *fs_info = bg->fs_info; 1855 1856 spin_lock(&fs_info->unused_bgs_lock); 1857 if (list_empty(&bg->bg_list)) { 1858 btrfs_get_block_group(bg); 1859 trace_btrfs_add_reclaim_block_group(bg); 1860 list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); 1861 } 1862 spin_unlock(&fs_info->unused_bgs_lock); 1863 } 1864 1865 static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, 1866 struct btrfs_path *path) 1867 { 1868 struct extent_map_tree *em_tree; 1869 struct extent_map *em; 1870 struct btrfs_block_group_item bg; 1871 struct extent_buffer *leaf; 1872 int slot; 1873 u64 flags; 1874 int ret = 0; 1875 1876 slot = path->slots[0]; 1877 leaf = path->nodes[0]; 1878 1879 em_tree = &fs_info->mapping_tree; 1880 read_lock(&em_tree->lock); 1881 em = lookup_extent_mapping(em_tree, key->objectid, key->offset); 1882 read_unlock(&em_tree->lock); 1883 if (!em) { 1884 btrfs_err(fs_info, 1885 "logical %llu len %llu found bg but no related chunk", 1886 key->objectid, key->offset); 1887 return -ENOENT; 1888 } 1889 1890 if (em->start != key->objectid || em->len != key->offset) { 1891 btrfs_err(fs_info, 1892 "block group %llu len %llu mismatch with chunk %llu len %llu", 1893 key->objectid, key->offset, em->start, em->len); 1894 ret = -EUCLEAN; 1895 goto out_free_em; 1896 } 1897 1898 read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), 1899 sizeof(bg)); 1900 flags = btrfs_stack_block_group_flags(&bg) & 1901 BTRFS_BLOCK_GROUP_TYPE_MASK; 1902 1903 if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 1904 btrfs_err(fs_info, 1905 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 1906 key->objectid, key->offset, flags, 1907 (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type)); 1908 ret = -EUCLEAN; 1909 } 1910 1911 out_free_em: 1912 free_extent_map(em); 1913 return ret; 1914 } 1915 1916 static int find_first_block_group(struct btrfs_fs_info *fs_info, 1917 struct btrfs_path *path, 1918 struct btrfs_key *key) 1919 { 1920 struct btrfs_root *root = btrfs_block_group_root(fs_info); 1921 int ret; 1922 struct btrfs_key found_key; 1923 1924 btrfs_for_each_slot(root, key, &found_key, path, ret) { 1925 if (found_key.objectid >= key->objectid && 1926 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 1927 return read_bg_from_eb(fs_info, 
&found_key, path); 1928 } 1929 } 1930 return ret; 1931 } 1932 1933 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 1934 { 1935 u64 extra_flags = chunk_to_extended(flags) & 1936 BTRFS_EXTENDED_PROFILE_MASK; 1937 1938 write_seqlock(&fs_info->profiles_lock); 1939 if (flags & BTRFS_BLOCK_GROUP_DATA) 1940 fs_info->avail_data_alloc_bits |= extra_flags; 1941 if (flags & BTRFS_BLOCK_GROUP_METADATA) 1942 fs_info->avail_metadata_alloc_bits |= extra_flags; 1943 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 1944 fs_info->avail_system_alloc_bits |= extra_flags; 1945 write_sequnlock(&fs_info->profiles_lock); 1946 } 1947 1948 /* 1949 * Map a physical disk address to a list of logical addresses. 1950 * 1951 * @fs_info: the filesystem 1952 * @chunk_start: logical address of block group 1953 * @physical: physical address to map to logical addresses 1954 * @logical: return array of logical addresses which map to @physical 1955 * @naddrs: length of @logical 1956 * @stripe_len: size of IO stripe for the given block group 1957 * 1958 * Maps a particular @physical disk address to a list of @logical addresses. 1959 * Used primarily to exclude those portions of a block group that contain super 1960 * block copies. 1961 */ 1962 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 1963 u64 physical, u64 **logical, int *naddrs, int *stripe_len) 1964 { 1965 struct extent_map *em; 1966 struct map_lookup *map; 1967 u64 *buf; 1968 u64 bytenr; 1969 u64 data_stripe_length; 1970 u64 io_stripe_size; 1971 int i, nr = 0; 1972 int ret = 0; 1973 1974 em = btrfs_get_chunk_map(fs_info, chunk_start, 1); 1975 if (IS_ERR(em)) 1976 return -EIO; 1977 1978 map = em->map_lookup; 1979 data_stripe_length = em->orig_block_len; 1980 io_stripe_size = map->stripe_len; 1981 chunk_start = em->start; 1982 1983 /* For RAID5/6 adjust to a full IO stripe length */ 1984 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 1985 io_stripe_size = map->stripe_len * nr_data_stripes(map); 1986 1987 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 1988 if (!buf) { 1989 ret = -ENOMEM; 1990 goto out; 1991 } 1992 1993 for (i = 0; i < map->num_stripes; i++) { 1994 bool already_inserted = false; 1995 u64 stripe_nr; 1996 u64 offset; 1997 int j; 1998 1999 if (!in_range(physical, map->stripes[i].physical, 2000 data_stripe_length)) 2001 continue; 2002 2003 stripe_nr = physical - map->stripes[i].physical; 2004 stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset); 2005 2006 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 2007 BTRFS_BLOCK_GROUP_RAID10)) { 2008 stripe_nr = stripe_nr * map->num_stripes + i; 2009 stripe_nr = div_u64(stripe_nr, map->sub_stripes); 2010 } 2011 /* 2012 * The remaining case would be for RAID56, multiply by 2013 * nr_data_stripes(). 
Alternatively, just use io_stripe_size below 2014 * instead of map->stripe_len 2015 */ 2016 2017 bytenr = chunk_start + stripe_nr * io_stripe_size + offset; 2018 2019 /* Ensure we don't add duplicate addresses */ 2020 for (j = 0; j < nr; j++) { 2021 if (buf[j] == bytenr) { 2022 already_inserted = true; 2023 break; 2024 } 2025 } 2026 2027 if (!already_inserted) 2028 buf[nr++] = bytenr; 2029 } 2030 2031 *logical = buf; 2032 *naddrs = nr; 2033 *stripe_len = io_stripe_size; 2034 out: 2035 free_extent_map(em); 2036 return ret; 2037 } 2038 2039 static int exclude_super_stripes(struct btrfs_block_group *cache) 2040 { 2041 struct btrfs_fs_info *fs_info = cache->fs_info; 2042 const bool zoned = btrfs_is_zoned(fs_info); 2043 u64 bytenr; 2044 u64 *logical; 2045 int stripe_len; 2046 int i, nr, ret; 2047 2048 if (cache->start < BTRFS_SUPER_INFO_OFFSET) { 2049 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; 2050 cache->bytes_super += stripe_len; 2051 ret = btrfs_add_excluded_extent(fs_info, cache->start, 2052 stripe_len); 2053 if (ret) 2054 return ret; 2055 } 2056 2057 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 2058 bytenr = btrfs_sb_offset(i); 2059 ret = btrfs_rmap_block(fs_info, cache->start, 2060 bytenr, &logical, &nr, &stripe_len); 2061 if (ret) 2062 return ret; 2063 2064 /* Shouldn't have super stripes in sequential zones */ 2065 if (zoned && nr) { 2066 btrfs_err(fs_info, 2067 "zoned: block group %llu must not contain super block", 2068 cache->start); 2069 return -EUCLEAN; 2070 } 2071 2072 while (nr--) { 2073 u64 len = min_t(u64, stripe_len, 2074 cache->start + cache->length - logical[nr]); 2075 2076 cache->bytes_super += len; 2077 ret = btrfs_add_excluded_extent(fs_info, logical[nr], 2078 len); 2079 if (ret) { 2080 kfree(logical); 2081 return ret; 2082 } 2083 } 2084 2085 kfree(logical); 2086 } 2087 return 0; 2088 } 2089 2090 static struct btrfs_block_group *btrfs_create_block_group_cache( 2091 struct btrfs_fs_info *fs_info, u64 start) 2092 { 2093 struct btrfs_block_group *cache; 2094 2095 cache = kzalloc(sizeof(*cache), GFP_NOFS); 2096 if (!cache) 2097 return NULL; 2098 2099 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 2100 GFP_NOFS); 2101 if (!cache->free_space_ctl) { 2102 kfree(cache); 2103 return NULL; 2104 } 2105 2106 cache->start = start; 2107 2108 cache->fs_info = fs_info; 2109 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 2110 2111 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; 2112 2113 refcount_set(&cache->refs, 1); 2114 spin_lock_init(&cache->lock); 2115 init_rwsem(&cache->data_rwsem); 2116 INIT_LIST_HEAD(&cache->list); 2117 INIT_LIST_HEAD(&cache->cluster_list); 2118 INIT_LIST_HEAD(&cache->bg_list); 2119 INIT_LIST_HEAD(&cache->ro_list); 2120 INIT_LIST_HEAD(&cache->discard_list); 2121 INIT_LIST_HEAD(&cache->dirty_list); 2122 INIT_LIST_HEAD(&cache->io_list); 2123 INIT_LIST_HEAD(&cache->active_bg_list); 2124 btrfs_init_free_space_ctl(cache, cache->free_space_ctl); 2125 atomic_set(&cache->frozen, 0); 2126 mutex_init(&cache->free_space_lock); 2127 cache->full_stripe_locks_root.root = RB_ROOT; 2128 mutex_init(&cache->full_stripe_locks_root.lock); 2129 2130 return cache; 2131 } 2132 2133 /* 2134 * Iterate all chunks and verify that each of them has a corresponding block 2135 * group 2136 */ 2137 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 2138 { 2139 struct extent_map_tree *map_tree = &fs_info->mapping_tree; 2140 struct extent_map *em; 2141 struct btrfs_block_group *bg; 2142 u64 start = 0; 2143 int ret = 0; 2144 2145
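/* Walk every chunk mapping in logical address order: @start is advanced past each mapping that was found, and the loop ends once no mapping intersects the remaining address space. */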
while (1) { 2146 read_lock(&map_tree->lock); 2147 /* 2148 * lookup_extent_mapping will return the first extent map 2149 * intersecting the range, so setting @len to 1 is enough to 2150 * get the first chunk. 2151 */ 2152 em = lookup_extent_mapping(map_tree, start, 1); 2153 read_unlock(&map_tree->lock); 2154 if (!em) 2155 break; 2156 2157 bg = btrfs_lookup_block_group(fs_info, em->start); 2158 if (!bg) { 2159 btrfs_err(fs_info, 2160 "chunk start=%llu len=%llu doesn't have corresponding block group", 2161 em->start, em->len); 2162 ret = -EUCLEAN; 2163 free_extent_map(em); 2164 break; 2165 } 2166 if (bg->start != em->start || bg->length != em->len || 2167 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 2168 (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 2169 btrfs_err(fs_info, 2170 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 2171 em->start, em->len, 2172 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 2173 bg->start, bg->length, 2174 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 2175 ret = -EUCLEAN; 2176 free_extent_map(em); 2177 btrfs_put_block_group(bg); 2178 break; 2179 } 2180 start = em->start + em->len; 2181 free_extent_map(em); 2182 btrfs_put_block_group(bg); 2183 } 2184 return ret; 2185 } 2186 2187 static int read_one_block_group(struct btrfs_fs_info *info, 2188 struct btrfs_block_group_item *bgi, 2189 const struct btrfs_key *key, 2190 int need_clear) 2191 { 2192 struct btrfs_block_group *cache; 2193 const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 2194 int ret; 2195 2196 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 2197 2198 cache = btrfs_create_block_group_cache(info, key->objectid); 2199 if (!cache) 2200 return -ENOMEM; 2201 2202 cache->length = key->offset; 2203 cache->used = btrfs_stack_block_group_used(bgi); 2204 cache->commit_used = cache->used; 2205 cache->flags = btrfs_stack_block_group_flags(bgi); 2206 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); 2207 2208 set_free_space_tree_thresholds(cache); 2209 2210 if (need_clear) { 2211 /* 2212 * When we mount with an old space cache, we need to 2213 * set BTRFS_DC_CLEAR and set the dirty flag. 2214 * 2215 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 2216 * truncate the old free space cache inode and 2217 * set up a new one. 2218 * b) Setting the 'dirty' flag makes sure that we flush 2219 * the new space cache info onto disk. 2220 */ 2221 if (btrfs_test_opt(info, SPACE_CACHE)) 2222 cache->disk_cache_state = BTRFS_DC_CLEAR; 2223 } 2224 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 2225 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 2226 btrfs_err(info, 2227 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 2228 cache->start); 2229 ret = -EINVAL; 2230 goto error; 2231 } 2232 2233 ret = btrfs_load_block_group_zone_info(cache, false); 2234 if (ret) { 2235 btrfs_err(info, "zoned: failed to load zone info of bg %llu", 2236 cache->start); 2237 goto error; 2238 } 2239 2240 /* 2241 * We need to exclude the super stripes now so that the space info has 2242 * super bytes accounted for, otherwise we'll think we have more space 2243 * than we actually do. 2244 */ 2245 ret = exclude_super_stripes(cache); 2246 if (ret) { 2247 /* We may have excluded something, so call this just in case. */ 2248 btrfs_free_excluded_extents(cache); 2249 goto error; 2250 } 2251 2252 /* 2253 * For a zoned filesystem, space after the allocation offset is the only 2254 * free space for a block group. So, we don't need any caching work.
2255 * btrfs_calc_zone_unusable() will set the amount of free space and 2256 * zone_unusable space. 2257 * 2258 * For a regular filesystem, check for two cases: either we are full, and 2259 * therefore don't need to bother with the caching work since we won't 2260 * find any space, or we are empty, and we can just add all the space 2261 * in and be done with it. This saves us _a_lot_ of time, particularly 2262 * in the full case. 2263 */ 2264 if (btrfs_is_zoned(info)) { 2265 btrfs_calc_zone_unusable(cache); 2266 /* Should not have any excluded extents. Just in case, though. */ 2267 btrfs_free_excluded_extents(cache); 2268 } else if (cache->length == cache->used) { 2269 cache->cached = BTRFS_CACHE_FINISHED; 2270 btrfs_free_excluded_extents(cache); 2271 } else if (cache->used == 0) { 2272 cache->cached = BTRFS_CACHE_FINISHED; 2273 add_new_free_space(cache, cache->start, 2274 cache->start + cache->length); 2275 btrfs_free_excluded_extents(cache); 2276 } 2277 2278 ret = btrfs_add_block_group_cache(info, cache); 2279 if (ret) { 2280 btrfs_remove_free_space_cache(cache); 2281 goto error; 2282 } 2283 trace_btrfs_add_block_group(info, cache, 0); 2284 btrfs_add_bg_to_space_info(info, cache); 2285 2286 set_avail_alloc_bits(info, cache->flags); 2287 if (btrfs_chunk_writeable(info, cache->start)) { 2288 if (cache->used == 0) { 2289 ASSERT(list_empty(&cache->bg_list)); 2290 if (btrfs_test_opt(info, DISCARD_ASYNC)) 2291 btrfs_discard_queue_work(&info->discard_ctl, cache); 2292 else 2293 btrfs_mark_bg_unused(cache); 2294 } 2295 } else { 2296 inc_block_group_ro(cache, 1); 2297 } 2298 2299 return 0; 2300 error: 2301 btrfs_put_block_group(cache); 2302 return ret; 2303 } 2304 2305 static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) 2306 { 2307 struct extent_map_tree *em_tree = &fs_info->mapping_tree; 2308 struct rb_node *node; 2309 int ret = 0; 2310 2311 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { 2312 struct extent_map *em; 2313 struct map_lookup *map; 2314 struct btrfs_block_group *bg; 2315 2316 em = rb_entry(node, struct extent_map, rb_node); 2317 map = em->map_lookup; 2318 bg = btrfs_create_block_group_cache(fs_info, em->start); 2319 if (!bg) { 2320 ret = -ENOMEM; 2321 break; 2322 } 2323 2324 /* Fill dummy cache as FULL */ 2325 bg->length = em->len; 2326 bg->flags = map->type; 2327 bg->cached = BTRFS_CACHE_FINISHED; 2328 bg->used = em->len; 2329 2330 ret = btrfs_add_block_group_cache(fs_info, bg); 2331 /* 2332 * We may have some valid block group cache added already; in 2333 * that case we skip to the next one. 2334 */ 2335 if (ret == -EEXIST) { 2336 ret = 0; 2337 btrfs_put_block_group(bg); 2338 continue; 2339 } 2340 2341 if (ret) { 2342 btrfs_remove_free_space_cache(bg); 2343 btrfs_put_block_group(bg); 2344 break; 2345 } 2346 2347 btrfs_add_bg_to_space_info(fs_info, bg); 2348 2349 set_avail_alloc_bits(fs_info, bg->flags); 2350 } 2351 if (!ret) 2352 btrfs_init_global_block_rsv(fs_info); 2353 return ret; 2354 } 2355 2356 int btrfs_read_block_groups(struct btrfs_fs_info *info) 2357 { 2358 struct btrfs_root *root = btrfs_block_group_root(info); 2359 struct btrfs_path *path; 2360 int ret; 2361 struct btrfs_block_group *cache; 2362 struct btrfs_space_info *space_info; 2363 struct btrfs_key key; 2364 int need_clear = 0; 2365 u64 cache_gen; 2366 2367 /* 2368 * Either no extent root (with ibadroots rescue option) or we have 2369 * unsupported RO options. The fs can never be mounted read-write, so no 2370 * need to waste time searching block group items.
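* (fill_dummy_bgs() builds in-memory block groups directly from the
* chunk mappings, which is enough for read-only access.)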
2371 * 2372 * This also allows new extent tree related changes to be RO compat, 2373 * no need for a full incompat flag. 2374 */ 2375 if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & 2376 ~BTRFS_FEATURE_COMPAT_RO_SUPP)) 2377 return fill_dummy_bgs(info); 2378 2379 key.objectid = 0; 2380 key.offset = 0; 2381 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2382 path = btrfs_alloc_path(); 2383 if (!path) 2384 return -ENOMEM; 2385 2386 cache_gen = btrfs_super_cache_generation(info->super_copy); 2387 if (btrfs_test_opt(info, SPACE_CACHE) && 2388 btrfs_super_generation(info->super_copy) != cache_gen) 2389 need_clear = 1; 2390 if (btrfs_test_opt(info, CLEAR_CACHE)) 2391 need_clear = 1; 2392 2393 while (1) { 2394 struct btrfs_block_group_item bgi; 2395 struct extent_buffer *leaf; 2396 int slot; 2397 2398 ret = find_first_block_group(info, path, &key); 2399 if (ret > 0) 2400 break; 2401 if (ret != 0) 2402 goto error; 2403 2404 leaf = path->nodes[0]; 2405 slot = path->slots[0]; 2406 2407 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 2408 sizeof(bgi)); 2409 2410 btrfs_item_key_to_cpu(leaf, &key, slot); 2411 btrfs_release_path(path); 2412 ret = read_one_block_group(info, &bgi, &key, need_clear); 2413 if (ret < 0) 2414 goto error; 2415 key.objectid += key.offset; 2416 key.offset = 0; 2417 } 2418 btrfs_release_path(path); 2419 2420 list_for_each_entry(space_info, &info->space_info, list) { 2421 int i; 2422 2423 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 2424 if (list_empty(&space_info->block_groups[i])) 2425 continue; 2426 cache = list_first_entry(&space_info->block_groups[i], 2427 struct btrfs_block_group, 2428 list); 2429 btrfs_sysfs_add_block_group_type(cache); 2430 } 2431 2432 if (!(btrfs_get_alloc_profile(info, space_info->flags) & 2433 (BTRFS_BLOCK_GROUP_RAID10 | 2434 BTRFS_BLOCK_GROUP_RAID1_MASK | 2435 BTRFS_BLOCK_GROUP_RAID56_MASK | 2436 BTRFS_BLOCK_GROUP_DUP))) 2437 continue; 2438 /* 2439 * Avoid allocating from un-mirrored block group if there are 2440 * mirrored block groups. 2441 */ 2442 list_for_each_entry(cache, 2443 &space_info->block_groups[BTRFS_RAID_RAID0], 2444 list) 2445 inc_block_group_ro(cache, 1); 2446 list_for_each_entry(cache, 2447 &space_info->block_groups[BTRFS_RAID_SINGLE], 2448 list) 2449 inc_block_group_ro(cache, 1); 2450 } 2451 2452 btrfs_init_global_block_rsv(info); 2453 ret = check_chunk_block_group_mappings(info); 2454 error: 2455 btrfs_free_path(path); 2456 /* 2457 * We've hit some error while reading the extent tree, and have 2458 * rescue=ibadroots mount option. 2459 * Try to fill the tree using dummy block groups so that the user can 2460 * continue to mount and grab their data. 2461 */ 2462 if (ret && btrfs_test_opt(info, IGNOREBADROOTS)) 2463 ret = fill_dummy_bgs(info); 2464 return ret; 2465 } 2466 2467 /* 2468 * This function, insert_block_group_item(), belongs to the phase 2 of chunk 2469 * allocation. 2470 * 2471 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2472 * phases. 
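* (Like update_block_group_item(), on failure this reverts the cached
* ->commit_used value that it had just updated.)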
2473 */ 2474 static int insert_block_group_item(struct btrfs_trans_handle *trans, 2475 struct btrfs_block_group *block_group) 2476 { 2477 struct btrfs_fs_info *fs_info = trans->fs_info; 2478 struct btrfs_block_group_item bgi; 2479 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2480 struct btrfs_key key; 2481 u64 old_commit_used; 2482 int ret; 2483 2484 spin_lock(&block_group->lock); 2485 btrfs_set_stack_block_group_used(&bgi, block_group->used); 2486 btrfs_set_stack_block_group_chunk_objectid(&bgi, 2487 block_group->global_root_id); 2488 btrfs_set_stack_block_group_flags(&bgi, block_group->flags); 2489 old_commit_used = block_group->commit_used; 2490 block_group->commit_used = block_group->used; 2491 key.objectid = block_group->start; 2492 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2493 key.offset = block_group->length; 2494 spin_unlock(&block_group->lock); 2495 2496 ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); 2497 if (ret < 0) { 2498 spin_lock(&block_group->lock); 2499 block_group->commit_used = old_commit_used; 2500 spin_unlock(&block_group->lock); 2501 } 2502 2503 return ret; 2504 } 2505 2506 static int insert_dev_extent(struct btrfs_trans_handle *trans, 2507 struct btrfs_device *device, u64 chunk_offset, 2508 u64 start, u64 num_bytes) 2509 { 2510 struct btrfs_fs_info *fs_info = device->fs_info; 2511 struct btrfs_root *root = fs_info->dev_root; 2512 struct btrfs_path *path; 2513 struct btrfs_dev_extent *extent; 2514 struct extent_buffer *leaf; 2515 struct btrfs_key key; 2516 int ret; 2517 2518 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); 2519 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 2520 path = btrfs_alloc_path(); 2521 if (!path) 2522 return -ENOMEM; 2523 2524 key.objectid = device->devid; 2525 key.type = BTRFS_DEV_EXTENT_KEY; 2526 key.offset = start; 2527 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); 2528 if (ret) 2529 goto out; 2530 2531 leaf = path->nodes[0]; 2532 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); 2533 btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); 2534 btrfs_set_dev_extent_chunk_objectid(leaf, extent, 2535 BTRFS_FIRST_CHUNK_TREE_OBJECTID); 2536 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 2537 2538 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 2539 btrfs_mark_buffer_dirty(leaf); 2540 out: 2541 btrfs_free_path(path); 2542 return ret; 2543 } 2544 2545 /* 2546 * This function belongs to phase 2. 2547 * 2548 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2549 * phases. 
2550 */ 2551 static int insert_dev_extents(struct btrfs_trans_handle *trans, 2552 u64 chunk_offset, u64 chunk_size) 2553 { 2554 struct btrfs_fs_info *fs_info = trans->fs_info; 2555 struct btrfs_device *device; 2556 struct extent_map *em; 2557 struct map_lookup *map; 2558 u64 dev_offset; 2559 u64 stripe_size; 2560 int i; 2561 int ret = 0; 2562 2563 em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 2564 if (IS_ERR(em)) 2565 return PTR_ERR(em); 2566 2567 map = em->map_lookup; 2568 stripe_size = em->orig_block_len; 2569 2570 /* 2571 * Take the device list mutex to prevent races with the final phase of 2572 * a device replace operation that replaces the device object associated 2573 * with the map's stripes, because the device object's id can change 2574 * at any time during that final phase of the device replace operation 2575 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 2576 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 2577 * resulting in persisting a device extent item with such ID. 2578 */ 2579 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2580 for (i = 0; i < map->num_stripes; i++) { 2581 device = map->stripes[i].dev; 2582 dev_offset = map->stripes[i].physical; 2583 2584 ret = insert_dev_extent(trans, device, chunk_offset, dev_offset, 2585 stripe_size); 2586 if (ret) 2587 break; 2588 } 2589 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2590 2591 free_extent_map(em); 2592 return ret; 2593 } 2594 2595 /* 2596 * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of 2597 * chunk allocation. 2598 * 2599 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2600 * phases. 2601 */ 2602 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 2603 { 2604 struct btrfs_fs_info *fs_info = trans->fs_info; 2605 struct btrfs_block_group *block_group; 2606 int ret = 0; 2607 2608 while (!list_empty(&trans->new_bgs)) { 2609 int index; 2610 2611 block_group = list_first_entry(&trans->new_bgs, 2612 struct btrfs_block_group, 2613 bg_list); 2614 if (ret) 2615 goto next; 2616 2617 index = btrfs_bg_flags_to_raid_index(block_group->flags); 2618 2619 ret = insert_block_group_item(trans, block_group); 2620 if (ret) 2621 btrfs_abort_transaction(trans, ret); 2622 if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, 2623 &block_group->runtime_flags)) { 2624 mutex_lock(&fs_info->chunk_mutex); 2625 ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); 2626 mutex_unlock(&fs_info->chunk_mutex); 2627 if (ret) 2628 btrfs_abort_transaction(trans, ret); 2629 } 2630 ret = insert_dev_extents(trans, block_group->start, 2631 block_group->length); 2632 if (ret) 2633 btrfs_abort_transaction(trans, ret); 2634 add_block_group_free_space(trans, block_group); 2635 2636 /* 2637 * If we restriped during balance, we may have added a new raid 2638 * type, so now add the sysfs entries when it is safe to do so. 2639 * We don't have to worry about locking here as it's handled in 2640 * btrfs_sysfs_add_block_group_type. 2641 */ 2642 if (block_group->space_info->block_group_kobjs[index] == NULL) 2643 btrfs_sysfs_add_block_group_type(block_group); 2644 2645 /* Already aborted the transaction if it failed. */ 2646 next: 2647 btrfs_delayed_refs_rsv_release(fs_info, 1); 2648 list_del_init(&block_group->bg_list); 2649 } 2650 btrfs_trans_release_chunk_metadata(trans); 2651 } 2652 2653 /* 2654 * For extent tree v2 we use the block_group_item->chunk_offset to point at our 2655 * global root id. 
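* (For example, with EXTENT_TREE_V2 on a filesystem larger than 10GiB, a
* block group at offset 96GiB maps to index (96GiB / SZ_1G) % nr_global_roots.)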
For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. 2656 */ 2657 static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) 2658 { 2659 u64 div = SZ_1G; 2660 u64 index; 2661 2662 if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) 2663 return BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2664 2665 /* If we have a smaller fs, index based on 128MiB. */ 2666 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) 2667 div = SZ_128M; 2668 2669 offset = div64_u64(offset, div); 2670 div64_u64_rem(offset, fs_info->nr_global_roots, &index); 2671 return index; 2672 } 2673 2674 struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, 2675 u64 bytes_used, u64 type, 2676 u64 chunk_offset, u64 size) 2677 { 2678 struct btrfs_fs_info *fs_info = trans->fs_info; 2679 struct btrfs_block_group *cache; 2680 int ret; 2681 2682 btrfs_set_log_full_commit(trans); 2683 2684 cache = btrfs_create_block_group_cache(fs_info, chunk_offset); 2685 if (!cache) 2686 return ERR_PTR(-ENOMEM); 2687 2688 cache->length = size; 2689 set_free_space_tree_thresholds(cache); 2690 cache->used = bytes_used; 2691 cache->flags = type; 2692 cache->cached = BTRFS_CACHE_FINISHED; 2693 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); 2694 2695 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 2696 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); 2697 2698 ret = btrfs_load_block_group_zone_info(cache, true); 2699 if (ret) { 2700 btrfs_put_block_group(cache); 2701 return ERR_PTR(ret); 2702 } 2703 2704 ret = exclude_super_stripes(cache); 2705 if (ret) { 2706 /* We may have excluded something, so call this just in case */ 2707 btrfs_free_excluded_extents(cache); 2708 btrfs_put_block_group(cache); 2709 return ERR_PTR(ret); 2710 } 2711 2712 add_new_free_space(cache, chunk_offset, chunk_offset + size); 2713 2714 btrfs_free_excluded_extents(cache); 2715 2716 /* 2717 * Ensure the corresponding space_info object is created and 2718 * assigned to our block group. We want our bg to be added to the rbtree 2719 * with its ->space_info set. 2720 */ 2721 cache->space_info = btrfs_find_space_info(fs_info, cache->flags); 2722 ASSERT(cache->space_info); 2723 2724 ret = btrfs_add_block_group_cache(fs_info, cache); 2725 if (ret) { 2726 btrfs_remove_free_space_cache(cache); 2727 btrfs_put_block_group(cache); 2728 return ERR_PTR(ret); 2729 } 2730 2731 /* 2732 * Now that our block group has its ->space_info set and is inserted in 2733 * the rbtree, update the space info's counters. 2734 */ 2735 trace_btrfs_add_block_group(fs_info, cache, 1); 2736 btrfs_add_bg_to_space_info(fs_info, cache); 2737 btrfs_update_global_block_rsv(fs_info); 2738 2739 #ifdef CONFIG_BTRFS_DEBUG 2740 if (btrfs_should_fragment_free_space(cache)) { 2741 u64 new_bytes_used = size - bytes_used; 2742 2743 cache->space_info->bytes_used += new_bytes_used >> 1; 2744 fragment_free_space(cache); 2745 } 2746 #endif 2747 2748 list_add_tail(&cache->bg_list, &trans->new_bgs); 2749 trans->delayed_ref_updates++; 2750 btrfs_update_delayed_refs_rsv(trans); 2751 2752 set_avail_alloc_bits(fs_info, type); 2753 return cache; 2754 } 2755 2756 /* 2757 * Mark one block group RO, can be called several times for the same block 2758 * group. 2759 * 2760 * @cache: the destination block group 2761 * @do_chunk_alloc: whether we need to do chunk pre-allocation, this is to 2762 * ensure we still have some free space after marking this 2763 * block group RO.
2764 */ 2765 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, 2766 bool do_chunk_alloc) 2767 { 2768 struct btrfs_fs_info *fs_info = cache->fs_info; 2769 struct btrfs_trans_handle *trans; 2770 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2771 u64 alloc_flags; 2772 int ret; 2773 bool dirty_bg_running; 2774 2775 /* 2776 * This can only happen when we are doing read-only scrub on a read-only 2777 * mount. 2778 * In that case we should not start a new transaction on a read-only fs. 2779 * Thus here we skip all chunk allocations. 2780 */ 2781 if (sb_rdonly(fs_info->sb)) { 2782 mutex_lock(&fs_info->ro_block_group_mutex); 2783 ret = inc_block_group_ro(cache, 0); 2784 mutex_unlock(&fs_info->ro_block_group_mutex); 2785 return ret; 2786 } 2787 2788 do { 2789 trans = btrfs_join_transaction(root); 2790 if (IS_ERR(trans)) 2791 return PTR_ERR(trans); 2792 2793 dirty_bg_running = false; 2794 2795 /* 2796 * We're not allowed to set block groups readonly after the dirty 2797 * block group cache has started writing. If it already started, 2798 * back off and let this transaction commit. 2799 */ 2800 mutex_lock(&fs_info->ro_block_group_mutex); 2801 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 2802 u64 transid = trans->transid; 2803 2804 mutex_unlock(&fs_info->ro_block_group_mutex); 2805 btrfs_end_transaction(trans); 2806 2807 ret = btrfs_wait_for_commit(fs_info, transid); 2808 if (ret) 2809 return ret; 2810 dirty_bg_running = true; 2811 } 2812 } while (dirty_bg_running); 2813 2814 if (do_chunk_alloc) { 2815 /* 2816 * If we are changing raid levels, try to allocate a 2817 * corresponding block group with the new raid level. 2818 */ 2819 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 2820 if (alloc_flags != cache->flags) { 2821 ret = btrfs_chunk_alloc(trans, alloc_flags, 2822 CHUNK_ALLOC_FORCE); 2823 /* 2824 * ENOSPC is allowed here, we may have enough space 2825 * already allocated at the new raid level to carry on. 2826 */ 2827 if (ret == -ENOSPC) 2828 ret = 0; 2829 if (ret < 0) 2830 goto out; 2831 } 2832 } 2833 2834 ret = inc_block_group_ro(cache, 0); 2835 if (!do_chunk_alloc || ret == -ETXTBSY) 2836 goto unlock_out; 2837 if (!ret) 2838 goto out; 2839 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); 2840 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 2841 if (ret < 0) 2842 goto out; 2843 /* 2844 * We have allocated a new chunk. We also need to activate that chunk to 2845 * grant metadata tickets for a zoned filesystem.
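* (On a regular, non-zoned filesystem the activation below is expected to
* be a no-op.)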
2846 */ 2847 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); 2848 if (ret < 0) 2849 goto out; 2850 2851 ret = inc_block_group_ro(cache, 0); 2852 if (ret == -ETXTBSY) 2853 goto unlock_out; 2854 out: 2855 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 2856 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 2857 mutex_lock(&fs_info->chunk_mutex); 2858 check_system_chunk(trans, alloc_flags); 2859 mutex_unlock(&fs_info->chunk_mutex); 2860 } 2861 unlock_out: 2862 mutex_unlock(&fs_info->ro_block_group_mutex); 2863 2864 btrfs_end_transaction(trans); 2865 return ret; 2866 } 2867 2868 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) 2869 { 2870 struct btrfs_space_info *sinfo = cache->space_info; 2871 u64 num_bytes; 2872 2873 BUG_ON(!cache->ro); 2874 2875 spin_lock(&sinfo->lock); 2876 spin_lock(&cache->lock); 2877 if (!--cache->ro) { 2878 if (btrfs_is_zoned(cache->fs_info)) { 2879 /* Migrate zone_unusable bytes back */ 2880 cache->zone_unusable = 2881 (cache->alloc_offset - cache->used) + 2882 (cache->length - cache->zone_capacity); 2883 sinfo->bytes_zone_unusable += cache->zone_unusable; 2884 sinfo->bytes_readonly -= cache->zone_unusable; 2885 } 2886 num_bytes = cache->length - cache->reserved - 2887 cache->pinned - cache->bytes_super - 2888 cache->zone_unusable - cache->used; 2889 sinfo->bytes_readonly -= num_bytes; 2890 list_del_init(&cache->ro_list); 2891 } 2892 spin_unlock(&cache->lock); 2893 spin_unlock(&sinfo->lock); 2894 } 2895 2896 static int update_block_group_item(struct btrfs_trans_handle *trans, 2897 struct btrfs_path *path, 2898 struct btrfs_block_group *cache) 2899 { 2900 struct btrfs_fs_info *fs_info = trans->fs_info; 2901 int ret; 2902 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2903 unsigned long bi; 2904 struct extent_buffer *leaf; 2905 struct btrfs_block_group_item bgi; 2906 struct btrfs_key key; 2907 u64 old_commit_used; 2908 u64 used; 2909 2910 /* 2911 * Block group item updates can be triggered outside of the commit 2912 * transaction critical section, thus we need a consistent view of the 2913 * used bytes. We cannot use cache->used directly outside of the spin 2914 * lock, as it may change. 2915 */ 2916 spin_lock(&cache->lock); 2917 old_commit_used = cache->commit_used; 2918 used = cache->used; 2919 /* No change in used bytes, can safely skip it. */ 2920 if (cache->commit_used == used) { 2921 spin_unlock(&cache->lock); 2922 return 0; 2923 } 2924 cache->commit_used = used; 2925 spin_unlock(&cache->lock); 2926 2927 key.objectid = cache->start; 2928 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2929 key.offset = cache->length; 2930 2931 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2932 if (ret) { 2933 if (ret > 0) 2934 ret = -ENOENT; 2935 goto fail; 2936 } 2937 2938 leaf = path->nodes[0]; 2939 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 2940 btrfs_set_stack_block_group_used(&bgi, used); 2941 btrfs_set_stack_block_group_chunk_objectid(&bgi, 2942 cache->global_root_id); 2943 btrfs_set_stack_block_group_flags(&bgi, cache->flags); 2944 write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); 2945 btrfs_mark_buffer_dirty(leaf); 2946 fail: 2947 btrfs_release_path(path); 2948 /* We didn't update the block group item, need to revert @commit_used.
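* (This includes the -ENOENT case above, where the item is not in the
* extent btree yet; see the retry logic in the dirty block group writeout.)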
*/ 2949 if (ret < 0) { 2950 spin_lock(&cache->lock); 2951 cache->commit_used = old_commit_used; 2952 spin_unlock(&cache->lock); 2953 } 2954 return ret; 2955 2956 } 2957 2958 static int cache_save_setup(struct btrfs_block_group *block_group, 2959 struct btrfs_trans_handle *trans, 2960 struct btrfs_path *path) 2961 { 2962 struct btrfs_fs_info *fs_info = block_group->fs_info; 2963 struct btrfs_root *root = fs_info->tree_root; 2964 struct inode *inode = NULL; 2965 struct extent_changeset *data_reserved = NULL; 2966 u64 alloc_hint = 0; 2967 int dcs = BTRFS_DC_ERROR; 2968 u64 cache_size = 0; 2969 int retries = 0; 2970 int ret = 0; 2971 2972 if (!btrfs_test_opt(fs_info, SPACE_CACHE)) 2973 return 0; 2974 2975 /* 2976 * If this block group is smaller than 100 megs, don't bother caching 2977 * the block group. 2978 */ 2979 if (block_group->length < (100 * SZ_1M)) { 2980 spin_lock(&block_group->lock); 2981 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 2982 spin_unlock(&block_group->lock); 2983 return 0; 2984 } 2985 2986 if (TRANS_ABORTED(trans)) 2987 return 0; 2988 again: 2989 inode = lookup_free_space_inode(block_group, path); 2990 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 2991 ret = PTR_ERR(inode); 2992 btrfs_release_path(path); 2993 goto out; 2994 } 2995 2996 if (IS_ERR(inode)) { 2997 BUG_ON(retries); 2998 retries++; 2999 3000 if (block_group->ro) 3001 goto out_free; 3002 3003 ret = create_free_space_inode(trans, block_group, path); 3004 if (ret) 3005 goto out_free; 3006 goto again; 3007 } 3008 3009 /* 3010 * We want to set the generation to 0 so that if anything goes wrong 3011 * from here on out we know not to trust this cache when we load up next 3012 * time. 3013 */ 3014 BTRFS_I(inode)->generation = 0; 3015 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 3016 if (ret) { 3017 /* 3018 * So theoretically we could recover from this, simply set the 3019 * super cache generation to 0 so we know to invalidate the 3020 * cache, but then we'd have to keep track of the block groups 3021 * that fail this way so we know we _have_ to reset this cache 3022 * before the next commit or risk reading stale cache. So to 3023 * limit our exposure to horrible edge cases let's just abort the 3024 * transaction, this only happens in really bad situations 3025 * anyway. 3026 */ 3027 btrfs_abort_transaction(trans, ret); 3028 goto out_put; 3029 } 3030 WARN_ON(ret); 3031 3032 /* We've already set up this transaction, go ahead and exit */ 3033 if (block_group->cache_generation == trans->transid && 3034 i_size_read(inode)) { 3035 dcs = BTRFS_DC_SETUP; 3036 goto out_put; 3037 } 3038 3039 if (i_size_read(inode) > 0) { 3040 ret = btrfs_check_trunc_cache_free_space(fs_info, 3041 &fs_info->global_block_rsv); 3042 if (ret) 3043 goto out_put; 3044 3045 ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 3046 if (ret) 3047 goto out_put; 3048 } 3049 3050 spin_lock(&block_group->lock); 3051 if (block_group->cached != BTRFS_CACHE_FINISHED || 3052 !btrfs_test_opt(fs_info, SPACE_CACHE)) { 3053 /* 3054 * Don't bother trying to write stuff out _if_ 3055 * a) we're not cached, 3056 * b) we're using the nospace_cache mount option, 3057 * c) we're using the v2 space_cache (FREE_SPACE_TREE). 3058 */ 3059 dcs = BTRFS_DC_WRITTEN; 3060 spin_unlock(&block_group->lock); 3061 goto out_put; 3062 } 3063 spin_unlock(&block_group->lock); 3064 3065 /* 3066 * We hit an ENOSPC when setting up the cache in this transaction, just 3067 * skip doing the setup; we've already cleared the cache, so we're safe.
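* (The BTRFS_TRANS_CACHE_ENOSPC bit is set further below, when the space
* cache preallocation for some block group failed with -ENOSPC.)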
3068 */ 3069 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 3070 ret = -ENOSPC; 3071 goto out_put; 3072 } 3073 3074 /* 3075 * Try to preallocate enough space based on how big the block group is. 3076 * Keep in mind this has to include any pinned space which could end up 3077 * taking up quite a bit since it's not folded into the other space 3078 * cache. 3079 */ 3080 cache_size = div_u64(block_group->length, SZ_256M); 3081 if (!cache_size) 3082 cache_size = 1; 3083 3084 cache_size *= 16; 3085 cache_size *= fs_info->sectorsize; 3086 3087 ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, 3088 cache_size, false); 3089 if (ret) 3090 goto out_put; 3091 3092 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size, 3093 cache_size, cache_size, 3094 &alloc_hint); 3095 /* 3096 * Our cache requires contiguous chunks so that we don't modify a bunch 3097 * of metadata or split extents when writing the cache out, which means 3098 * we can enospc if we are heavily fragmented in addition to just normal 3099 * out of space conditions. So if we hit this just skip setting up any 3100 * other block groups for this transaction, maybe we'll unpin enough 3101 * space the next time around. 3102 */ 3103 if (!ret) 3104 dcs = BTRFS_DC_SETUP; 3105 else if (ret == -ENOSPC) 3106 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 3107 3108 out_put: 3109 iput(inode); 3110 out_free: 3111 btrfs_release_path(path); 3112 out: 3113 spin_lock(&block_group->lock); 3114 if (!ret && dcs == BTRFS_DC_SETUP) 3115 block_group->cache_generation = trans->transid; 3116 block_group->disk_cache_state = dcs; 3117 spin_unlock(&block_group->lock); 3118 3119 extent_changeset_free(data_reserved); 3120 return ret; 3121 } 3122 3123 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 3124 { 3125 struct btrfs_fs_info *fs_info = trans->fs_info; 3126 struct btrfs_block_group *cache, *tmp; 3127 struct btrfs_transaction *cur_trans = trans->transaction; 3128 struct btrfs_path *path; 3129 3130 if (list_empty(&cur_trans->dirty_bgs) || 3131 !btrfs_test_opt(fs_info, SPACE_CACHE)) 3132 return 0; 3133 3134 path = btrfs_alloc_path(); 3135 if (!path) 3136 return -ENOMEM; 3137 3138 /* Could add new block groups, use _safe just in case */ 3139 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 3140 dirty_list) { 3141 if (cache->disk_cache_state == BTRFS_DC_CLEAR) 3142 cache_save_setup(cache, trans, path); 3143 } 3144 3145 btrfs_free_path(path); 3146 return 0; 3147 } 3148 3149 /* 3150 * Transaction commit does final block group cache writeback during a critical 3151 * section where nothing is allowed to change the FS. This is required in 3152 * order for the cache to actually match the block group, but can introduce a 3153 * lot of latency into the commit. 3154 * 3155 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 3156 * There's a chance we'll have to redo some of it if the block group changes 3157 * again during the commit, but it greatly reduces the commit latency by 3158 * getting rid of the easy block groups while we're still allowing others to 3159 * join the commit. 
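* (The block groups that remain dirty, or get re-dirtied, are written out
* during the critical section by btrfs_write_dirty_block_groups().)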
3160 */ 3161 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 3162 { 3163 struct btrfs_fs_info *fs_info = trans->fs_info; 3164 struct btrfs_block_group *cache; 3165 struct btrfs_transaction *cur_trans = trans->transaction; 3166 int ret = 0; 3167 int should_put; 3168 struct btrfs_path *path = NULL; 3169 LIST_HEAD(dirty); 3170 struct list_head *io = &cur_trans->io_bgs; 3171 int loops = 0; 3172 3173 spin_lock(&cur_trans->dirty_bgs_lock); 3174 if (list_empty(&cur_trans->dirty_bgs)) { 3175 spin_unlock(&cur_trans->dirty_bgs_lock); 3176 return 0; 3177 } 3178 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3179 spin_unlock(&cur_trans->dirty_bgs_lock); 3180 3181 again: 3182 /* Make sure all the block groups on our dirty list actually exist */ 3183 btrfs_create_pending_block_groups(trans); 3184 3185 if (!path) { 3186 path = btrfs_alloc_path(); 3187 if (!path) { 3188 ret = -ENOMEM; 3189 goto out; 3190 } 3191 } 3192 3193 /* 3194 * cache_write_mutex is here only to save us from balance or automatic 3195 * removal of empty block groups deleting this block group while we are 3196 * writing out the cache 3197 */ 3198 mutex_lock(&trans->transaction->cache_write_mutex); 3199 while (!list_empty(&dirty)) { 3200 bool drop_reserve = true; 3201 3202 cache = list_first_entry(&dirty, struct btrfs_block_group, 3203 dirty_list); 3204 /* 3205 * This can happen if something re-dirties a block group that 3206 * is already under IO. Just wait for it to finish and then do 3207 * it all again 3208 */ 3209 if (!list_empty(&cache->io_list)) { 3210 list_del_init(&cache->io_list); 3211 btrfs_wait_cache_io(trans, cache, path); 3212 btrfs_put_block_group(cache); 3213 } 3214 3215 3216 /* 3217 * btrfs_wait_cache_io uses the cache->dirty_list to decide if 3218 * it should update the cache_state. Don't delete until after 3219 * we wait. 3220 * 3221 * Since we're not running in the commit critical section 3222 * we need the dirty_bgs_lock to protect from update_block_group 3223 */ 3224 spin_lock(&cur_trans->dirty_bgs_lock); 3225 list_del_init(&cache->dirty_list); 3226 spin_unlock(&cur_trans->dirty_bgs_lock); 3227 3228 should_put = 1; 3229 3230 cache_save_setup(cache, trans, path); 3231 3232 if (cache->disk_cache_state == BTRFS_DC_SETUP) { 3233 cache->io_ctl.inode = NULL; 3234 ret = btrfs_write_out_cache(trans, cache, path); 3235 if (ret == 0 && cache->io_ctl.inode) { 3236 should_put = 0; 3237 3238 /* 3239 * The cache_write_mutex is protecting the 3240 * io_list, also refer to the definition of 3241 * btrfs_transaction::io_bgs for more details 3242 */ 3243 list_add_tail(&cache->io_list, io); 3244 } else { 3245 /* 3246 * If we failed to write the cache, the 3247 * generation will be bad and life goes on 3248 */ 3249 ret = 0; 3250 } 3251 } 3252 if (!ret) { 3253 ret = update_block_group_item(trans, path, cache); 3254 /* 3255 * Our block group might still be attached to the list 3256 * of new block groups in the transaction handle of some 3257 * other task (struct btrfs_trans_handle->new_bgs). This 3258 * means its block group item isn't yet in the extent 3259 * tree. If this happens ignore the error, as we will 3260 * try again later in the critical section of the 3261 * transaction commit. 
3262 */ 3263 if (ret == -ENOENT) { 3264 ret = 0; 3265 spin_lock(&cur_trans->dirty_bgs_lock); 3266 if (list_empty(&cache->dirty_list)) { 3267 list_add_tail(&cache->dirty_list, 3268 &cur_trans->dirty_bgs); 3269 btrfs_get_block_group(cache); 3270 drop_reserve = false; 3271 } 3272 spin_unlock(&cur_trans->dirty_bgs_lock); 3273 } else if (ret) { 3274 btrfs_abort_transaction(trans, ret); 3275 } 3276 } 3277 3278 /* If it's not on the io list, we need to put the block group */ 3279 if (should_put) 3280 btrfs_put_block_group(cache); 3281 if (drop_reserve) 3282 btrfs_delayed_refs_rsv_release(fs_info, 1); 3283 /* 3284 * Avoid blocking other tasks for too long. It might even save 3285 * us from writing caches for block groups that are going to be 3286 * removed. 3287 */ 3288 mutex_unlock(&trans->transaction->cache_write_mutex); 3289 if (ret) 3290 goto out; 3291 mutex_lock(&trans->transaction->cache_write_mutex); 3292 } 3293 mutex_unlock(&trans->transaction->cache_write_mutex); 3294 3295 /* 3296 * Go through delayed refs for all the stuff we've just kicked off 3297 * and then loop back (just once) 3298 */ 3299 if (!ret) 3300 ret = btrfs_run_delayed_refs(trans, 0); 3301 if (!ret && loops == 0) { 3302 loops++; 3303 spin_lock(&cur_trans->dirty_bgs_lock); 3304 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3305 /* 3306 * dirty_bgs_lock protects us from concurrent block group 3307 * deletes too (not just cache_write_mutex). 3308 */ 3309 if (!list_empty(&dirty)) { 3310 spin_unlock(&cur_trans->dirty_bgs_lock); 3311 goto again; 3312 } 3313 spin_unlock(&cur_trans->dirty_bgs_lock); 3314 } 3315 out: 3316 if (ret < 0) { 3317 spin_lock(&cur_trans->dirty_bgs_lock); 3318 list_splice_init(&dirty, &cur_trans->dirty_bgs); 3319 spin_unlock(&cur_trans->dirty_bgs_lock); 3320 btrfs_cleanup_dirty_bgs(cur_trans, fs_info); 3321 } 3322 3323 btrfs_free_path(path); 3324 return ret; 3325 } 3326 3327 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) 3328 { 3329 struct btrfs_fs_info *fs_info = trans->fs_info; 3330 struct btrfs_block_group *cache; 3331 struct btrfs_transaction *cur_trans = trans->transaction; 3332 int ret = 0; 3333 int should_put; 3334 struct btrfs_path *path; 3335 struct list_head *io = &cur_trans->io_bgs; 3336 3337 path = btrfs_alloc_path(); 3338 if (!path) 3339 return -ENOMEM; 3340 3341 /* 3342 * Even though we are in the critical section of the transaction commit, 3343 * we can still have concurrent tasks adding elements to this 3344 * transaction's list of dirty block groups. These tasks correspond to 3345 * endio free space workers started when writeback finishes for a 3346 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can 3347 * allocate new block groups as a result of COWing nodes of the root 3348 * tree when updating the free space inode. The writeback for the space 3349 * caches is triggered by an earlier call to 3350 * btrfs_start_dirty_block_groups() and iterations of the following 3351 * loop. 3352 * Also we want to do the cache_save_setup first and then run the 3353 * delayed refs to make sure we have the best chance at doing this all 3354 * in one shot. 3355 */ 3356 spin_lock(&cur_trans->dirty_bgs_lock); 3357 while (!list_empty(&cur_trans->dirty_bgs)) { 3358 cache = list_first_entry(&cur_trans->dirty_bgs, 3359 struct btrfs_block_group, 3360 dirty_list); 3361 3362 /* 3363 * This can happen if cache_save_setup re-dirties a block group 3364 * that is already under IO. 
Just wait for it to finish and 3365 * then do it all again. 3366 */ 3367 if (!list_empty(&cache->io_list)) { 3368 spin_unlock(&cur_trans->dirty_bgs_lock); 3369 list_del_init(&cache->io_list); 3370 btrfs_wait_cache_io(trans, cache, path); 3371 btrfs_put_block_group(cache); 3372 spin_lock(&cur_trans->dirty_bgs_lock); 3373 } 3374 3375 /* 3376 * Don't remove from the dirty list until after we've waited on 3377 * any pending IO 3378 */ 3379 list_del_init(&cache->dirty_list); 3380 spin_unlock(&cur_trans->dirty_bgs_lock); 3381 should_put = 1; 3382 3383 cache_save_setup(cache, trans, path); 3384 3385 if (!ret) 3386 ret = btrfs_run_delayed_refs(trans, 3387 (unsigned long) -1); 3388 3389 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { 3390 cache->io_ctl.inode = NULL; 3391 ret = btrfs_write_out_cache(trans, cache, path); 3392 if (ret == 0 && cache->io_ctl.inode) { 3393 should_put = 0; 3394 list_add_tail(&cache->io_list, io); 3395 } else { 3396 /* 3397 * If we failed to write the cache, the 3398 * generation will be bad and life goes on 3399 */ 3400 ret = 0; 3401 } 3402 } 3403 if (!ret) { 3404 ret = update_block_group_item(trans, path, cache); 3405 /* 3406 * One of the free space endio workers might have 3407 * created a new block group while updating a free space 3408 * cache's inode (at inode.c:btrfs_finish_ordered_io()) 3409 * and hasn't released its transaction handle yet, in 3410 * which case the new block group is still attached to 3411 * its transaction handle and its creation has not 3412 * finished yet (no block group item in the extent tree 3413 * yet, etc). If this is the case, wait for all free 3414 * space endio workers to finish and retry. This is a 3415 * very rare case so no need for a more efficient and 3416 * complex approach. 3417 */ 3418 if (ret == -ENOENT) { 3419 wait_event(cur_trans->writer_wait, 3420 atomic_read(&cur_trans->num_writers) == 1); 3421 ret = update_block_group_item(trans, path, cache); 3422 } 3423 if (ret) 3424 btrfs_abort_transaction(trans, ret); 3425 } 3426 3427 /* If it's not on the io list, we need to put the block group */ 3428 if (should_put) 3429 btrfs_put_block_group(cache); 3430 btrfs_delayed_refs_rsv_release(fs_info, 1); 3431 spin_lock(&cur_trans->dirty_bgs_lock); 3432 } 3433 spin_unlock(&cur_trans->dirty_bgs_lock); 3434 3435 /* 3436 * Refer to the definition of the io_bgs member for details on why it's 3437 * safe to use it without any locking 3438 */ 3439 while (!list_empty(io)) { 3440 cache = list_first_entry(io, struct btrfs_block_group, 3441 io_list); 3442 list_del_init(&cache->io_list); 3443 btrfs_wait_cache_io(trans, cache, path); 3444 btrfs_put_block_group(cache); 3445 } 3446 3447 btrfs_free_path(path); 3448 return ret; 3449 } 3450 3451 int btrfs_update_block_group(struct btrfs_trans_handle *trans, 3452 u64 bytenr, u64 num_bytes, bool alloc) 3453 { 3454 struct btrfs_fs_info *info = trans->fs_info; 3455 struct btrfs_block_group *cache = NULL; 3456 u64 total = num_bytes; 3457 u64 old_val; 3458 u64 byte_in_group; 3459 int factor; 3460 int ret = 0; 3461 3462 /* Block accounting for the super block */ 3463 spin_lock(&info->delalloc_root_lock); 3464 old_val = btrfs_super_bytes_used(info->super_copy); 3465 if (alloc) 3466 old_val += num_bytes; 3467 else 3468 old_val -= num_bytes; 3469 btrfs_set_super_bytes_used(info->super_copy, old_val); 3470 spin_unlock(&info->delalloc_root_lock); 3471 3472 while (total) { 3473 struct btrfs_space_info *space_info; 3474 bool reclaim = false; 3475 3476 cache = btrfs_lookup_block_group(info, bytenr); 3477 if (!cache) { 3478
ret = -ENOENT; 3479 break; 3480 } 3481 space_info = cache->space_info; 3482 factor = btrfs_bg_type_to_factor(cache->flags); 3483 3484 /* 3485 * If this block group has its free space cache written out, we 3486 * need to make sure to load it if we are removing space. This 3487 * is because we need the unpinning stage to actually add the 3488 * space back to the block group, otherwise we will leak space. 3489 */ 3490 if (!alloc && !btrfs_block_group_done(cache)) 3491 btrfs_cache_block_group(cache, true); 3492 3493 byte_in_group = bytenr - cache->start; 3494 WARN_ON(byte_in_group > cache->length); 3495 3496 spin_lock(&space_info->lock); 3497 spin_lock(&cache->lock); 3498 3499 if (btrfs_test_opt(info, SPACE_CACHE) && 3500 cache->disk_cache_state < BTRFS_DC_CLEAR) 3501 cache->disk_cache_state = BTRFS_DC_CLEAR; 3502 3503 old_val = cache->used; 3504 num_bytes = min(total, cache->length - byte_in_group); 3505 if (alloc) { 3506 old_val += num_bytes; 3507 cache->used = old_val; 3508 cache->reserved -= num_bytes; 3509 space_info->bytes_reserved -= num_bytes; 3510 space_info->bytes_used += num_bytes; 3511 space_info->disk_used += num_bytes * factor; 3512 spin_unlock(&cache->lock); 3513 spin_unlock(&space_info->lock); 3514 } else { 3515 old_val -= num_bytes; 3516 cache->used = old_val; 3517 cache->pinned += num_bytes; 3518 btrfs_space_info_update_bytes_pinned(info, space_info, 3519 num_bytes); 3520 space_info->bytes_used -= num_bytes; 3521 space_info->disk_used -= num_bytes * factor; 3522 3523 reclaim = should_reclaim_block_group(cache, num_bytes); 3524 3525 spin_unlock(&cache->lock); 3526 spin_unlock(&space_info->lock); 3527 3528 set_extent_dirty(&trans->transaction->pinned_extents, 3529 bytenr, bytenr + num_bytes - 1, 3530 GFP_NOFS | __GFP_NOFAIL); 3531 } 3532 3533 spin_lock(&trans->transaction->dirty_bgs_lock); 3534 if (list_empty(&cache->dirty_list)) { 3535 list_add_tail(&cache->dirty_list, 3536 &trans->transaction->dirty_bgs); 3537 trans->delayed_ref_updates++; 3538 btrfs_get_block_group(cache); 3539 } 3540 spin_unlock(&trans->transaction->dirty_bgs_lock); 3541 3542 /* 3543 * No longer have used bytes in this block group, queue it for 3544 * deletion. We do this after adding the block group to the 3545 * dirty list to avoid races between the cleaner kthread and the 3546 * space cache writeout. 3547 */ 3548 if (!alloc && old_val == 0) { 3549 if (!btrfs_test_opt(info, DISCARD_ASYNC)) 3550 btrfs_mark_bg_unused(cache); 3551 } else if (!alloc && reclaim) { 3552 btrfs_mark_bg_to_reclaim(cache); 3553 } 3554 3555 btrfs_put_block_group(cache); 3556 total -= num_bytes; 3557 bytenr += num_bytes; 3558 } 3559 3560 /* Modified block groups are accounted for in the delayed_refs_rsv. */ 3561 btrfs_update_delayed_refs_rsv(trans); 3562 return ret; 3563 } 3564 3565 /* 3566 * Update the block_group and space info counters. 3567 * 3568 * @cache: The cache we are manipulating 3569 * @ram_bytes: The number of bytes of file content, which will be the same 3570 * as @num_bytes except for the compression path. 3571 * @num_bytes: The number of bytes in question 3572 * @delalloc: The blocks are allocated for the delalloc write 3573 * 3574 * This is called by the allocator when it reserves space. If this is a 3575 * reservation and the block group has become read-only, we cannot make the 3576 * reservation and return -EAGAIN; otherwise this function always succeeds.
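* (With compression, @num_bytes can end up smaller than @ram_bytes, in
* which case the surplus reservation is handed back to waiting tickets,
* see the check at the end of this function.)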
3577 */ 3578 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, 3579 u64 ram_bytes, u64 num_bytes, int delalloc, 3580 bool force_wrong_size_class) 3581 { 3582 struct btrfs_space_info *space_info = cache->space_info; 3583 enum btrfs_block_group_size_class size_class; 3584 int ret = 0; 3585 3586 spin_lock(&space_info->lock); 3587 spin_lock(&cache->lock); 3588 if (cache->ro) { 3589 ret = -EAGAIN; 3590 goto out; 3591 } 3592 3593 if (btrfs_block_group_should_use_size_class(cache)) { 3594 size_class = btrfs_calc_block_group_size_class(num_bytes); 3595 ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class); 3596 if (ret) 3597 goto out; 3598 } 3599 cache->reserved += num_bytes; 3600 space_info->bytes_reserved += num_bytes; 3601 trace_btrfs_space_reservation(cache->fs_info, "space_info", 3602 space_info->flags, num_bytes, 1); 3603 btrfs_space_info_update_bytes_may_use(cache->fs_info, 3604 space_info, -ram_bytes); 3605 if (delalloc) 3606 cache->delalloc_bytes += num_bytes; 3607 3608 /* 3609 * Compression can use less space than we reserved, so wake tickets if 3610 * that happens. 3611 */ 3612 if (num_bytes < ram_bytes) 3613 btrfs_try_granting_tickets(cache->fs_info, space_info); 3614 out: 3615 spin_unlock(&cache->lock); 3616 spin_unlock(&space_info->lock); 3617 return ret; 3618 } 3619 3620 /* 3621 * Update the block_group and space info counters. 3622 * 3623 * @cache: The cache we are manipulating 3624 * @num_bytes: The number of bytes in question 3625 * @delalloc: The blocks are allocated for the delalloc write 3626 * 3627 * This is called by somebody who is freeing space that was never actually used 3628 * on disk. For example if you reserve some space for a new leaf in transaction 3629 * A and before transaction A commits you free that leaf, you call this in 3630 * order to clear the reservation. 3631 */ 3632 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, 3633 u64 num_bytes, int delalloc) 3634 { 3635 struct btrfs_space_info *space_info = cache->space_info; 3636 3637 spin_lock(&space_info->lock); 3638 spin_lock(&cache->lock); 3639 if (cache->ro) 3640 space_info->bytes_readonly += num_bytes; 3641 cache->reserved -= num_bytes; 3642 space_info->bytes_reserved -= num_bytes; 3643 space_info->max_extent_size = 0; 3644 3645 if (delalloc) 3646 cache->delalloc_bytes -= num_bytes; 3647 spin_unlock(&cache->lock); 3648 3649 btrfs_try_granting_tickets(cache->fs_info, space_info); 3650 spin_unlock(&space_info->lock); 3651 } 3652 3653 static void force_metadata_allocation(struct btrfs_fs_info *info) 3654 { 3655 struct list_head *head = &info->space_info; 3656 struct btrfs_space_info *found; 3657 3658 list_for_each_entry(found, head, list) { 3659 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 3660 found->force_alloc = CHUNK_ALLOC_FORCE; 3661 } 3662 } 3663 3664 static int should_alloc_chunk(struct btrfs_fs_info *fs_info, 3665 struct btrfs_space_info *sinfo, int force) 3666 { 3667 u64 bytes_used = btrfs_space_info_used(sinfo, false); 3668 u64 thresh; 3669 3670 if (force == CHUNK_ALLOC_FORCE) 3671 return 1; 3672 3673 /* 3674 * In limited mode, we want to have some free space up to 3675 * about 1% of the FS size.
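* (For example, on a 1TiB filesystem the threshold is max(SZ_64M, 1% of
* 1TiB), i.e. roughly 10GiB: a new chunk is allocated once the space
* info's free space drops below that.)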

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
}

static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
			      struct btrfs_space_info *sinfo, int force)
{
	u64 bytes_used = btrfs_space_info_used(sinfo, false);
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * In limited mode, we want to have some free space up to about 1% of
	 * the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1));

		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	}

	if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80))
		return 0;
	return 1;
}
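
/*
 * A worked example of the thresholds above (illustrative numbers): on a 1 TiB
 * filesystem, CHUNK_ALLOC_LIMITED requests a new chunk while the space_info
 * has less than max(SZ_64M, 1% of 1 TiB) ~= 10 GiB of unallocated room. For
 * the generic rule, with sinfo->total_bytes == SZ_1G:
 *
 *	bytes_used = 700 * SZ_1M;	// 702M < 819M (80% of 1G): returns 0
 *	bytes_used = 900 * SZ_1M;	// 902M >= 819M: returns 1
 */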

int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
{
	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);

	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}

static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
{
	struct btrfs_block_group *bg;
	int ret;

	/*
	 * Check if we have enough space in the system space info because we
	 * will need to update device items in the chunk btree and insert a new
	 * chunk item in the chunk btree as well. This will allocate a new
	 * system block group if needed.
	 */
	check_system_chunk(trans, flags);

	bg = btrfs_create_chunk(trans, flags);
	if (IS_ERR(bg)) {
		ret = PTR_ERR(bg);
		goto out;
	}

	ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
	/*
	 * Normally we are not expected to fail with -ENOSPC here, since we have
	 * previously reserved space in the system space_info and allocated one
	 * new system chunk if necessary. However there are three exceptions:
	 *
	 * 1) We may have enough free space in the system space_info but all the
	 *    existing system block groups have a profile which cannot be used
	 *    for extent allocation.
	 *
	 *    This happens when mounting in degraded mode. For example we have a
	 *    RAID1 filesystem with 2 devices, lose one device and mount the fs
	 *    using the other device in degraded mode. If we then allocate a chunk,
	 *    we may have enough free space in the existing system space_info, but
	 *    none of the block groups can be used for extent allocation since they
	 *    have a RAID1 profile, and because we are in degraded mode with a
	 *    single device, we are forced to allocate a new system chunk with a
	 *    SINGLE profile. Making check_system_chunk() iterate over all system
	 *    block groups and check if they have a usable profile and enough space
	 *    can be slow on very large filesystems, so we tolerate the -ENOSPC and
	 *    try again after forcing allocation of a new system chunk. Like this
	 *    we avoid paying the cost of that search in normal circumstances, when
	 *    we were not mounted in degraded mode;
	 *
	 * 2) We had enough free space in the system space_info, and one suitable
	 *    block group to allocate from when we called check_system_chunk()
	 *    above. However right after we called it, the only system block group
	 *    with enough free space got turned into RO mode by a running scrub,
	 *    and in this case we have to allocate a new one and retry. We only
	 *    need to do this allocation and retry once, since we have a
	 *    transaction handle and scrub uses the commit root to search for
	 *    block groups;
	 *
	 * 3) We had one system block group with enough free space when we called
	 *    check_system_chunk(), but after that, right before we tried to
	 *    allocate the last extent buffer we needed, a discard operation came
	 *    in and it temporarily removed the last free space entry from the
	 *    block group (discard removes a free space entry, discards it, and
	 *    then adds back the entry to the block group cache).
	 */
	if (ret == -ENOSPC) {
		const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
		struct btrfs_block_group *sys_bg;

		sys_bg = btrfs_create_chunk(trans, sys_flags);
		if (IS_ERR(sys_bg)) {
			ret = PTR_ERR(sys_bg);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	} else if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
out:
	btrfs_trans_release_chunk_metadata(trans);

	if (ret)
		return ERR_PTR(ret);

	btrfs_get_block_group(bg);
	return bg;
}

/*
 * Chunk allocation is done in 2 phases:
 *
 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
 *    the chunk, the chunk mapping, create its block group and add the items
 *    that belong in the chunk btree to it - more specifically, we need to
 *    update device items in the chunk btree and add a new chunk item to it.
 *
 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
 *    group item to the extent btree and the device extent items to the devices
 *    btree.
 *
 * This is done to prevent deadlocks. For example when COWing a node from the
 * extent btree we are holding a write lock on the node's parent and if we
 * trigger chunk allocation and attempt to insert the new block group item
 * in the extent btree right away, we could deadlock because the path for the
 * insertion can include that parent node. At first glance it seems impossible
 * to trigger chunk allocation after starting a transaction since tasks should
 * reserve enough transaction units (metadata space), however, while that is
 * true most of the time, chunk allocation may still be triggered for several
 * reasons:
 *
 * 1) When reserving metadata, we check if there is enough free space in the
 *    metadata space_info and therefore don't trigger allocation of a new chunk.
 *    However later when the task actually tries to COW an extent buffer from
 *    the extent btree or from the device btree for example, it is forced to
 *    allocate a new block group (chunk) because the only one that had enough
 *    free space was just turned to RO mode by a running scrub for example (or
 *    device replace, block group reclaim thread, etc), so we cannot use it
 *    for allocating an extent and end up being forced to allocate a new one;
 *
 * 2) Because we only check that the metadata space_info has enough free bytes,
 *    we end up not allocating a new metadata chunk in that case. However if
 *    the filesystem was mounted in degraded mode, none of the existing block
 *    groups might be suitable for extent allocation due to their incompatible
 *    profile (e.g. mounting a filesystem with 2 devices, where all block groups
 *    use a RAID1 profile, in degraded mode with a single device). In this case
 *    when the task attempts to COW some extent buffer of the extent btree for
 *    example, it will trigger allocation of a new metadata block group with a
 *    suitable profile (SINGLE profile in the example of the degraded mount of
 *    the RAID1 filesystem);
 *
 * 3) The task has reserved enough transaction units / metadata space, but when
 *    it attempts to COW an extent buffer from the extent or device btree for
 *    example, it does not find any free extent in any metadata block group,
 *    and is therefore forced to try to allocate a new metadata block group.
 *    This is because some other task allocated all available extents in the
 *    meantime - this typically happens with tasks that don't reserve space
 *    properly, either intentionally or as a bug. One example where this is
 *    done intentionally is fsync, as it does not reserve any transaction units
 *    and ends up allocating a variable number of metadata extents for log
 *    tree extent buffers;
 *
 * 4) The task has reserved enough transaction units / metadata space, but right
 *    before it tries to allocate the last extent buffer it needs, a discard
 *    operation comes in and, temporarily, removes the last free space entry from
 *    the only metadata block group that had free space (discard starts by
 *    removing a free space entry from a block group, then does the discard
 *    operation and, once it's done, it adds back the free space entry to the
 *    block group).
 *
 * We also need this 2-phase setup when adding a device to a filesystem with
 * a seed device - we must create new metadata and system chunks without adding
 * any of the block group items to the chunk, extent and device btrees. If we
 * did not do it this way, we would get ENOSPC when attempting to update those
 * btrees, since all the chunks from the seed device are read-only.
 *
 * Phase 1 does the updates and insertions to the chunk btree because if we had
 * it done in phase 2 and have a thundering herd of tasks allocating chunks in
 * parallel, we risk having too many system chunks allocated by many tasks if
 * many tasks reach phase 1 without the previous ones completing phase 2. In the
 * extreme case this leads to exhaustion of the system chunk array in the
 * superblock. This is easier to trigger if using a btree node/leaf size of 64K
 * and with RAID filesystems (so we have more device items in the chunk btree).
 * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
 * the system chunk array due to concurrent allocations") provides more details.
 *
 * Allocation of system chunks does not happen through this function. A task that
 * needs to update the chunk btree (the only btree that uses system chunks) must
 * preallocate chunk space by calling either check_system_chunk() or
 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
 * metadata chunk or when removing a chunk, while the latter is used before doing
 * a modification to the chunk btree - use cases for the latter are adding,
 * removing and resizing a device as well as relocation of a system chunk.
 * See the comment below for more details.
 *
 * The reservation of system space, done through check_system_chunk(), as well
 * as all the updates and insertions into the chunk btree must be done while
 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
 * an extent buffer from the chunk btree we never trigger allocation of a new
 * system chunk, which would result in a deadlock (trying to lock an extent
 * buffer of the chunk btree twice, first time before triggering the chunk
 * allocation and the second time during chunk allocation while attempting to
 * update the chunk btree). The system chunk array is also updated while holding
 * that mutex. The same logic applies to removing chunks - we must reserve system
 * space, update the chunk btree and the system chunk array in the superblock
 * while holding fs_info->chunk_mutex.
 *
 * This function, btrfs_chunk_alloc(), belongs to phase 1.
 *
 * If @force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If @force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */

int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_group *ret_bg;
	bool wait_for_alloc = false;
	bool should_alloc = false;
	bool from_extent_allocation = false;
	int ret = 0;

	if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
		from_extent_allocation = true;
		force = CHUNK_ALLOC_FORCE;
	}

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;
	/*
	 * Allocation of system chunks cannot happen through this path, as we
	 * could end up in a deadlock if we are allocating a data or metadata
	 * chunk and there is another task modifying the chunk btree.
	 *
	 * This is because while we are holding the chunk mutex, we will attempt
	 * to add the new chunk item to the chunk btree or update an existing
	 * device item in the chunk btree, while the other task that is modifying
	 * the chunk btree is attempting to COW an extent buffer while holding a
	 * lock on it and on its parent - if the COW operation triggers a system
	 * chunk allocation, then we can deadlock because we are holding the
	 * chunk mutex and we may need to access that extent buffer or its parent
	 * in order to add the chunk item or update a device item.
	 *
	 * Tasks that want to modify the chunk tree should reserve system space
	 * before updating the chunk btree, by calling either
	 * btrfs_reserve_chunk_metadata() or check_system_chunk().
	 * It's possible that after a task reserves the space, it still ends up
	 * here - this happens in the cases described above at do_chunk_alloc().
	 * The task will have to either retry or fail.
	 */
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return -ENOSPC;

	space_info = btrfs_find_space_info(fs_info, flags);
	ASSERT(space_info);

	do {
		spin_lock(&space_info->lock);
		if (force < space_info->force_alloc)
			force = space_info->force_alloc;
		should_alloc = should_alloc_chunk(fs_info, space_info, force);
		if (space_info->full) {
			/* No more free physical space */
			if (should_alloc)
				ret = -ENOSPC;
			else
				ret = 0;
			spin_unlock(&space_info->lock);
			return ret;
		} else if (!should_alloc) {
			spin_unlock(&space_info->lock);
			return 0;
		} else if (space_info->chunk_alloc) {
			/*
			 * Someone is already allocating, so we need to block
			 * until this someone is finished and then loop to
			 * recheck if we should continue with our allocation
			 * attempt.
			 */
			wait_for_alloc = true;
			force = CHUNK_ALLOC_NO_FORCE;
			spin_unlock(&space_info->lock);
			mutex_lock(&fs_info->chunk_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			/* Proceed with allocation */
			space_info->chunk_alloc = 1;
			wait_for_alloc = false;
			spin_unlock(&space_info->lock);
		}

		cond_resched();
	} while (wait_for_alloc);

	mutex_lock(&fs_info->chunk_mutex);
	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * If we're doing a data chunk, go ahead and make sure that we keep a
	 * reasonable number of metadata chunks allocated in the FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret_bg = do_chunk_alloc(trans, flags);
	trans->allocating_chunk = false;

	if (IS_ERR(ret_bg)) {
		ret = PTR_ERR(ret_bg);
	} else if (from_extent_allocation) {
		/*
		 * New block group is likely to be used soon. Try to activate
		 * it now. Failure is OK for now.
		 */
		btrfs_zone_activate(ret_bg);
	}

	if (!ret)
		btrfs_put_block_group(ret_bg);

	spin_lock(&space_info->lock);
	if (ret < 0) {
		if (ret == -ENOSPC)
			space_info->full = 1;
		else
			goto out;
	} else {
		ret = 1;
		space_info->max_extent_size = 0;
	}

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);

	return ret;
}
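
/*
 * Example (a minimal sketch; the call site is hypothetical): a caller that
 * only wants a new data chunk if it looks necessary passes
 * CHUNK_ALLOC_NO_FORCE and treats both 0 and 1 as success:
 *
 *	u64 alloc_flags = btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
 *	int ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_NO_FORCE);
 *
 *	// ret == 0: no chunk was needed, ret == 1: a chunk was allocated,
 *	// ret == -ENOSPC: no unallocated device space was left, other
 *	// negative values are real errors.
 */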

static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
	u64 num_dev;

	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
	if (!num_dev)
		num_dev = fs_info->fs_devices->rw_devices;

	return num_dev;
}

static void reserve_chunk_space(struct btrfs_trans_handle *trans,
				u64 bytes,
				u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *info;
	u64 left;
	int ret = 0;

	/*
	 * Needed because we can end up allocating a system chunk and need an
	 * atomic and race-free space reservation in the chunk block reserve.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, bytes, type);
		btrfs_dump_space_info(fs_info, info, 0, 0);
	}

	if (left < bytes) {
		u64 flags = btrfs_system_alloc_profile(fs_info);
		struct btrfs_block_group *bg;

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leaves from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		bg = btrfs_create_chunk(trans, flags);
		if (IS_ERR(bg)) {
			ret = PTR_ERR(bg);
		} else {
			/*
			 * We have a new chunk. We also need to activate it for
			 * zoned filesystems.
			 */
			ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
			if (ret < 0)
				return;

			/*
			 * If we fail to add the chunk item here, we end up
			 * trying again at phase 2 of chunk allocation, at
			 * btrfs_create_pending_block_groups(). So ignore
			 * any error here. An ENOSPC here could happen, due to
			 * the cases described at do_chunk_alloc() - the system
			 * block group we just created was just turned into RO
			 * mode by a scrub for example, or a running discard
			 * temporarily removed its free space entries, etc.
			 */
			btrfs_chunk_alloc_add_chunk_item(trans, bg);
		}
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info,
					  &fs_info->chunk_block_rsv,
					  bytes, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += bytes;
	}
}

/*
 * Reserve space in the system space for allocating or removing a chunk.
 * The caller must be holding fs_info->chunk_mutex.
 */
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	const u64 num_devs = get_profile_num_devs(fs_info, type);
	u64 bytes;

	/* num_devs device items to update and 1 chunk item to add or remove. */
	bytes = btrfs_calc_metadata_size(fs_info, num_devs) +
		btrfs_calc_insert_metadata_size(fs_info, 1);

	reserve_chunk_space(trans, bytes, type);
}
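
/*
 * A worked example (assuming the usual helper definitions, where
 * btrfs_calc_metadata_size() reserves nodesize * BTRFS_MAX_LEVEL per item and
 * btrfs_calc_insert_metadata_size() reserves twice that): for a RAID1 chunk
 * (devs_max == 2) on a filesystem with a 16K nodesize:
 *
 *	bytes = 16K * 8 * 2	// update 2 device items
 *	      + 16K * 8 * 2 * 1	// insert 1 chunk item
 *	      = 512K
 */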

/*
 * Reserve space in the system space, if needed, for doing a modification to the
 * chunk btree.
 *
 * @trans:		A transaction handle.
 * @is_item_insertion:	Indicate if the modification is for inserting a new item
 *			in the chunk btree or if it's for the deletion or update
 *			of an existing item.
 *
 * This is used in a context where we need to update the chunk btree outside
 * block group allocation and removal, to avoid a deadlock with a concurrent
 * task that is allocating a metadata or data block group and therefore needs to
 * update the chunk btree while holding the chunk mutex. After the update to the
 * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called.
 */
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 bytes;

	if (is_item_insertion)
		bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	else
		bytes = btrfs_calc_metadata_size(fs_info, 1);

	mutex_lock(&fs_info->chunk_mutex);
	reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM);
	mutex_unlock(&fs_info->chunk_mutex);
}
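
/*
 * Example (a minimal sketch; the chunk btree update itself is hypothetical
 * and elided): callers bracket their chunk btree modification with the
 * reservation and its release:
 *
 *	btrfs_reserve_chunk_metadata(trans, true);
 *	ret = example_insert_chunk_item(trans);	// hypothetical update
 *	btrfs_trans_release_chunk_metadata(trans);
 */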

void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;

	block_group = btrfs_lookup_first_block_group(info, 0);
	while (block_group) {
		btrfs_wait_block_group_cache_done(block_group);
		spin_lock(&block_group->lock);
		if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF,
				       &block_group->runtime_flags)) {
			struct inode *inode = block_group->inode;

			block_group->inode = NULL;
			spin_unlock(&block_group->lock);

			ASSERT(block_group->io_ctl.inode == NULL);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		block_group = btrfs_next_block_group(block_group);
	}
}

/*
 * Must be called only after stopping all workers, since we could have block
 * group caching kthreads running, and therefore they could race with us if we
 * freed the block groups before stopping them.
 */
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	write_lock(&info->block_group_cache_lock);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		btrfs_put_caching_control(caching_ctl);
	}
	write_unlock(&info->block_group_cache_lock);

	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}

	while (!list_empty(&info->reclaim_bgs)) {
		block_group = list_first_entry(&info->reclaim_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->zone_active_bgs_lock);
	while (!list_empty(&info->zone_active_bgs)) {
		block_group = list_first_entry(&info->zone_active_bgs,
					       struct btrfs_block_group,
					       active_bg_list);
		list_del_init(&block_group->active_bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->zone_active_bgs_lock);

	write_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group,
				       cache_node);
		rb_erase_cached(&block_group->cache_node,
				&info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		write_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			btrfs_free_excluded_extents(block_group);

		btrfs_remove_free_space_cache(block_group);
		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(refcount_read(&block_group->refs) == 1);
		ASSERT(block_group->swap_extents == 0);
		btrfs_put_block_group(block_group);

		write_lock(&info->block_group_cache_lock);
	}
	write_unlock(&info->block_group_cache_lock);

	btrfs_release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		/*
		 * Do not hide this behind enospc_debug, this is actually
		 * important and indicates a real bug if this happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_may_use > 0))
			btrfs_dump_space_info(info, space_info, 0, 0);

		/*
		 * If there was a failure to cleanup a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on bytes_reserved > 0 in
		 * that case.
		 */
		if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
			if (WARN_ON(space_info->bytes_reserved > 0))
				btrfs_dump_space_info(info, space_info, 0, 0);
		}

		WARN_ON(space_info->reclaim_size > 0);
		list_del(&space_info->list);
		btrfs_sysfs_remove_space_info(space_info);
	}
	return 0;
}

void btrfs_freeze_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->frozen);
}

void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
	spin_unlock(&block_group->lock);

	if (cleanup) {
		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->start,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We may have left one free space entry and other tasks
		 * trimming this block group may have left one entry each.
		 * Free them if any.
		 */
		btrfs_remove_free_space_cache(block_group);
	}
}
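
/*
 * Example (a minimal sketch; the work function is hypothetical): tasks such as
 * trimming, which need the block group's chunk mapping to stay alive even if
 * the group is removed meanwhile, bracket their work with a freeze/unfreeze
 * pair; the last unfreeze after removal performs the deferred cleanup above:
 *
 *	btrfs_freeze_block_group(bg);
 *	ret = example_trim_work(bg);	// hypothetical
 *	btrfs_unfreeze_block_group(bg);
 */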

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
{
	bool ret = true;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		bg->swap_extents++;
	spin_unlock(&bg->lock);

	return ret;
}

void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
{
	spin_lock(&bg->lock);
	ASSERT(!bg->ro);
	ASSERT(bg->swap_extents >= amount);
	bg->swap_extents -= amount;
	spin_unlock(&bg->lock);
}

enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size)
{
	if (size <= SZ_128K)
		return BTRFS_BG_SZ_SMALL;
	if (size <= SZ_8M)
		return BTRFS_BG_SZ_MEDIUM;
	return BTRFS_BG_SZ_LARGE;
}
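
/*
 * The boundaries above, worked out (SZ_* constants from linux/sizes.h):
 *
 *	btrfs_calc_block_group_size_class(SZ_64K);	// BTRFS_BG_SZ_SMALL
 *	btrfs_calc_block_group_size_class(SZ_1M);	// BTRFS_BG_SZ_MEDIUM
 *	btrfs_calc_block_group_size_class(SZ_16M);	// BTRFS_BG_SZ_LARGE
 */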

/*
 * Handle a block group allocating an extent in a size class
 *
 * @bg:				The block group we allocated in.
 * @size_class:			The size class of the allocation.
 * @force_wrong_size_class:	Whether we are desperate enough to allow
 *				mismatched size classes.
 *
 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the
 * case of a race that leads to the wrong size class without
 * force_wrong_size_class set.
 *
 * find_free_extent will skip block groups with a mismatched size class until
 * it really needs to avoid ENOSPC. In that case it will set
 * force_wrong_size_class. However, if a block group is newly allocated and
 * doesn't yet have a size class, then it is possible for two allocations of
 * different sizes to race and both try to use it. The loser is caught here and
 * has to retry.
 */
int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
				     enum btrfs_block_group_size_class size_class,
				     bool force_wrong_size_class)
{
	ASSERT(size_class != BTRFS_BG_SZ_NONE);

	/* The new allocation is in the right size class, do nothing */
	if (bg->size_class == size_class)
		return 0;
	/*
	 * The new allocation is in a mismatched size class.
	 * This means one of two things:
	 *
	 * 1. Two tasks in find_free_extent for different size_classes raced
	 *    and hit the same empty block_group. Make the loser try again.
	 * 2. A call to find_free_extent got desperate enough to set
	 *    'force_wrong_size_class'. Don't change the size_class, but allow
	 *    the allocation.
	 */
	if (bg->size_class != BTRFS_BG_SZ_NONE) {
		if (force_wrong_size_class)
			return 0;
		return -EAGAIN;
	}
	/*
	 * The happy new block group case: the new allocation is the first
	 * one in the block_group so we set size_class.
	 */
	bg->size_class = size_class;

	return 0;
}

bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg)
{
	if (btrfs_is_zoned(bg->fs_info))
		return false;
	if (!btrfs_is_block_group_data_only(bg))
		return false;
	return true;
}