// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
			  bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	init_waitqueue_head(&space_info->wait);
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}
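/*
 * For illustration (hypothetical numbers): btrfs_bg_type_to_factor() returns
 * the number of on-disk copies a profile requires (e.g. 2 for DUP, RAID1 and
 * RAID10), so registering a 1GiB RAID1 block group below grows total_bytes
 * by 1GiB but disk_total by 2GiB, since every byte is stored twice on disk.
 */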
void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_space_info_add_new_bytes(info, found,
				       total_bytes - bytes_used -
				       bytes_readonly);
	spin_unlock(&found->lock);
	*space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

static int can_overcommit(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush,
			  bool system_chunk)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 profile;
	u64 space_size;
	u64 avail;
	u64 used;
	int factor;

	/* Don't overcommit when in mixed mode. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	if (system_chunk)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	used = btrfs_space_info_used(space_info, false);

	/*
	 * We only want to allow over committing if we have lots of actual space
	 * free, but if we don't have enough space to handle the global reserve
	 * space then we could end up having a real enospc problem when trying
	 * to allocate a chunk or some other such important allocation.
	 */
	spin_lock(&global_rsv->lock);
	space_size = calc_global_rsv_need_space(global_rsv);
	spin_unlock(&global_rsv->lock);
	if (used + space_size >= space_info->total_bytes)
		return 0;

	used += space_info->bytes_may_use;

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable. For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
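/*
 * Worked example for can_overcommit(), with made-up numbers: a metadata
 * space_info with total_bytes = 1GiB, 8GiB of unallocated device space and a
 * RAID1 profile gives avail = 8GiB / 2 = 4GiB. A BTRFS_RESERVE_FLUSH_ALL
 * reservation shrinks that to 512MiB (avail >>= 3), so the reservation is
 * allowed while used (including bytes_may_use) + bytes stays below
 * total_bytes + 512MiB = 1.5GiB, provided the global reserve check passed.
 */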
/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head;
	u64 used;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
	bool check_overcommit = false;

	spin_lock(&space_info->lock);
	head = &space_info->priority_tickets;

	/*
	 * If we are over our limit then we need to check and see if we can
	 * overcommit, and if we can't then we just need to free up our space
	 * and not satisfy any requests.
	 */
	used = btrfs_space_info_used(space_info, true);
	if (used - num_bytes >= space_info->total_bytes)
		check_overcommit = true;
again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		/*
		 * We use 0 bytes because this space is already reserved, so
		 * adding the ticket space would be a double count.
		 */
		if (check_overcommit &&
		    !can_overcommit(fs_info, space_info, 0, flush, false))
			break;
		if (num_bytes >= ticket->bytes) {
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
	btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
	spin_unlock(&space_info->lock);
}

/*
 * This is for newly allocated space that isn't accounted in
 * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent
 * we use this helper.
 */
void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head = &space_info->priority_tickets;

again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		if (num_bytes >= ticket->bytes) {
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      num_bytes);
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		goto again;
	}
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly);
	spin_unlock(&info->lock);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->key.objectid, cache->key.offset,
			btrfs_block_group_used(&cache->item), cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * Even though we don't hold ->s_umount, we needn't worry about
		 * the filesystem going from r/w to r/o: the filesystem must
		 * guarantee that the delalloc inode list is empty once it is
		 * read-only (all dirty pages have been written to disk).
		 */
		btrfs_start_delalloc_roots(fs_info, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
	}
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}
#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservations by flushing delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
			    u64 orig, bool wait_ordered)
{
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 dio_bytes;
	u64 async_pages;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops;

	/* Calculate the number of items we need to flush for this reservation */
	items = calc_reclaim_items_nr(fs_info, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	if (delalloc_bytes == 0 && dio_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (dio_bytes > delalloc_bytes)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || dio_bytes) && loops < 3) {
		nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;

		/*
		 * Triggers inode writeback for up to nr_pages. This will invoke
		 * ->writepages callback and trigger delalloc filling
		 * (btrfs_run_delalloc_range()).
		 */
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);

		/*
		 * We need to wait for the compressed pages to start before
		 * we continue.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * Calculate how many compressed pages we want to be written
		 * before we continue. I.e. if there are more async pages than
		 * we require, wait_event will wait until nr_pages are written.
		 */
		if (async_pages <= nr_pages)
			async_pages = 0;
		else
			async_pages -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)async_pages);
skip_async:
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	}
}
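/*
 * Worked example for calc_reclaim_items_nr() and shrink_delalloc() above,
 * assuming a 16KiB nodesize: btrfs_calc_insert_metadata_size(fs_info, 1) is
 * 2 * BTRFS_MAX_LEVEL * 16KiB = 256KiB, so reclaiming 1MiB maps to items = 4
 * and to_reclaim = 4 * EXTENT_SIZE_PER_ITEM = 1MiB, i.e. writeback passes of
 * nr_pages = 256 with 4KiB pages. At most three passes are made, and the
 * loop bails out early once no tickets remain on the space_info.
 */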
/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @fs_info - the fs_info for our filesystem
 * @space_info - the space_info we are trying to make a reservation in
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *trans;
	u64 bytes_needed;
	u64 reclaim_bytes = 0;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	bytes_needed = (ticket) ? ticket->bytes : 0;
	spin_unlock(&space_info->lock);

	if (!bytes_needed)
		return 0;

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * See if there is enough pinned space to make this reservation, or if
	 * we have block groups that are going to be freed, allowing us to
	 * possibly do a chunk allocation the next loop through.
	 */
	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
	    __percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		goto enospc;

	spin_lock(&delayed_rsv->lock);
	reclaim_bytes += delayed_rsv->reserved;
	spin_unlock(&delayed_rsv->lock);

	spin_lock(&delayed_refs_rsv->lock);
	reclaim_bytes += delayed_refs_rsv->reserved;
	spin_unlock(&delayed_refs_rsv->lock);
	if (reclaim_bytes >= bytes_needed)
		goto commit;
	bytes_needed -= reclaim_bytes;

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
		goto enospc;

commit:
	return btrfs_commit_transaction(trans);
enospc:
	btrfs_end_transaction(trans);
	return -ENOSPC;
}
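/*
 * Example of the commit decision above (hypothetical numbers): a ticket
 * needing 1MiB with only 256KiB of pinned bytes would normally take the
 * enospc path, but if delayed_rsv and delayed_refs_rsv together still hold
 * 1.25MiB, that space is returned by the commit, so committing the
 * transaction is worthwhile.
 */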
/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_metadata_alloc_profile(fs_info),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
	return;
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 bool system_chunk)
{
	struct reserve_ticket *ticket;
	u64 used;
	u64 expected;
	u64 to_reclaim = 0;

	list_for_each_entry(ticket, &space_info->tickets, list)
		to_reclaim += ticket->bytes;
	list_for_each_entry(ticket, &space_info->priority_tickets, list)
		to_reclaim += ticket->bytes;
	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (can_overcommit(fs_info, space_info, to_reclaim,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (can_overcommit(fs_info, space_info, SZ_1M,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}

static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used, bool system_chunk)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
					      system_chunk))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static bool wake_all_tickets(struct list_head *head)
{
	struct reserve_ticket *ticket;

	while (!list_empty(head)) {
		ticket = list_first_entry(head, struct reserve_ticket, list);
		list_del_init(&ticket->list);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);
		if (ticket->bytes != ticket->orig_bytes)
			return true;
	}
	return false;
}
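/*
 * The worker below walks the btrfs_flush_state values in order, from
 * FLUSH_DELAYED_ITEMS_NR up to COMMIT_TRANS, and only escalates to the next
 * (more expensive) state when a pass fails to satisfy any ticket
 * (space_info->tickets_id unchanged): running delayed items is cheap, while
 * forced chunk allocation and a transaction commit are last resorts.
 */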
/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info,
							      false);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space. Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim. We would rather use that than possibly create an
		 * underutilized metadata chunk. So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction. If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (wake_all_tickets(&space_info->tickets)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}
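/*
 * Note: the work initialized above is queued from __reserve_metadata_bytes()
 * via queue_work(system_unbound_wq, &fs_info->async_reclaim_work), either
 * when the first FLUSH_ALL ticket is created or preemptively when
 * need_do_async_reclaim() says we are close to full.
 */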
static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}
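/*
 * Note on the two wait strategies above: BTRFS_RESERVE_FLUSH_LIMIT and
 * BTRFS_RESERVE_FLUSH_EVICT tickets flush a reduced state list synchronously
 * in the caller's context (priority_reclaim_metadata_space()), while
 * BTRFS_RESERVE_FLUSH_ALL tickets sleep in wait_reserve_ticket() and rely on
 * the async worker to make progress on their behalf.
 */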
/**
 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
 * @fs_info - the fs
 * @space_info - the space_info for the reservation
 * @ticket - the ticket for the reservation
 * @flush - how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 enum btrfs_reserve_flush_enum flush)
{
	u64 reclaim_bytes = 0;
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_ALL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		if (ticket->bytes < ticket->orig_bytes)
			reclaim_bytes = ticket->orig_bytes - ticket->bytes;
		list_del_init(&ticket->list);
		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);

	if (reclaim_bytes)
		btrfs_space_info_add_old_bytes(fs_info, space_info,
					       reclaim_bytes);
	ASSERT(list_empty(&ticket->list));
	return ret;
}

/**
 * __reserve_metadata_bytes - try to reserve bytes from a space_info
 * @fs_info - the filesystem
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 * @system_chunk - whether we are reserving for the system chunk root
 *
 * This will reserve orig_bytes number of bytes from the given space info. If
 * there is not enough space it will make an attempt to flush out space to
 * make room. It will do this by flushing delalloc if possible or committing
 * the transaction. If flush is BTRFS_RESERVE_NO_FLUSH then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush,
				    bool system_chunk)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);
	pending_tickets = !list_empty(&space_info->tickets) ||
		!list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     can_overcommit(fs_info, space_info, orig_bytes, flush,
			    system_chunk))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.orig_bytes = orig_bytes;
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		init_waitqueue_head(&ticket.wait);
		if (flush == BTRFS_RESERVE_FLUSH_ALL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info,
					  used, system_chunk) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
}
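/*
 * End-to-end sketch of the slow path (hypothetical numbers): a
 * BTRFS_RESERVE_FLUSH_ALL request for 1MiB that can be neither satisfied nor
 * overcommitted queues a ticket, kicks the async worker and sleeps in
 * wait_reserve_ticket(). If flushing frees enough space, the helpers in
 * btrfs_space_info_add_*_bytes() zero ticket->bytes and wake the ticket with
 * error == 0; after three fruitless commit cycles the worker instead fails
 * the remaining tickets with -ENOSPC through wake_all_tickets().
 */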
/**
 * btrfs_reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool system_chunk = (root == fs_info->chunk_root);

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush, system_chunk);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}
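/*
 * Typical call site (illustrative only): callers reserve through a block_rsv,
 * e.g.
 *
 *	ret = btrfs_reserve_metadata_bytes(root, &fs_info->trans_block_rsv,
 *					   num_bytes, BTRFS_RESERVE_FLUSH_ALL);
 *
 * and unused reservations eventually flow back through the block_rsv helpers
 * into btrfs_space_info_add_old_bytes() above.
 */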