// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a
 *   space_info for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers
 *   to determine the size of the block reserves, and then use the actual
 *   bytes values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not
 *   have enough space.
 *
 *   -> __reserve_metadata_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be
 *     set on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use
 *     and the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it is we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
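 *
 *   As a rough sketch (not literal code), the ticketed path boils down to:
 *
 *     ticket.bytes = num_bytes;
 *     list_add_tail(&ticket.list, &space_info->tickets);
 *     queue_work(system_unbound_wq, &fs_info->async_reclaim_work);
 *     wait_event(ticket.wait, ticket.bytes == 0 || ticket.error);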
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order
 *   to reduce the locking overhead on the various trees, and even to keep
 *   from doing any work at all in the case of delayed refs.  Each of these
 *   delayed things however hold reservations, and so letting them run allows
 *   us to reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple
 *     write for example, we would update the inode item at write time to
 *     update the mtime, and then again at finish_ordered_io() time in order
 *     to update the isize or bytes.  We keep these delayed items to coalesce
 *     these operations into a single operation done on demand.  These are an
 *     easy way to reclaim metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is
 *     reserved for delayed allocation.  We can reclaim some of this space
 *     simply by running delalloc, but usually we need to wait for ordered
 *     extents to reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and
 *     every delayed ref operation holds a reservation.  Running these is a
 *     quick way to reclaim space, but we want to hold this until the end
 *     because COW can churn a lot and we can avoid making some extent tree
 *     modifications if we are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit: we don't want to have a lot of useless metadata space when
 *     our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by
 *     these operations, however they won't be usable until the transaction
 *     commits.
 *
 *   COMMIT_TRANS
 *     may_commit_transaction() is the ultimate arbiter on whether we commit
 *     the transaction or not.  In order to avoid constantly churning we do
 *     all the above flushing first and then commit the transaction as the
 *     last resort.  However we need to take into account things like pinned
 *     space that would be freed, plus any delayed work we may not have gotten
 *     rid of in the case of metadata.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If
 *   there is no unallocated space to be had, all reservations are kept within
 *   the free space in the allocated metadata chunks.
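 *
 *   As an illustrative example of the math in btrfs_can_overcommit(): with
 *   16GiB of unallocated disk and RAID1 metadata (factor 2), a reservation
 *   made with BTRFS_RESERVE_FLUSH_ALL may exceed total_bytes by up to
 *   (16GiB / 2) >> 3 = 1GiB, while one made with a weaker flush level may
 *   exceed it by up to (16GiB / 2) >> 1 = 4GiB.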
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the
 *   right thing with or without extra unallocated space.
 */

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
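
	/*
	 * The space we just added may be enough to satisfy reservations that
	 * are already waiting, so poke the ticketing code.
	 */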
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	u64 used;
	int factor;

	/* Don't overcommit when in mixed mode. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	used = btrfs_space_info_used(space_info, true);
	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * 1/2 of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use,
 * so basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);
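		/*
		 * Note the flush mode: priority tickets are checked under
		 * BTRFS_RESERVE_NO_FLUSH (which btrfs_can_overcommit() treats
		 * with the more generous 1/2 overcommit slack), while the
		 * normal ticket list below is re-checked with
		 * BTRFS_RESERVE_FLUSH_ALL (only 1/8).
		 */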
		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			list_del_init(&ticket->list);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}
474 */ 475 btrfs_start_delalloc_roots(fs_info, nr_items); 476 if (!current->journal_info) 477 btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1); 478 } 479 } 480 481 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, 482 u64 to_reclaim) 483 { 484 u64 bytes; 485 u64 nr; 486 487 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 488 nr = div64_u64(to_reclaim, bytes); 489 if (!nr) 490 nr = 1; 491 return nr; 492 } 493 494 #define EXTENT_SIZE_PER_ITEM SZ_256K 495 496 /* 497 * shrink metadata reservation for delalloc 498 */ 499 static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim, 500 u64 orig, bool wait_ordered) 501 { 502 struct btrfs_space_info *space_info; 503 struct btrfs_trans_handle *trans; 504 u64 delalloc_bytes; 505 u64 dio_bytes; 506 u64 async_pages; 507 u64 items; 508 long time_left; 509 unsigned long nr_pages; 510 int loops; 511 512 /* Calc the number of the pages we need flush for space reservation */ 513 items = calc_reclaim_items_nr(fs_info, to_reclaim); 514 to_reclaim = items * EXTENT_SIZE_PER_ITEM; 515 516 trans = (struct btrfs_trans_handle *)current->journal_info; 517 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 518 519 delalloc_bytes = percpu_counter_sum_positive( 520 &fs_info->delalloc_bytes); 521 dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes); 522 if (delalloc_bytes == 0 && dio_bytes == 0) { 523 if (trans) 524 return; 525 if (wait_ordered) 526 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 527 return; 528 } 529 530 /* 531 * If we are doing more ordered than delalloc we need to just wait on 532 * ordered extents, otherwise we'll waste time trying to flush delalloc 533 * that likely won't give us the space back we need. 534 */ 535 if (dio_bytes > delalloc_bytes) 536 wait_ordered = true; 537 538 loops = 0; 539 while ((delalloc_bytes || dio_bytes) && loops < 3) { 540 nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT; 541 542 /* 543 * Triggers inode writeback for up to nr_pages. This will invoke 544 * ->writepages callback and trigger delalloc filling 545 * (btrfs_run_delalloc_range()). 546 */ 547 btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items); 548 549 /* 550 * We need to wait for the compressed pages to start before 551 * we continue. 552 */ 553 async_pages = atomic_read(&fs_info->async_delalloc_pages); 554 if (!async_pages) 555 goto skip_async; 556 557 /* 558 * Calculate how many compressed pages we want to be written 559 * before we continue. I.e if there are more async pages than we 560 * require wait_event will wait until nr_pages are written. 
561 */ 562 if (async_pages <= nr_pages) 563 async_pages = 0; 564 else 565 async_pages -= nr_pages; 566 567 wait_event(fs_info->async_submit_wait, 568 atomic_read(&fs_info->async_delalloc_pages) <= 569 (int)async_pages); 570 skip_async: 571 spin_lock(&space_info->lock); 572 if (list_empty(&space_info->tickets) && 573 list_empty(&space_info->priority_tickets)) { 574 spin_unlock(&space_info->lock); 575 break; 576 } 577 spin_unlock(&space_info->lock); 578 579 loops++; 580 if (wait_ordered && !trans) { 581 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 582 } else { 583 time_left = schedule_timeout_killable(1); 584 if (time_left) 585 break; 586 } 587 delalloc_bytes = percpu_counter_sum_positive( 588 &fs_info->delalloc_bytes); 589 dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes); 590 } 591 } 592 593 /** 594 * maybe_commit_transaction - possibly commit the transaction if its ok to 595 * @root - the root we're allocating for 596 * @bytes - the number of bytes we want to reserve 597 * @force - force the commit 598 * 599 * This will check to make sure that committing the transaction will actually 600 * get us somewhere and then commit the transaction if it does. Otherwise it 601 * will return -ENOSPC. 602 */ 603 static int may_commit_transaction(struct btrfs_fs_info *fs_info, 604 struct btrfs_space_info *space_info) 605 { 606 struct reserve_ticket *ticket = NULL; 607 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; 608 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; 609 struct btrfs_trans_handle *trans; 610 u64 bytes_needed; 611 u64 reclaim_bytes = 0; 612 u64 cur_free_bytes = 0; 613 614 trans = (struct btrfs_trans_handle *)current->journal_info; 615 if (trans) 616 return -EAGAIN; 617 618 spin_lock(&space_info->lock); 619 cur_free_bytes = btrfs_space_info_used(space_info, true); 620 if (cur_free_bytes < space_info->total_bytes) 621 cur_free_bytes = space_info->total_bytes - cur_free_bytes; 622 else 623 cur_free_bytes = 0; 624 625 if (!list_empty(&space_info->priority_tickets)) 626 ticket = list_first_entry(&space_info->priority_tickets, 627 struct reserve_ticket, list); 628 else if (!list_empty(&space_info->tickets)) 629 ticket = list_first_entry(&space_info->tickets, 630 struct reserve_ticket, list); 631 bytes_needed = (ticket) ? ticket->bytes : 0; 632 633 if (bytes_needed > cur_free_bytes) 634 bytes_needed -= cur_free_bytes; 635 else 636 bytes_needed = 0; 637 spin_unlock(&space_info->lock); 638 639 if (!bytes_needed) 640 return 0; 641 642 trans = btrfs_join_transaction(fs_info->extent_root); 643 if (IS_ERR(trans)) 644 return PTR_ERR(trans); 645 646 /* 647 * See if there is enough pinned space to make this reservation, or if 648 * we have block groups that are going to be freed, allowing us to 649 * possibly do a chunk allocation the next loop through. 650 */ 651 if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) || 652 __percpu_counter_compare(&space_info->total_bytes_pinned, 653 bytes_needed, 654 BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0) 655 goto commit; 656 657 /* 658 * See if there is some space in the delayed insertion reservation for 659 * this reservation. 
660 */ 661 if (space_info != delayed_rsv->space_info) 662 goto enospc; 663 664 spin_lock(&delayed_rsv->lock); 665 reclaim_bytes += delayed_rsv->reserved; 666 spin_unlock(&delayed_rsv->lock); 667 668 spin_lock(&delayed_refs_rsv->lock); 669 reclaim_bytes += delayed_refs_rsv->reserved; 670 spin_unlock(&delayed_refs_rsv->lock); 671 if (reclaim_bytes >= bytes_needed) 672 goto commit; 673 bytes_needed -= reclaim_bytes; 674 675 if (__percpu_counter_compare(&space_info->total_bytes_pinned, 676 bytes_needed, 677 BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0) 678 goto enospc; 679 680 commit: 681 return btrfs_commit_transaction(trans); 682 enospc: 683 btrfs_end_transaction(trans); 684 return -ENOSPC; 685 } 686 687 /* 688 * Try to flush some data based on policy set by @state. This is only advisory 689 * and may fail for various reasons. The caller is supposed to examine the 690 * state of @space_info to detect the outcome. 691 */ 692 static void flush_space(struct btrfs_fs_info *fs_info, 693 struct btrfs_space_info *space_info, u64 num_bytes, 694 int state) 695 { 696 struct btrfs_root *root = fs_info->extent_root; 697 struct btrfs_trans_handle *trans; 698 int nr; 699 int ret = 0; 700 701 switch (state) { 702 case FLUSH_DELAYED_ITEMS_NR: 703 case FLUSH_DELAYED_ITEMS: 704 if (state == FLUSH_DELAYED_ITEMS_NR) 705 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2; 706 else 707 nr = -1; 708 709 trans = btrfs_join_transaction(root); 710 if (IS_ERR(trans)) { 711 ret = PTR_ERR(trans); 712 break; 713 } 714 ret = btrfs_run_delayed_items_nr(trans, nr); 715 btrfs_end_transaction(trans); 716 break; 717 case FLUSH_DELALLOC: 718 case FLUSH_DELALLOC_WAIT: 719 shrink_delalloc(fs_info, num_bytes * 2, num_bytes, 720 state == FLUSH_DELALLOC_WAIT); 721 break; 722 case FLUSH_DELAYED_REFS_NR: 723 case FLUSH_DELAYED_REFS: 724 trans = btrfs_join_transaction(root); 725 if (IS_ERR(trans)) { 726 ret = PTR_ERR(trans); 727 break; 728 } 729 if (state == FLUSH_DELAYED_REFS_NR) 730 nr = calc_reclaim_items_nr(fs_info, num_bytes); 731 else 732 nr = 0; 733 btrfs_run_delayed_refs(trans, nr); 734 btrfs_end_transaction(trans); 735 break; 736 case ALLOC_CHUNK: 737 case ALLOC_CHUNK_FORCE: 738 trans = btrfs_join_transaction(root); 739 if (IS_ERR(trans)) { 740 ret = PTR_ERR(trans); 741 break; 742 } 743 ret = btrfs_chunk_alloc(trans, 744 btrfs_metadata_alloc_profile(fs_info), 745 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE : 746 CHUNK_ALLOC_FORCE); 747 btrfs_end_transaction(trans); 748 if (ret > 0 || ret == -ENOSPC) 749 ret = 0; 750 break; 751 case RUN_DELAYED_IPUTS: 752 /* 753 * If we have pending delayed iputs then we could free up a 754 * bunch of pinned space, so make sure we run the iputs before 755 * we do our pinned bytes check below. 
756 */ 757 btrfs_run_delayed_iputs(fs_info); 758 btrfs_wait_on_delayed_iputs(fs_info); 759 break; 760 case COMMIT_TRANS: 761 ret = may_commit_transaction(fs_info, space_info); 762 break; 763 default: 764 ret = -ENOSPC; 765 break; 766 } 767 768 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, 769 ret); 770 return; 771 } 772 773 static inline u64 774 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, 775 struct btrfs_space_info *space_info) 776 { 777 struct reserve_ticket *ticket; 778 u64 used; 779 u64 expected; 780 u64 to_reclaim = 0; 781 782 list_for_each_entry(ticket, &space_info->tickets, list) 783 to_reclaim += ticket->bytes; 784 list_for_each_entry(ticket, &space_info->priority_tickets, list) 785 to_reclaim += ticket->bytes; 786 if (to_reclaim) 787 return to_reclaim; 788 789 to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M); 790 if (btrfs_can_overcommit(fs_info, space_info, to_reclaim, 791 BTRFS_RESERVE_FLUSH_ALL)) 792 return 0; 793 794 used = btrfs_space_info_used(space_info, true); 795 796 if (btrfs_can_overcommit(fs_info, space_info, SZ_1M, 797 BTRFS_RESERVE_FLUSH_ALL)) 798 expected = div_factor_fine(space_info->total_bytes, 95); 799 else 800 expected = div_factor_fine(space_info->total_bytes, 90); 801 802 if (used > expected) 803 to_reclaim = used - expected; 804 else 805 to_reclaim = 0; 806 to_reclaim = min(to_reclaim, space_info->bytes_may_use + 807 space_info->bytes_reserved); 808 return to_reclaim; 809 } 810 811 static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, 812 struct btrfs_space_info *space_info, 813 u64 used) 814 { 815 u64 thresh = div_factor_fine(space_info->total_bytes, 98); 816 817 /* If we're just plain full then async reclaim just slows us down. */ 818 if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh) 819 return 0; 820 821 if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info)) 822 return 0; 823 824 return (used >= thresh && !btrfs_fs_closing(fs_info) && 825 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); 826 } 827 828 /* 829 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets 830 * @fs_info - fs_info for this fs 831 * @space_info - the space info we were flushing 832 * 833 * We call this when we've exhausted our flushing ability and haven't made 834 * progress in satisfying tickets. The reservation code handles tickets in 835 * order, so if there is a large ticket first and then smaller ones we could 836 * very well satisfy the smaller tickets. This will attempt to wake up any 837 * tickets in the list to catch this case. 838 * 839 * This function returns true if it was able to make progress by clearing out 840 * other tickets, or if it stumbles across a ticket that was smaller than the 841 * first ticket. 
842 */ 843 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, 844 struct btrfs_space_info *space_info) 845 { 846 struct reserve_ticket *ticket; 847 u64 tickets_id = space_info->tickets_id; 848 u64 first_ticket_bytes = 0; 849 850 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 851 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info"); 852 __btrfs_dump_space_info(fs_info, space_info); 853 } 854 855 while (!list_empty(&space_info->tickets) && 856 tickets_id == space_info->tickets_id) { 857 ticket = list_first_entry(&space_info->tickets, 858 struct reserve_ticket, list); 859 860 /* 861 * may_commit_transaction will avoid committing the transaction 862 * if it doesn't feel like the space reclaimed by the commit 863 * would result in the ticket succeeding. However if we have a 864 * smaller ticket in the queue it may be small enough to be 865 * satisified by committing the transaction, so if any 866 * subsequent ticket is smaller than the first ticket go ahead 867 * and send us back for another loop through the enospc flushing 868 * code. 869 */ 870 if (first_ticket_bytes == 0) 871 first_ticket_bytes = ticket->bytes; 872 else if (first_ticket_bytes > ticket->bytes) 873 return true; 874 875 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 876 btrfs_info(fs_info, "failing ticket with %llu bytes", 877 ticket->bytes); 878 879 list_del_init(&ticket->list); 880 ticket->error = -ENOSPC; 881 wake_up(&ticket->wait); 882 883 /* 884 * We're just throwing tickets away, so more flushing may not 885 * trip over btrfs_try_granting_tickets, so we need to call it 886 * here to see if we can make progress with the next ticket in 887 * the list. 888 */ 889 btrfs_try_granting_tickets(fs_info, space_info); 890 } 891 return (tickets_id != space_info->tickets_id); 892 } 893 894 /* 895 * This is for normal flushers, we can wait all goddamned day if we want to. We 896 * will loop and continuously try to flush as long as we are making progress. 897 * We count progress as clearing off tickets each time we have to loop. 898 */ 899 static void btrfs_async_reclaim_metadata_space(struct work_struct *work) 900 { 901 struct btrfs_fs_info *fs_info; 902 struct btrfs_space_info *space_info; 903 u64 to_reclaim; 904 int flush_state; 905 int commit_cycles = 0; 906 u64 last_tickets_id; 907 908 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); 909 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 910 911 spin_lock(&space_info->lock); 912 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 913 if (!to_reclaim) { 914 space_info->flush = 0; 915 spin_unlock(&space_info->lock); 916 return; 917 } 918 last_tickets_id = space_info->tickets_id; 919 spin_unlock(&space_info->lock); 920 921 flush_state = FLUSH_DELAYED_ITEMS_NR; 922 do { 923 flush_space(fs_info, space_info, to_reclaim, flush_state); 924 spin_lock(&space_info->lock); 925 if (list_empty(&space_info->tickets)) { 926 space_info->flush = 0; 927 spin_unlock(&space_info->lock); 928 return; 929 } 930 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, 931 space_info); 932 if (last_tickets_id == space_info->tickets_id) { 933 flush_state++; 934 } else { 935 last_tickets_id = space_info->tickets_id; 936 flush_state = FLUSH_DELAYED_ITEMS_NR; 937 if (commit_cycles) 938 commit_cycles--; 939 } 940 941 /* 942 * We don't want to force a chunk allocation until we've tried 943 * pretty hard to reclaim space. Think of the case where we 944 * freed up a bunch of space and so have a lot of pinned space 945 * to reclaim. 
		 * We would rather use that than possibly create an
		 * underutilized metadata chunk.  So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE
		 * and commit the transaction.  If nothing has changed the
		 * next go around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list.  After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket.  If that would happen, then
			 * the ticket's task would not know that space was
			 * reserved despite getting an error, resulting in a
			 * space leak (bytes_may_use counter of our space_info).
1042 */ 1043 list_del_init(&ticket->list); 1044 ticket->error = -EINTR; 1045 break; 1046 } 1047 spin_unlock(&space_info->lock); 1048 1049 schedule(); 1050 1051 finish_wait(&ticket->wait, &wait); 1052 spin_lock(&space_info->lock); 1053 } 1054 spin_unlock(&space_info->lock); 1055 } 1056 1057 /** 1058 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket 1059 * @fs_info - the fs 1060 * @space_info - the space_info for the reservation 1061 * @ticket - the ticket for the reservation 1062 * @flush - how much we can flush 1063 * 1064 * This does the work of figuring out how to flush for the ticket, waiting for 1065 * the reservation, and returning the appropriate error if there is one. 1066 */ 1067 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, 1068 struct btrfs_space_info *space_info, 1069 struct reserve_ticket *ticket, 1070 enum btrfs_reserve_flush_enum flush) 1071 { 1072 int ret; 1073 1074 switch (flush) { 1075 case BTRFS_RESERVE_FLUSH_ALL: 1076 wait_reserve_ticket(fs_info, space_info, ticket); 1077 break; 1078 case BTRFS_RESERVE_FLUSH_LIMIT: 1079 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1080 priority_flush_states, 1081 ARRAY_SIZE(priority_flush_states)); 1082 break; 1083 case BTRFS_RESERVE_FLUSH_EVICT: 1084 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1085 evict_flush_states, 1086 ARRAY_SIZE(evict_flush_states)); 1087 break; 1088 default: 1089 ASSERT(0); 1090 break; 1091 } 1092 1093 spin_lock(&space_info->lock); 1094 ret = ticket->error; 1095 if (ticket->bytes || ticket->error) { 1096 /* 1097 * Need to delete here for priority tickets. For regular tickets 1098 * either the async reclaim job deletes the ticket from the list 1099 * or we delete it ourselves at wait_reserve_ticket(). 1100 */ 1101 list_del_init(&ticket->list); 1102 if (!ret) 1103 ret = -ENOSPC; 1104 } 1105 spin_unlock(&space_info->lock); 1106 ASSERT(list_empty(&ticket->list)); 1107 /* 1108 * Check that we can't have an error set if the reservation succeeded, 1109 * as that would confuse tasks and lead them to error out without 1110 * releasing reserved space (if an error happens the expectation is that 1111 * space wasn't reserved at all). 1112 */ 1113 ASSERT(!(ticket->bytes == 0 && ticket->error)); 1114 return ret; 1115 } 1116 1117 /** 1118 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space 1119 * @root - the root we're allocating for 1120 * @space_info - the space info we want to allocate from 1121 * @orig_bytes - the number of bytes we want 1122 * @flush - whether or not we can flush to make our reservation 1123 * 1124 * This will reserve orig_bytes number of bytes from the space info associated 1125 * with the block_rsv. If there is not enough space it will make an attempt to 1126 * flush out space to make room. It will do this by flushing delalloc if 1127 * possible or committing the transaction. If flush is 0 then no attempts to 1128 * regain reservations will be made and this will fail if there is not enough 1129 * space already. 
1130 */ 1131 static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, 1132 struct btrfs_space_info *space_info, 1133 u64 orig_bytes, 1134 enum btrfs_reserve_flush_enum flush) 1135 { 1136 struct reserve_ticket ticket; 1137 u64 used; 1138 int ret = 0; 1139 bool pending_tickets; 1140 1141 ASSERT(orig_bytes); 1142 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); 1143 1144 spin_lock(&space_info->lock); 1145 ret = -ENOSPC; 1146 used = btrfs_space_info_used(space_info, true); 1147 pending_tickets = !list_empty(&space_info->tickets) || 1148 !list_empty(&space_info->priority_tickets); 1149 1150 /* 1151 * Carry on if we have enough space (short-circuit) OR call 1152 * can_overcommit() to ensure we can overcommit to continue. 1153 */ 1154 if (!pending_tickets && 1155 ((used + orig_bytes <= space_info->total_bytes) || 1156 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) { 1157 btrfs_space_info_update_bytes_may_use(fs_info, space_info, 1158 orig_bytes); 1159 ret = 0; 1160 } 1161 1162 /* 1163 * If we couldn't make a reservation then setup our reservation ticket 1164 * and kick the async worker if it's not already running. 1165 * 1166 * If we are a priority flusher then we just need to add our ticket to 1167 * the list and we will do our own flushing further down. 1168 */ 1169 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { 1170 ticket.bytes = orig_bytes; 1171 ticket.error = 0; 1172 init_waitqueue_head(&ticket.wait); 1173 if (flush == BTRFS_RESERVE_FLUSH_ALL) { 1174 list_add_tail(&ticket.list, &space_info->tickets); 1175 if (!space_info->flush) { 1176 space_info->flush = 1; 1177 trace_btrfs_trigger_flush(fs_info, 1178 space_info->flags, 1179 orig_bytes, flush, 1180 "enospc"); 1181 queue_work(system_unbound_wq, 1182 &fs_info->async_reclaim_work); 1183 } 1184 } else { 1185 list_add_tail(&ticket.list, 1186 &space_info->priority_tickets); 1187 } 1188 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 1189 used += orig_bytes; 1190 /* 1191 * We will do the space reservation dance during log replay, 1192 * which means we won't have fs_info->fs_root set, so don't do 1193 * the async reclaim as we will panic. 1194 */ 1195 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && 1196 need_do_async_reclaim(fs_info, space_info, used) && 1197 !work_busy(&fs_info->async_reclaim_work)) { 1198 trace_btrfs_trigger_flush(fs_info, space_info->flags, 1199 orig_bytes, flush, "preempt"); 1200 queue_work(system_unbound_wq, 1201 &fs_info->async_reclaim_work); 1202 } 1203 } 1204 spin_unlock(&space_info->lock); 1205 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH) 1206 return ret; 1207 1208 return handle_reserve_ticket(fs_info, space_info, &ticket, flush); 1209 } 1210 1211 /** 1212 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space 1213 * @root - the root we're allocating for 1214 * @block_rsv - the block_rsv we're allocating for 1215 * @orig_bytes - the number of bytes we want 1216 * @flush - whether or not we can flush to make our reservation 1217 * 1218 * This will reserve orig_bytes number of bytes from the space info associated 1219 * with the block_rsv. If there is not enough space it will make an attempt to 1220 * flush out space to make room. It will do this by flushing delalloc if 1221 * possible or committing the transaction. If flush is 0 then no attempts to 1222 * regain reservations will be made and this will fail if there is not enough 1223 * space already. 
1224 */ 1225 int btrfs_reserve_metadata_bytes(struct btrfs_root *root, 1226 struct btrfs_block_rsv *block_rsv, 1227 u64 orig_bytes, 1228 enum btrfs_reserve_flush_enum flush) 1229 { 1230 struct btrfs_fs_info *fs_info = root->fs_info; 1231 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 1232 int ret; 1233 1234 ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info, 1235 orig_bytes, flush); 1236 if (ret == -ENOSPC && 1237 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) { 1238 if (block_rsv != global_rsv && 1239 !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes)) 1240 ret = 0; 1241 } 1242 if (ret == -ENOSPC) { 1243 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1244 block_rsv->space_info->flags, 1245 orig_bytes, 1); 1246 1247 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1248 btrfs_dump_space_info(fs_info, block_rsv->space_info, 1249 orig_bytes, 0); 1250 } 1251 return ret; 1252 } 1253