// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a
 *   space_info for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.  The counters move as follows (there is also
 *   a short sketch of these transitions after this comment):
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space.
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be
 *     set on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use
 *     and the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it is we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order
 *   to reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however hold reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation because of
 *     overcommit: we don't want to end up with a lot of useless metadata space
 *     when our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     may_commit_transaction() is the ultimate arbiter on whether we commit the
 *     transaction or not.  In order to avoid constantly churning we do all the
 *     above flushing first and then commit the transaction as the last resort.
 *     However we need to take into account things like pinned space that would
 *     be freed, plus any delayed work we may not have gotten rid of in the case
 *     of metadata.
 *
 *   FORCE_COMMIT_TRANS
 *     For use by the preemptive flusher.  We use this to bypass the ticketing
 *     checks in may_commit_transaction, as we have more information about the
 *     overall state of the system and may want to commit the transaction ahead
 *     of actual ENOSPC conditions.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */
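/*
 * A minimal sketch of the counter transitions described above, in one place.
 * This helper is hypothetical and purely illustrative; the real updates are
 * done by btrfs_space_info_update_bytes_may_use(), btrfs_add_reserved_bytes()
 * and btrfs_update_block_group(), with the appropriate locks held.
 */
static void __maybe_unused example_reservation_flow(struct btrfs_space_info *sinfo,
						    u64 num_bytes, u64 extent_bytes)
{
	/* ->reserve: the worst case size is carved out of the free space. */
	sinfo->bytes_may_use += num_bytes;

	/* ->extent allocation: part of the reservation becomes a real extent. */
	sinfo->bytes_may_use -= num_bytes;
	sinfo->bytes_reserved += extent_bytes;

	/* ->insert reference: the extent is now accounted as used on disk. */
	sinfo->bytes_reserved -= extent_bytes;
	sinfo->bytes_used += extent_bytes;
}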
u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}
void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, head, list) {
		if (found->flags & flags)
			return found;
	}
	return NULL;
}
static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
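/*
 * Worked example for the overcommit math above (hypothetical numbers): with
 * 8GiB of unallocated device space and a RAID1 metadata profile (factor 2),
 * avail starts out as 4GiB.  A BTRFS_RESERVE_FLUSH_ALL reservation may then
 * overcommit by up to 4GiB >> 3 = 512MiB, while the other flush modes may
 * overcommit by up to 4GiB >> 1 = 2GiB, and btrfs_can_overcommit() succeeds
 * as long as used + bytes still fits below total_bytes plus that allowance.
 */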
static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}
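/*
 * A minimal sketch of the waiter side of the handshake above.  This helper is
 * hypothetical and only illustrative; the real waiter is wait_reserve_ticket()
 * further down, which also handles the task being killed.  The contract is:
 * the granting side zeroes ticket->bytes (success) or sets ticket->error
 * (failure), and then wakes ticket->wait.
 */
static void __maybe_unused example_wait_for_grant(struct reserve_ticket *ticket)
{
	/* Sleep until btrfs_try_granting_tickets() resolves the ticket. */
	wait_event(ticket->wait,
		   READ_ONCE(ticket->bytes) == 0 ||
		   READ_ONCE(ticket->error) != 0);
}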
"[readonly]" : ""); 466 spin_unlock(&cache->lock); 467 btrfs_dump_free_space(cache, bytes); 468 } 469 if (++index < BTRFS_NR_RAID_TYPES) 470 goto again; 471 up_read(&info->groups_sem); 472 } 473 474 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, 475 u64 to_reclaim) 476 { 477 u64 bytes; 478 u64 nr; 479 480 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 481 nr = div64_u64(to_reclaim, bytes); 482 if (!nr) 483 nr = 1; 484 return nr; 485 } 486 487 #define EXTENT_SIZE_PER_ITEM SZ_256K 488 489 /* 490 * shrink metadata reservation for delalloc 491 */ 492 static void shrink_delalloc(struct btrfs_fs_info *fs_info, 493 struct btrfs_space_info *space_info, 494 u64 to_reclaim, bool wait_ordered) 495 { 496 struct btrfs_trans_handle *trans; 497 u64 delalloc_bytes; 498 u64 ordered_bytes; 499 u64 items; 500 long time_left; 501 int loops; 502 503 /* Calc the number of the pages we need flush for space reservation */ 504 if (to_reclaim == U64_MAX) { 505 items = U64_MAX; 506 } else { 507 /* 508 * to_reclaim is set to however much metadata we need to 509 * reclaim, but reclaiming that much data doesn't really track 510 * exactly, so increase the amount to reclaim by 2x in order to 511 * make sure we're flushing enough delalloc to hopefully reclaim 512 * some metadata reservations. 513 */ 514 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; 515 to_reclaim = items * EXTENT_SIZE_PER_ITEM; 516 } 517 518 trans = (struct btrfs_trans_handle *)current->journal_info; 519 520 delalloc_bytes = percpu_counter_sum_positive( 521 &fs_info->delalloc_bytes); 522 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); 523 if (delalloc_bytes == 0 && ordered_bytes == 0) 524 return; 525 526 /* 527 * If we are doing more ordered than delalloc we need to just wait on 528 * ordered extents, otherwise we'll waste time trying to flush delalloc 529 * that likely won't give us the space back we need. 530 */ 531 if (ordered_bytes > delalloc_bytes) 532 wait_ordered = true; 533 534 loops = 0; 535 while ((delalloc_bytes || ordered_bytes) && loops < 3) { 536 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT; 537 long nr_pages = min_t(u64, temp, LONG_MAX); 538 539 btrfs_start_delalloc_roots(fs_info, nr_pages, true); 540 541 loops++; 542 if (wait_ordered && !trans) { 543 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 544 } else { 545 time_left = schedule_timeout_killable(1); 546 if (time_left) 547 break; 548 } 549 550 spin_lock(&space_info->lock); 551 if (list_empty(&space_info->tickets) && 552 list_empty(&space_info->priority_tickets)) { 553 spin_unlock(&space_info->lock); 554 break; 555 } 556 spin_unlock(&space_info->lock); 557 558 delalloc_bytes = percpu_counter_sum_positive( 559 &fs_info->delalloc_bytes); 560 ordered_bytes = percpu_counter_sum_positive( 561 &fs_info->ordered_bytes); 562 } 563 } 564 565 /** 566 * Possibly commit the transaction if its ok to 567 * 568 * @fs_info: the filesystem 569 * @space_info: space_info we are checking for commit, either data or metadata 570 * 571 * This will check to make sure that committing the transaction will actually 572 * get us somewhere and then commit the transaction if it does. Otherwise it 573 * will return -ENOSPC. 
#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 ordered_bytes;
	u64 items;
	long time_left;
	int loops;

	/* Calculate the number of pages we need to flush for this reservation. */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly, so increase the amount to reclaim by 2x in order to
		 * make sure we're flushing enough delalloc to hopefully reclaim
		 * some metadata reservations.
		 */
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
		to_reclaim = items * EXTENT_SIZE_PER_ITEM;
	}

	trans = (struct btrfs_trans_handle *)current->journal_info;

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	if (delalloc_bytes == 0 && ordered_bytes == 0)
		return;

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (ordered_bytes > delalloc_bytes)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
		long nr_pages = min_t(u64, temp, LONG_MAX);

		btrfs_start_delalloc_roots(fs_info, nr_pages, true);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		ordered_bytes = percpu_counter_sum_positive(
						&fs_info->ordered_bytes);
	}
}
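/*
 * Worked example for the 2x scaling above (hypothetical numbers): for a 1MiB
 * metadata reclaim target and a 256KiB worst case insert size, we get
 * calc_reclaim_items_nr() = 4, doubled to 8 items, and therefore flush
 * 8 * EXTENT_SIZE_PER_ITEM = 2MiB of delalloc in the hope that completing
 * those ordered extents releases at least the 1MiB of metadata we need.
 */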
/**
 * Possibly commit the transaction if it's OK to do so
 *
 * @fs_info:    the filesystem
 * @space_info: space_info we are checking for commit, either data or metadata
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
	struct btrfs_trans_handle *trans;
	u64 reclaim_bytes = 0;
	u64 bytes_needed = 0;
	u64 cur_free_bytes = 0;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	cur_free_bytes = btrfs_space_info_used(space_info, true);
	if (cur_free_bytes < space_info->total_bytes)
		cur_free_bytes = space_info->total_bytes - cur_free_bytes;
	else
		cur_free_bytes = 0;

	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	if (ticket)
		bytes_needed = ticket->bytes;

	if (bytes_needed > cur_free_bytes)
		bytes_needed -= cur_free_bytes;
	else
		bytes_needed = 0;
	spin_unlock(&space_info->lock);

	if (!bytes_needed)
		return 0;

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * See if there is enough pinned space to make this reservation, or if
	 * we have block groups that are going to be freed, allowing us to
	 * possibly do a chunk allocation the next loop through.
	 */
	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
	    __percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reserve for this
	 * reservation.  If the space_info's don't match (like for DATA or
	 * SYSTEM) then just go enospc, reclaiming this space won't recover any
	 * space to satisfy those reservations.
	 */
	if (space_info != delayed_rsv->space_info)
		goto enospc;

	spin_lock(&delayed_rsv->lock);
	reclaim_bytes += delayed_rsv->reserved;
	spin_unlock(&delayed_rsv->lock);

	spin_lock(&delayed_refs_rsv->lock);
	reclaim_bytes += delayed_refs_rsv->reserved;
	spin_unlock(&delayed_refs_rsv->lock);

	spin_lock(&trans_rsv->lock);
	reclaim_bytes += trans_rsv->reserved;
	spin_unlock(&trans_rsv->lock);

	if (reclaim_bytes >= bytes_needed)
		goto commit;
	bytes_needed -= reclaim_bytes;

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
		goto enospc;

commit:
	return btrfs_commit_transaction(trans);
enospc:
	btrfs_end_transaction(trans);
	return -ENOSPC;
}
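/*
 * The commit decision above reduces to the check sketched below.  This helper
 * is hypothetical and for illustration only; the real code also takes the
 * BTRFS_TRANS_HAVE_FREE_BGS shortcut and has to account for the percpu
 * counter batching when comparing against total_bytes_pinned.
 */
static bool __maybe_unused example_commit_may_help(u64 bytes_needed,
						   u64 pinned_bytes,
						   u64 reclaimable_rsv_bytes)
{
	/* Block reserves released at commit time may cover the ticket alone... */
	if (reclaimable_rsv_bytes >= bytes_needed)
		return true;
	/* ...otherwise the pinned bytes must cover whatever remains. */
	return pinned_bytes >= bytes_needed - reclaimable_rsv_bytes;
}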
/*
 * Try to flush some data based on policy set by @state.  This is only advisory
 * and may fail for various reasons.  The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			enum btrfs_flush_state state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, space_info, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	case FORCE_COMMIT_TRANS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_commit_transaction(trans);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
	return;
}
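/*
 * Note on how flush_space() is driven (illustrative): each of the state
 * machines below walks an ordered list of the states above, from cheapest
 * (running delayed items) to most expensive (committing the transaction),
 * and re-checks the outstanding tickets between steps, so the expensive
 * states only run when the cheap ones did not free enough space.
 */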
static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 expected;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space.  If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (btrfs_can_overcommit(fs_info, space_info, to_reclaim,
				 BTRFS_RESERVE_FLUSH_ALL))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (btrfs_can_overcommit(fs_info, space_info, SZ_1M,
				 BTRFS_RESERVE_FLUSH_ALL))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}

static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
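/*
 * Worked example for the overage logic above (hypothetical numbers): with
 * total_bytes = 1GiB, avail = 128MiB of overcommit allowance and
 * used = 1.25GiB, we are 128MiB past what we can cover, so that overage is
 * added to the reclaim target on top of space_info->reclaim_size.
 * Separately, need_do_async_reclaim() only fires while used is above 98% of
 * total_bytes and there is still something worth reclaiming.
 */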
static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	u64 first_ticket_bytes = 0;

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (ticket->steal &&
		    steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		/*
		 * may_commit_transaction will avoid committing the transaction
		 * if it doesn't feel like the space reclaimed by the commit
		 * would result in the ticket succeeding.  However if we have a
		 * smaller ticket in the queue it may be small enough to be
		 * satisfied by committing the transaction, so if any
		 * subsequent ticket is smaller than the first ticket go ahead
		 * and send us back for another loop through the enospc flushing
		 * code.
		 */
		if (first_ticket_bytes == 0)
			first_ticket_bytes = ticket->bytes;
		else if (first_ticket_bytes > ticket->bytes)
			return true;

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}
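/*
 * Example of the "smaller ticket" case above (hypothetical sizes): if a 32MiB
 * ticket heads the queue followed by a 256KiB one, we fail the 32MiB ticket,
 * then notice 256KiB < 32MiB and return true, so the caller loops back
 * through the flushing states and the small ticket gets a full set of
 * flushing attempts instead of being failed outright.
 */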
/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	enum btrfs_flush_state flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create an
		 * underutilized metadata chunk.  So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction.  If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}
/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 80% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv;
	struct btrfs_block_rsv *trans_rsv;
	u64 used;

	fs_info = container_of(work, struct btrfs_fs_info,
			       preempt_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	delayed_block_rsv = &fs_info->delayed_block_rsv;
	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	global_rsv = &fs_info->global_block_rsv;
	trans_rsv = &fs_info->trans_block_rsv;

	spin_lock(&space_info->lock);
	used = btrfs_space_info_used(space_info, true);
	while (need_do_async_reclaim(fs_info, space_info, used)) {
		enum btrfs_flush_state flush;
		u64 delalloc_size = 0;
		u64 to_reclaim, block_rsv_size;
		u64 global_rsv_size = global_rsv->reserved;

		/*
		 * We don't have a precise counter for the metadata being
		 * reserved for delalloc, so we'll approximate it by subtracting
		 * out the block rsv's space from the bytes_may_use.  If that
		 * amount is higher than the individual reserves, then we can
		 * assume it's tied up in delalloc reservations.
		 */
		block_rsv_size = global_rsv_size +
			delayed_block_rsv->reserved +
			delayed_refs_rsv->reserved +
			trans_rsv->reserved;
		if (block_rsv_size < space_info->bytes_may_use)
			delalloc_size = space_info->bytes_may_use - block_rsv_size;
		spin_unlock(&space_info->lock);

		/*
		 * We don't want to include the global_rsv in our calculation,
		 * because that's space we can't touch.  Subtract it from the
		 * block_rsv_size for the next checks.
		 */
		block_rsv_size -= global_rsv_size;

		/*
		 * We really want to avoid flushing delalloc too much, as it
		 * could result in poor allocation patterns, so only flush it if
		 * it's larger than the rest of the pools combined.
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (delayed_block_rsv->reserved +
			    delayed_refs_rsv->reserved)) {
			to_reclaim = space_info->bytes_pinned;
			flush = FORCE_COMMIT_TRANS;
		} else if (delayed_block_rsv->reserved >
			   delayed_refs_rsv->reserved) {
			to_reclaim = delayed_block_rsv->reserved;
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = delayed_refs_rsv->reserved;
			flush = FLUSH_DELAYED_REFS_NR;
		}

		/*
		 * We don't want to reclaim everything, just a portion, so scale
		 * down the to_reclaim by 1/4.  If it takes us down to 0,
		 * reclaim one item's worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush);
		cond_resched();
		spin_lock(&space_info->lock);
		used = btrfs_space_info_used(space_info, true);
	}
	spin_unlock(&space_info->lock);
}
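/*
 * Worked example for the delalloc estimate above (hypothetical numbers): if
 * bytes_may_use is 900MiB while the global, delayed item, delayed refs and
 * trans rsvs together hold 600MiB, the remaining 300MiB is assumed to be
 * delalloc reservations.  Because the global rsv is untouchable it is
 * subtracted back out before deciding which pool is largest, so delalloc is
 * flushed only when it exceeds the other (reclaimable) pools combined.
 */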
/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier.  Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent.  This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space.  But again this space is not
 *   immediately re-usable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * FLUSH_DELAYED_REFS
 *   The above two cases generate delayed refs that will affect
 *   ->total_bytes_pinned.  However this counter can be inconsistent with
 *   reality if there are outstanding delayed refs.  This is because we adjust
 *   the counter based solely on the current set of delayed refs and disregard
 *   any on-disk state which might include more refs.  So for example, if we
 *   have an extent with 2 references, but we only drop 1, we'll see that there
 *   is a negative delayed ref count for the extent and assume that the space
 *   will be freed, and thus increase ->total_bytes_pinned.
 *
 *   Running the delayed refs gives us the actual real view of what will be
 *   freed at the transaction commit time.  This stage will not actually free
 *   space for us, it just makes sure that may_commit_transaction() has all of
 *   the information it needs to make the right decision.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by the previous
 *   two stages.  We will not commit the transaction if we don't think we're
 *   likely to satisfy our request, which means if our current free space +
 *   total_bytes_pinned < reservation we will not commit.  This is why the
 *   previous states are actually important, to make sure we know for sure
 *   whether committing the transaction will allow us to make progress.
 *
 * ALLOC_CHUNK_FORCE
 *   For data we start with alloc chunk force, however we could have been full
 *   before, and then the transaction commit could have freed new block groups,
 *   so if we now have space to allocate do the force chunk allocation.
 */
static const enum btrfs_flush_state data_flush_states[] = {
	FLUSH_DELALLOC_WAIT,
	RUN_DELAYED_IPUTS,
	FLUSH_DELAYED_REFS,
	COMMIT_TRANS,
	ALLOC_CHUNK_FORCE,
};
static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 last_tickets_id;
	enum btrfs_flush_state flush_state = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
	space_info = fs_info->data_sinfo;

	spin_lock(&space_info->lock);
	if (list_empty(&space_info->tickets)) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		last_tickets_id = space_info->tickets_id;
		spin_unlock(&space_info->lock);
	}

	while (flush_state < ARRAY_SIZE(data_flush_states)) {
		flush_space(fs_info, space_info, U64_MAX,
			    data_flush_states[flush_state]);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = 0;
		}

		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
			if (space_info->full) {
				if (maybe_fail_all_tickets(fs_info, space_info))
					flush_state = 0;
				else
					space_info->flush = 0;
			} else {
				flush_state = 0;
			}
		}
		spin_unlock(&space_info->lock);
	}
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
	INIT_WORK(&fs_info->preempt_reclaim_work,
		  btrfs_preempt_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					struct reserve_ticket *ticket)
{
	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	}
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list.  After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket.  If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}
/**
 * Do the appropriate flushing and waiting for a ticket
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		/*
		 * We were a priority ticket, so we need to delete ourselves
		 * from the list.  Because we could have other priority tickets
		 * behind us that require less space, run
		 * btrfs_try_granting_tickets() to see if their reservations can
		 * now be made.
		 */
		if (!list_empty(&ticket->list)) {
			remove_ticket(space_info, ticket);
			btrfs_try_granting_tickets(fs_info, space_info);
		}

		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
				   start_ns, flush, ticket->error);
	return ret;
}

/*
 * This returns true if this flush state will go through the ordinary flushing
 * code.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return	(flush == BTRFS_RESERVE_FLUSH_ALL) ||
		(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}
/**
 * Try to reserve bytes from the block_rsv's space
 *
 * @fs_info:    the filesystem
 * @space_info: space info we want to allocate from
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_bytes(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *space_info, u64 orig_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	struct work_struct *async_work;
	struct reserve_ticket ticket;
	u64 start_ns = 0;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	if (flush == BTRFS_RESERVE_FLUSH_DATA)
		async_work = &fs_info->async_data_reclaim_work;
	else
		async_work = &fs_info->async_reclaim_work;

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * btrfs_can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info, used) &&
		    !work_busy(&fs_info->preempt_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
				     orig_bytes, flush);
}

/**
 * Try to reserve metadata bytes from the block_rsv's space
 *
 * @root:       the root we're allocating for
 * @block_rsv:  block_rsv we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;

	ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}

/**
 * Try to reserve data bytes for an allocation
 *
 * @fs_info: the filesystem
 * @bytes:   number of bytes we need
 * @flush:   how we are allowed to flush
 *
 * This will reserve bytes from the data space info.  If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	int ret;

	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);

	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
	}
	return ret;
}
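/*
 * Sketch of a typical caller of btrfs_reserve_data_bytes() (hypothetical
 * helper, for illustration only): reserve up front, and treat any error as
 * "nothing was reserved", matching the ticket contract asserted in
 * handle_reserve_ticket().
 */
static int __maybe_unused example_reserve_for_write(struct btrfs_fs_info *fs_info,
						    u64 len)
{
	int ret;

	ret = btrfs_reserve_data_bytes(fs_info, len, BTRFS_RESERVE_FLUSH_DATA);
	if (ret)
		return ret;	/* -ENOSPC or -EINTR; no space is held. */

	/* ... do the write; on later failure the caller must release len. */
	return 0;
}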