1 // SPDX-License-Identifier: GPL-2.0 2 3 #include "misc.h" 4 #include "ctree.h" 5 #include "space-info.h" 6 #include "sysfs.h" 7 #include "volumes.h" 8 #include "free-space-cache.h" 9 #include "ordered-data.h" 10 #include "transaction.h" 11 #include "block-group.h" 12 13 /* 14 * HOW DOES SPACE RESERVATION WORK 15 * 16 * If you want to know about delalloc specifically, there is a separate comment 17 * for that with the delalloc code. This comment is about how the whole system 18 * works generally. 19 * 20 * BASIC CONCEPTS 21 * 22 * 1) space_info. This is the ultimate arbiter of how much space we can use. 23 * There's a description of the bytes_ fields with the struct declaration, 24 * refer to that for specifics on each field. Suffice it to say that for 25 * reservations we care about total_bytes - SUM(space_info->bytes_) when 26 * determining if there is space to make an allocation. There is a space_info 27 * for METADATA, SYSTEM, and DATA areas. 28 * 29 * 2) block_rsv's. These are basically buckets for every different type of 30 * metadata reservation we have. You can see the comment in the block_rsv 31 * code on the rules for each type, but generally block_rsv->reserved is how 32 * much space is accounted for in space_info->bytes_may_use. 33 * 34 * 3) btrfs_calc*_size. These are the worst case calculations we used based 35 * on the number of items we will want to modify. We have one for changing 36 * items, and one for inserting new items. Generally we use these helpers to 37 * determine the size of the block reserves, and then use the actual bytes 38 * values to adjust the space_info counters. 39 * 40 * MAKING RESERVATIONS, THE NORMAL CASE 41 * 42 * We call into either btrfs_reserve_data_bytes() or 43 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with 44 * num_bytes we want to reserve. 45 * 46 * ->reserve 47 * space_info->bytes_may_reserve += num_bytes 48 * 49 * ->extent allocation 50 * Call btrfs_add_reserved_bytes() which does 51 * space_info->bytes_may_reserve -= num_bytes 52 * space_info->bytes_reserved += extent_bytes 53 * 54 * ->insert reference 55 * Call btrfs_update_block_group() which does 56 * space_info->bytes_reserved -= extent_bytes 57 * space_info->bytes_used += extent_bytes 58 * 59 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority) 60 * 61 * Assume we are unable to simply make the reservation because we do not have 62 * enough space 63 * 64 * -> __reserve_bytes 65 * create a reserve_ticket with ->bytes set to our reservation, add it to 66 * the tail of space_info->tickets, kick async flush thread 67 * 68 * ->handle_reserve_ticket 69 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set 70 * on the ticket. 71 * 72 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space 73 * Flushes various things attempting to free up space. 74 * 75 * -> btrfs_try_granting_tickets() 76 * This is called by anything that either subtracts space from 77 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the 78 * space_info->total_bytes. This loops through the ->priority_tickets and 79 * then the ->tickets list checking to see if the reservation can be 80 * completed. If it can the space is added to space_info->bytes_may_use and 81 * the ticket is woken up. 82 * 83 * -> ticket wakeup 84 * Check if ->bytes == 0, if it does we got our reservation and we can carry 85 * on, if not return the appropriate error (ENOSPC, but can be EINTR if we 86 * were interrupted.) 87 * 88 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY 89 * 90 * Same as the above, except we add ourselves to the 91 * space_info->priority_tickets, and we do not use ticket->wait, we simply 92 * call flush_space() ourselves for the states that are safe for us to call 93 * without deadlocking and hope for the best. 94 * 95 * THE FLUSHING STATES 96 * 97 * Generally speaking we will have two cases for each state, a "nice" state 98 * and a "ALL THE THINGS" state. In btrfs we delay a lot of work in order to 99 * reduce the locking over head on the various trees, and even to keep from 100 * doing any work at all in the case of delayed refs. Each of these delayed 101 * things however hold reservations, and so letting them run allows us to 102 * reclaim space so we can make new reservations. 103 * 104 * FLUSH_DELAYED_ITEMS 105 * Every inode has a delayed item to update the inode. Take a simple write 106 * for example, we would update the inode item at write time to update the 107 * mtime, and then again at finish_ordered_io() time in order to update the 108 * isize or bytes. We keep these delayed items to coalesce these operations 109 * into a single operation done on demand. These are an easy way to reclaim 110 * metadata space. 111 * 112 * FLUSH_DELALLOC 113 * Look at the delalloc comment to get an idea of how much space is reserved 114 * for delayed allocation. We can reclaim some of this space simply by 115 * running delalloc, but usually we need to wait for ordered extents to 116 * reclaim the bulk of this space. 117 * 118 * FLUSH_DELAYED_REFS 119 * We have a block reserve for the outstanding delayed refs space, and every 120 * delayed ref operation holds a reservation. Running these is a quick way 121 * to reclaim space, but we want to hold this until the end because COW can 122 * churn a lot and we can avoid making some extent tree modifications if we 123 * are able to delay for as long as possible. 124 * 125 * ALLOC_CHUNK 126 * We will skip this the first time through space reservation, because of 127 * overcommit and we don't want to have a lot of useless metadata space when 128 * our worst case reservations will likely never come true. 129 * 130 * RUN_DELAYED_IPUTS 131 * If we're freeing inodes we're likely freeing checksums, file extent 132 * items, and extent tree items. Loads of space could be freed up by these 133 * operations, however they won't be usable until the transaction commits. 134 * 135 * COMMIT_TRANS 136 * may_commit_transaction() is the ultimate arbiter on whether we commit the 137 * transaction or not. In order to avoid constantly churning we do all the 138 * above flushing first and then commit the transaction as the last resort. 139 * However we need to take into account things like pinned space that would 140 * be freed, plus any delayed work we may not have gotten rid of in the case 141 * of metadata. 142 * 143 * OVERCOMMIT 144 * 145 * Because we hold so many reservations for metadata we will allow you to 146 * reserve more space than is currently free in the currently allocate 147 * metadata space. This only happens with metadata, data does not allow 148 * overcommitting. 149 * 150 * You can see the current logic for when we allow overcommit in 151 * btrfs_can_overcommit(), but it only applies to unallocated space. If there 152 * is no unallocated space to be had, all reservations are kept within the 153 * free space in the allocated metadata chunks. 154 * 155 * Because of overcommitting, you generally want to use the 156 * btrfs_can_overcommit() logic for metadata allocations, as it does the right 157 * thing with or without extra unallocated space. 158 */ 159 160 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, 161 bool may_use_included) 162 { 163 ASSERT(s_info); 164 return s_info->bytes_used + s_info->bytes_reserved + 165 s_info->bytes_pinned + s_info->bytes_readonly + 166 (may_use_included ? s_info->bytes_may_use : 0); 167 } 168 169 /* 170 * after adding space to the filesystem, we need to clear the full flags 171 * on all the space infos. 172 */ 173 void btrfs_clear_space_info_full(struct btrfs_fs_info *info) 174 { 175 struct list_head *head = &info->space_info; 176 struct btrfs_space_info *found; 177 178 list_for_each_entry(found, head, list) 179 found->full = 0; 180 } 181 182 static int create_space_info(struct btrfs_fs_info *info, u64 flags) 183 { 184 185 struct btrfs_space_info *space_info; 186 int i; 187 int ret; 188 189 space_info = kzalloc(sizeof(*space_info), GFP_NOFS); 190 if (!space_info) 191 return -ENOMEM; 192 193 ret = percpu_counter_init(&space_info->total_bytes_pinned, 0, 194 GFP_KERNEL); 195 if (ret) { 196 kfree(space_info); 197 return ret; 198 } 199 200 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 201 INIT_LIST_HEAD(&space_info->block_groups[i]); 202 init_rwsem(&space_info->groups_sem); 203 spin_lock_init(&space_info->lock); 204 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; 205 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 206 INIT_LIST_HEAD(&space_info->ro_bgs); 207 INIT_LIST_HEAD(&space_info->tickets); 208 INIT_LIST_HEAD(&space_info->priority_tickets); 209 210 ret = btrfs_sysfs_add_space_info_type(info, space_info); 211 if (ret) 212 return ret; 213 214 list_add(&space_info->list, &info->space_info); 215 if (flags & BTRFS_BLOCK_GROUP_DATA) 216 info->data_sinfo = space_info; 217 218 return ret; 219 } 220 221 int btrfs_init_space_info(struct btrfs_fs_info *fs_info) 222 { 223 struct btrfs_super_block *disk_super; 224 u64 features; 225 u64 flags; 226 int mixed = 0; 227 int ret; 228 229 disk_super = fs_info->super_copy; 230 if (!btrfs_super_root(disk_super)) 231 return -EINVAL; 232 233 features = btrfs_super_incompat_flags(disk_super); 234 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 235 mixed = 1; 236 237 flags = BTRFS_BLOCK_GROUP_SYSTEM; 238 ret = create_space_info(fs_info, flags); 239 if (ret) 240 goto out; 241 242 if (mixed) { 243 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; 244 ret = create_space_info(fs_info, flags); 245 } else { 246 flags = BTRFS_BLOCK_GROUP_METADATA; 247 ret = create_space_info(fs_info, flags); 248 if (ret) 249 goto out; 250 251 flags = BTRFS_BLOCK_GROUP_DATA; 252 ret = create_space_info(fs_info, flags); 253 } 254 out: 255 return ret; 256 } 257 258 void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags, 259 u64 total_bytes, u64 bytes_used, 260 u64 bytes_readonly, 261 struct btrfs_space_info **space_info) 262 { 263 struct btrfs_space_info *found; 264 int factor; 265 266 factor = btrfs_bg_type_to_factor(flags); 267 268 found = btrfs_find_space_info(info, flags); 269 ASSERT(found); 270 spin_lock(&found->lock); 271 found->total_bytes += total_bytes; 272 found->disk_total += total_bytes * factor; 273 found->bytes_used += bytes_used; 274 found->disk_used += bytes_used * factor; 275 found->bytes_readonly += bytes_readonly; 276 if (total_bytes > 0) 277 found->full = 0; 278 btrfs_try_granting_tickets(info, found); 279 spin_unlock(&found->lock); 280 *space_info = found; 281 } 282 283 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, 284 u64 flags) 285 { 286 struct list_head *head = &info->space_info; 287 struct btrfs_space_info *found; 288 289 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK; 290 291 list_for_each_entry(found, head, list) { 292 if (found->flags & flags) 293 return found; 294 } 295 return NULL; 296 } 297 298 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, 299 struct btrfs_space_info *space_info, 300 enum btrfs_reserve_flush_enum flush) 301 { 302 u64 profile; 303 u64 avail; 304 int factor; 305 306 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) 307 profile = btrfs_system_alloc_profile(fs_info); 308 else 309 profile = btrfs_metadata_alloc_profile(fs_info); 310 311 avail = atomic64_read(&fs_info->free_chunk_space); 312 313 /* 314 * If we have dup, raid1 or raid10 then only half of the free 315 * space is actually usable. For raid56, the space info used 316 * doesn't include the parity drive, so we don't have to 317 * change the math 318 */ 319 factor = btrfs_bg_type_to_factor(profile); 320 avail = div_u64(avail, factor); 321 322 /* 323 * If we aren't flushing all things, let us overcommit up to 324 * 1/2th of the space. If we can flush, don't let us overcommit 325 * too much, let it overcommit up to 1/8 of the space. 326 */ 327 if (flush == BTRFS_RESERVE_FLUSH_ALL) 328 avail >>= 3; 329 else 330 avail >>= 1; 331 return avail; 332 } 333 334 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, 335 struct btrfs_space_info *space_info, u64 bytes, 336 enum btrfs_reserve_flush_enum flush) 337 { 338 u64 avail; 339 u64 used; 340 341 /* Don't overcommit when in mixed mode */ 342 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) 343 return 0; 344 345 used = btrfs_space_info_used(space_info, true); 346 avail = calc_available_free_space(fs_info, space_info, flush); 347 348 if (used + bytes < space_info->total_bytes + avail) 349 return 1; 350 return 0; 351 } 352 353 static void remove_ticket(struct btrfs_space_info *space_info, 354 struct reserve_ticket *ticket) 355 { 356 if (!list_empty(&ticket->list)) { 357 list_del_init(&ticket->list); 358 ASSERT(space_info->reclaim_size >= ticket->bytes); 359 space_info->reclaim_size -= ticket->bytes; 360 } 361 } 362 363 /* 364 * This is for space we already have accounted in space_info->bytes_may_use, so 365 * basically when we're returning space from block_rsv's. 366 */ 367 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, 368 struct btrfs_space_info *space_info) 369 { 370 struct list_head *head; 371 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; 372 373 lockdep_assert_held(&space_info->lock); 374 375 head = &space_info->priority_tickets; 376 again: 377 while (!list_empty(head)) { 378 struct reserve_ticket *ticket; 379 u64 used = btrfs_space_info_used(space_info, true); 380 381 ticket = list_first_entry(head, struct reserve_ticket, list); 382 383 /* Check and see if our ticket can be satisified now. */ 384 if ((used + ticket->bytes <= space_info->total_bytes) || 385 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, 386 flush)) { 387 btrfs_space_info_update_bytes_may_use(fs_info, 388 space_info, 389 ticket->bytes); 390 remove_ticket(space_info, ticket); 391 ticket->bytes = 0; 392 space_info->tickets_id++; 393 wake_up(&ticket->wait); 394 } else { 395 break; 396 } 397 } 398 399 if (head == &space_info->priority_tickets) { 400 head = &space_info->tickets; 401 flush = BTRFS_RESERVE_FLUSH_ALL; 402 goto again; 403 } 404 } 405 406 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \ 407 do { \ 408 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \ 409 spin_lock(&__rsv->lock); \ 410 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \ 411 __rsv->size, __rsv->reserved); \ 412 spin_unlock(&__rsv->lock); \ 413 } while (0) 414 415 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 416 struct btrfs_space_info *info) 417 { 418 lockdep_assert_held(&info->lock); 419 420 btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull", 421 info->flags, 422 info->total_bytes - btrfs_space_info_used(info, true), 423 info->full ? "" : "not "); 424 btrfs_info(fs_info, 425 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu", 426 info->total_bytes, info->bytes_used, info->bytes_pinned, 427 info->bytes_reserved, info->bytes_may_use, 428 info->bytes_readonly); 429 430 DUMP_BLOCK_RSV(fs_info, global_block_rsv); 431 DUMP_BLOCK_RSV(fs_info, trans_block_rsv); 432 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv); 433 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv); 434 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); 435 436 } 437 438 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 439 struct btrfs_space_info *info, u64 bytes, 440 int dump_block_groups) 441 { 442 struct btrfs_block_group *cache; 443 int index = 0; 444 445 spin_lock(&info->lock); 446 __btrfs_dump_space_info(fs_info, info); 447 spin_unlock(&info->lock); 448 449 if (!dump_block_groups) 450 return; 451 452 down_read(&info->groups_sem); 453 again: 454 list_for_each_entry(cache, &info->block_groups[index], list) { 455 spin_lock(&cache->lock); 456 btrfs_info(fs_info, 457 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s", 458 cache->start, cache->length, cache->used, cache->pinned, 459 cache->reserved, cache->ro ? "[readonly]" : ""); 460 spin_unlock(&cache->lock); 461 btrfs_dump_free_space(cache, bytes); 462 } 463 if (++index < BTRFS_NR_RAID_TYPES) 464 goto again; 465 up_read(&info->groups_sem); 466 } 467 468 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, 469 u64 to_reclaim) 470 { 471 u64 bytes; 472 u64 nr; 473 474 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 475 nr = div64_u64(to_reclaim, bytes); 476 if (!nr) 477 nr = 1; 478 return nr; 479 } 480 481 #define EXTENT_SIZE_PER_ITEM SZ_256K 482 483 /* 484 * shrink metadata reservation for delalloc 485 */ 486 static void shrink_delalloc(struct btrfs_fs_info *fs_info, 487 struct btrfs_space_info *space_info, 488 u64 to_reclaim, bool wait_ordered) 489 { 490 struct btrfs_trans_handle *trans; 491 u64 delalloc_bytes; 492 u64 ordered_bytes; 493 u64 items; 494 long time_left; 495 int loops; 496 497 /* Calc the number of the pages we need flush for space reservation */ 498 if (to_reclaim == U64_MAX) { 499 items = U64_MAX; 500 } else { 501 /* 502 * to_reclaim is set to however much metadata we need to 503 * reclaim, but reclaiming that much data doesn't really track 504 * exactly, so increase the amount to reclaim by 2x in order to 505 * make sure we're flushing enough delalloc to hopefully reclaim 506 * some metadata reservations. 507 */ 508 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; 509 to_reclaim = items * EXTENT_SIZE_PER_ITEM; 510 } 511 512 trans = (struct btrfs_trans_handle *)current->journal_info; 513 514 delalloc_bytes = percpu_counter_sum_positive( 515 &fs_info->delalloc_bytes); 516 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); 517 if (delalloc_bytes == 0 && ordered_bytes == 0) 518 return; 519 520 /* 521 * If we are doing more ordered than delalloc we need to just wait on 522 * ordered extents, otherwise we'll waste time trying to flush delalloc 523 * that likely won't give us the space back we need. 524 */ 525 if (ordered_bytes > delalloc_bytes) 526 wait_ordered = true; 527 528 loops = 0; 529 while ((delalloc_bytes || ordered_bytes) && loops < 3) { 530 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT; 531 long nr_pages = min_t(u64, temp, LONG_MAX); 532 533 btrfs_start_delalloc_roots(fs_info, nr_pages, true); 534 535 loops++; 536 if (wait_ordered && !trans) { 537 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 538 } else { 539 time_left = schedule_timeout_killable(1); 540 if (time_left) 541 break; 542 } 543 544 spin_lock(&space_info->lock); 545 if (list_empty(&space_info->tickets) && 546 list_empty(&space_info->priority_tickets)) { 547 spin_unlock(&space_info->lock); 548 break; 549 } 550 spin_unlock(&space_info->lock); 551 552 delalloc_bytes = percpu_counter_sum_positive( 553 &fs_info->delalloc_bytes); 554 ordered_bytes = percpu_counter_sum_positive( 555 &fs_info->ordered_bytes); 556 } 557 } 558 559 /** 560 * Possibly commit the transaction if its ok to 561 * 562 * @fs_info: the filesystem 563 * @space_info: space_info we are checking for commit, either data or metadata 564 * 565 * This will check to make sure that committing the transaction will actually 566 * get us somewhere and then commit the transaction if it does. Otherwise it 567 * will return -ENOSPC. 568 */ 569 static int may_commit_transaction(struct btrfs_fs_info *fs_info, 570 struct btrfs_space_info *space_info) 571 { 572 struct reserve_ticket *ticket = NULL; 573 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; 574 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; 575 struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv; 576 struct btrfs_trans_handle *trans; 577 u64 reclaim_bytes = 0; 578 u64 bytes_needed = 0; 579 u64 cur_free_bytes = 0; 580 581 trans = (struct btrfs_trans_handle *)current->journal_info; 582 if (trans) 583 return -EAGAIN; 584 585 spin_lock(&space_info->lock); 586 cur_free_bytes = btrfs_space_info_used(space_info, true); 587 if (cur_free_bytes < space_info->total_bytes) 588 cur_free_bytes = space_info->total_bytes - cur_free_bytes; 589 else 590 cur_free_bytes = 0; 591 592 if (!list_empty(&space_info->priority_tickets)) 593 ticket = list_first_entry(&space_info->priority_tickets, 594 struct reserve_ticket, list); 595 else if (!list_empty(&space_info->tickets)) 596 ticket = list_first_entry(&space_info->tickets, 597 struct reserve_ticket, list); 598 if (ticket) 599 bytes_needed = ticket->bytes; 600 601 if (bytes_needed > cur_free_bytes) 602 bytes_needed -= cur_free_bytes; 603 else 604 bytes_needed = 0; 605 spin_unlock(&space_info->lock); 606 607 if (!bytes_needed) 608 return 0; 609 610 trans = btrfs_join_transaction(fs_info->extent_root); 611 if (IS_ERR(trans)) 612 return PTR_ERR(trans); 613 614 /* 615 * See if there is enough pinned space to make this reservation, or if 616 * we have block groups that are going to be freed, allowing us to 617 * possibly do a chunk allocation the next loop through. 618 */ 619 if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) || 620 __percpu_counter_compare(&space_info->total_bytes_pinned, 621 bytes_needed, 622 BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0) 623 goto commit; 624 625 /* 626 * See if there is some space in the delayed insertion reserve for this 627 * reservation. If the space_info's don't match (like for DATA or 628 * SYSTEM) then just go enospc, reclaiming this space won't recover any 629 * space to satisfy those reservations. 630 */ 631 if (space_info != delayed_rsv->space_info) 632 goto enospc; 633 634 spin_lock(&delayed_rsv->lock); 635 reclaim_bytes += delayed_rsv->reserved; 636 spin_unlock(&delayed_rsv->lock); 637 638 spin_lock(&delayed_refs_rsv->lock); 639 reclaim_bytes += delayed_refs_rsv->reserved; 640 spin_unlock(&delayed_refs_rsv->lock); 641 642 spin_lock(&trans_rsv->lock); 643 reclaim_bytes += trans_rsv->reserved; 644 spin_unlock(&trans_rsv->lock); 645 646 if (reclaim_bytes >= bytes_needed) 647 goto commit; 648 bytes_needed -= reclaim_bytes; 649 650 if (__percpu_counter_compare(&space_info->total_bytes_pinned, 651 bytes_needed, 652 BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0) 653 goto enospc; 654 655 commit: 656 return btrfs_commit_transaction(trans); 657 enospc: 658 btrfs_end_transaction(trans); 659 return -ENOSPC; 660 } 661 662 /* 663 * Try to flush some data based on policy set by @state. This is only advisory 664 * and may fail for various reasons. The caller is supposed to examine the 665 * state of @space_info to detect the outcome. 666 */ 667 static void flush_space(struct btrfs_fs_info *fs_info, 668 struct btrfs_space_info *space_info, u64 num_bytes, 669 enum btrfs_flush_state state) 670 { 671 struct btrfs_root *root = fs_info->extent_root; 672 struct btrfs_trans_handle *trans; 673 int nr; 674 int ret = 0; 675 676 switch (state) { 677 case FLUSH_DELAYED_ITEMS_NR: 678 case FLUSH_DELAYED_ITEMS: 679 if (state == FLUSH_DELAYED_ITEMS_NR) 680 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2; 681 else 682 nr = -1; 683 684 trans = btrfs_join_transaction(root); 685 if (IS_ERR(trans)) { 686 ret = PTR_ERR(trans); 687 break; 688 } 689 ret = btrfs_run_delayed_items_nr(trans, nr); 690 btrfs_end_transaction(trans); 691 break; 692 case FLUSH_DELALLOC: 693 case FLUSH_DELALLOC_WAIT: 694 shrink_delalloc(fs_info, space_info, num_bytes, 695 state == FLUSH_DELALLOC_WAIT); 696 break; 697 case FLUSH_DELAYED_REFS_NR: 698 case FLUSH_DELAYED_REFS: 699 trans = btrfs_join_transaction(root); 700 if (IS_ERR(trans)) { 701 ret = PTR_ERR(trans); 702 break; 703 } 704 if (state == FLUSH_DELAYED_REFS_NR) 705 nr = calc_reclaim_items_nr(fs_info, num_bytes); 706 else 707 nr = 0; 708 btrfs_run_delayed_refs(trans, nr); 709 btrfs_end_transaction(trans); 710 break; 711 case ALLOC_CHUNK: 712 case ALLOC_CHUNK_FORCE: 713 trans = btrfs_join_transaction(root); 714 if (IS_ERR(trans)) { 715 ret = PTR_ERR(trans); 716 break; 717 } 718 ret = btrfs_chunk_alloc(trans, 719 btrfs_get_alloc_profile(fs_info, space_info->flags), 720 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE : 721 CHUNK_ALLOC_FORCE); 722 btrfs_end_transaction(trans); 723 if (ret > 0 || ret == -ENOSPC) 724 ret = 0; 725 break; 726 case RUN_DELAYED_IPUTS: 727 /* 728 * If we have pending delayed iputs then we could free up a 729 * bunch of pinned space, so make sure we run the iputs before 730 * we do our pinned bytes check below. 731 */ 732 btrfs_run_delayed_iputs(fs_info); 733 btrfs_wait_on_delayed_iputs(fs_info); 734 break; 735 case COMMIT_TRANS: 736 ret = may_commit_transaction(fs_info, space_info); 737 break; 738 default: 739 ret = -ENOSPC; 740 break; 741 } 742 743 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, 744 ret); 745 return; 746 } 747 748 static inline u64 749 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, 750 struct btrfs_space_info *space_info) 751 { 752 u64 used; 753 u64 avail; 754 u64 expected; 755 u64 to_reclaim = space_info->reclaim_size; 756 757 lockdep_assert_held(&space_info->lock); 758 759 avail = calc_available_free_space(fs_info, space_info, 760 BTRFS_RESERVE_FLUSH_ALL); 761 used = btrfs_space_info_used(space_info, true); 762 763 /* 764 * We may be flushing because suddenly we have less space than we had 765 * before, and now we're well over-committed based on our current free 766 * space. If that's the case add in our overage so we make sure to put 767 * appropriate pressure on the flushing state machine. 768 */ 769 if (space_info->total_bytes + avail < used) 770 to_reclaim += used - (space_info->total_bytes + avail); 771 772 if (to_reclaim) 773 return to_reclaim; 774 775 to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M); 776 if (btrfs_can_overcommit(fs_info, space_info, to_reclaim, 777 BTRFS_RESERVE_FLUSH_ALL)) 778 return 0; 779 780 used = btrfs_space_info_used(space_info, true); 781 782 if (btrfs_can_overcommit(fs_info, space_info, SZ_1M, 783 BTRFS_RESERVE_FLUSH_ALL)) 784 expected = div_factor_fine(space_info->total_bytes, 95); 785 else 786 expected = div_factor_fine(space_info->total_bytes, 90); 787 788 if (used > expected) 789 to_reclaim = used - expected; 790 else 791 to_reclaim = 0; 792 to_reclaim = min(to_reclaim, space_info->bytes_may_use + 793 space_info->bytes_reserved); 794 return to_reclaim; 795 } 796 797 static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, 798 struct btrfs_space_info *space_info, 799 u64 used) 800 { 801 u64 thresh = div_factor_fine(space_info->total_bytes, 98); 802 803 /* If we're just plain full then async reclaim just slows us down. */ 804 if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh) 805 return 0; 806 807 if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info)) 808 return 0; 809 810 return (used >= thresh && !btrfs_fs_closing(fs_info) && 811 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); 812 } 813 814 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, 815 struct btrfs_space_info *space_info, 816 struct reserve_ticket *ticket) 817 { 818 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 819 u64 min_bytes; 820 821 if (global_rsv->space_info != space_info) 822 return false; 823 824 spin_lock(&global_rsv->lock); 825 min_bytes = div_factor(global_rsv->size, 1); 826 if (global_rsv->reserved < min_bytes + ticket->bytes) { 827 spin_unlock(&global_rsv->lock); 828 return false; 829 } 830 global_rsv->reserved -= ticket->bytes; 831 remove_ticket(space_info, ticket); 832 ticket->bytes = 0; 833 wake_up(&ticket->wait); 834 space_info->tickets_id++; 835 if (global_rsv->reserved < global_rsv->size) 836 global_rsv->full = 0; 837 spin_unlock(&global_rsv->lock); 838 839 return true; 840 } 841 842 /* 843 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets 844 * @fs_info - fs_info for this fs 845 * @space_info - the space info we were flushing 846 * 847 * We call this when we've exhausted our flushing ability and haven't made 848 * progress in satisfying tickets. The reservation code handles tickets in 849 * order, so if there is a large ticket first and then smaller ones we could 850 * very well satisfy the smaller tickets. This will attempt to wake up any 851 * tickets in the list to catch this case. 852 * 853 * This function returns true if it was able to make progress by clearing out 854 * other tickets, or if it stumbles across a ticket that was smaller than the 855 * first ticket. 856 */ 857 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, 858 struct btrfs_space_info *space_info) 859 { 860 struct reserve_ticket *ticket; 861 u64 tickets_id = space_info->tickets_id; 862 u64 first_ticket_bytes = 0; 863 864 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 865 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info"); 866 __btrfs_dump_space_info(fs_info, space_info); 867 } 868 869 while (!list_empty(&space_info->tickets) && 870 tickets_id == space_info->tickets_id) { 871 ticket = list_first_entry(&space_info->tickets, 872 struct reserve_ticket, list); 873 874 if (ticket->steal && 875 steal_from_global_rsv(fs_info, space_info, ticket)) 876 return true; 877 878 /* 879 * may_commit_transaction will avoid committing the transaction 880 * if it doesn't feel like the space reclaimed by the commit 881 * would result in the ticket succeeding. However if we have a 882 * smaller ticket in the queue it may be small enough to be 883 * satisified by committing the transaction, so if any 884 * subsequent ticket is smaller than the first ticket go ahead 885 * and send us back for another loop through the enospc flushing 886 * code. 887 */ 888 if (first_ticket_bytes == 0) 889 first_ticket_bytes = ticket->bytes; 890 else if (first_ticket_bytes > ticket->bytes) 891 return true; 892 893 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 894 btrfs_info(fs_info, "failing ticket with %llu bytes", 895 ticket->bytes); 896 897 remove_ticket(space_info, ticket); 898 ticket->error = -ENOSPC; 899 wake_up(&ticket->wait); 900 901 /* 902 * We're just throwing tickets away, so more flushing may not 903 * trip over btrfs_try_granting_tickets, so we need to call it 904 * here to see if we can make progress with the next ticket in 905 * the list. 906 */ 907 btrfs_try_granting_tickets(fs_info, space_info); 908 } 909 return (tickets_id != space_info->tickets_id); 910 } 911 912 /* 913 * This is for normal flushers, we can wait all goddamned day if we want to. We 914 * will loop and continuously try to flush as long as we are making progress. 915 * We count progress as clearing off tickets each time we have to loop. 916 */ 917 static void btrfs_async_reclaim_metadata_space(struct work_struct *work) 918 { 919 struct btrfs_fs_info *fs_info; 920 struct btrfs_space_info *space_info; 921 u64 to_reclaim; 922 enum btrfs_flush_state flush_state; 923 int commit_cycles = 0; 924 u64 last_tickets_id; 925 926 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); 927 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 928 929 spin_lock(&space_info->lock); 930 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 931 if (!to_reclaim) { 932 space_info->flush = 0; 933 spin_unlock(&space_info->lock); 934 return; 935 } 936 last_tickets_id = space_info->tickets_id; 937 spin_unlock(&space_info->lock); 938 939 flush_state = FLUSH_DELAYED_ITEMS_NR; 940 do { 941 flush_space(fs_info, space_info, to_reclaim, flush_state); 942 spin_lock(&space_info->lock); 943 if (list_empty(&space_info->tickets)) { 944 space_info->flush = 0; 945 spin_unlock(&space_info->lock); 946 return; 947 } 948 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, 949 space_info); 950 if (last_tickets_id == space_info->tickets_id) { 951 flush_state++; 952 } else { 953 last_tickets_id = space_info->tickets_id; 954 flush_state = FLUSH_DELAYED_ITEMS_NR; 955 if (commit_cycles) 956 commit_cycles--; 957 } 958 959 /* 960 * We don't want to force a chunk allocation until we've tried 961 * pretty hard to reclaim space. Think of the case where we 962 * freed up a bunch of space and so have a lot of pinned space 963 * to reclaim. We would rather use that than possibly create a 964 * underutilized metadata chunk. So if this is our first run 965 * through the flushing state machine skip ALLOC_CHUNK_FORCE and 966 * commit the transaction. If nothing has changed the next go 967 * around then we can force a chunk allocation. 968 */ 969 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles) 970 flush_state++; 971 972 if (flush_state > COMMIT_TRANS) { 973 commit_cycles++; 974 if (commit_cycles > 2) { 975 if (maybe_fail_all_tickets(fs_info, space_info)) { 976 flush_state = FLUSH_DELAYED_ITEMS_NR; 977 commit_cycles--; 978 } else { 979 space_info->flush = 0; 980 } 981 } else { 982 flush_state = FLUSH_DELAYED_ITEMS_NR; 983 } 984 } 985 spin_unlock(&space_info->lock); 986 } while (flush_state <= COMMIT_TRANS); 987 } 988 989 /* 990 * FLUSH_DELALLOC_WAIT: 991 * Space is freed from flushing delalloc in one of two ways. 992 * 993 * 1) compression is on and we allocate less space than we reserved 994 * 2) we are overwriting existing space 995 * 996 * For #1 that extra space is reclaimed as soon as the delalloc pages are 997 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent 998 * length to ->bytes_reserved, and subtracts the reserved space from 999 * ->bytes_may_use. 1000 * 1001 * For #2 this is trickier. Once the ordered extent runs we will drop the 1002 * extent in the range we are overwriting, which creates a delayed ref for 1003 * that freed extent. This however is not reclaimed until the transaction 1004 * commits, thus the next stages. 1005 * 1006 * RUN_DELAYED_IPUTS 1007 * If we are freeing inodes, we want to make sure all delayed iputs have 1008 * completed, because they could have been on an inode with i_nlink == 0, and 1009 * thus have been truncated and freed up space. But again this space is not 1010 * immediately re-usable, it comes in the form of a delayed ref, which must be 1011 * run and then the transaction must be committed. 1012 * 1013 * FLUSH_DELAYED_REFS 1014 * The above two cases generate delayed refs that will affect 1015 * ->total_bytes_pinned. However this counter can be inconsistent with 1016 * reality if there are outstanding delayed refs. This is because we adjust 1017 * the counter based solely on the current set of delayed refs and disregard 1018 * any on-disk state which might include more refs. So for example, if we 1019 * have an extent with 2 references, but we only drop 1, we'll see that there 1020 * is a negative delayed ref count for the extent and assume that the space 1021 * will be freed, and thus increase ->total_bytes_pinned. 1022 * 1023 * Running the delayed refs gives us the actual real view of what will be 1024 * freed at the transaction commit time. This stage will not actually free 1025 * space for us, it just makes sure that may_commit_transaction() has all of 1026 * the information it needs to make the right decision. 1027 * 1028 * COMMIT_TRANS 1029 * This is where we reclaim all of the pinned space generated by the previous 1030 * two stages. We will not commit the transaction if we don't think we're 1031 * likely to satisfy our request, which means if our current free space + 1032 * total_bytes_pinned < reservation we will not commit. This is why the 1033 * previous states are actually important, to make sure we know for sure 1034 * whether committing the transaction will allow us to make progress. 1035 * 1036 * ALLOC_CHUNK_FORCE 1037 * For data we start with alloc chunk force, however we could have been full 1038 * before, and then the transaction commit could have freed new block groups, 1039 * so if we now have space to allocate do the force chunk allocation. 1040 */ 1041 static const enum btrfs_flush_state data_flush_states[] = { 1042 FLUSH_DELALLOC_WAIT, 1043 RUN_DELAYED_IPUTS, 1044 FLUSH_DELAYED_REFS, 1045 COMMIT_TRANS, 1046 ALLOC_CHUNK_FORCE, 1047 }; 1048 1049 static void btrfs_async_reclaim_data_space(struct work_struct *work) 1050 { 1051 struct btrfs_fs_info *fs_info; 1052 struct btrfs_space_info *space_info; 1053 u64 last_tickets_id; 1054 enum btrfs_flush_state flush_state = 0; 1055 1056 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); 1057 space_info = fs_info->data_sinfo; 1058 1059 spin_lock(&space_info->lock); 1060 if (list_empty(&space_info->tickets)) { 1061 space_info->flush = 0; 1062 spin_unlock(&space_info->lock); 1063 return; 1064 } 1065 last_tickets_id = space_info->tickets_id; 1066 spin_unlock(&space_info->lock); 1067 1068 while (!space_info->full) { 1069 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE); 1070 spin_lock(&space_info->lock); 1071 if (list_empty(&space_info->tickets)) { 1072 space_info->flush = 0; 1073 spin_unlock(&space_info->lock); 1074 return; 1075 } 1076 last_tickets_id = space_info->tickets_id; 1077 spin_unlock(&space_info->lock); 1078 } 1079 1080 while (flush_state < ARRAY_SIZE(data_flush_states)) { 1081 flush_space(fs_info, space_info, U64_MAX, 1082 data_flush_states[flush_state]); 1083 spin_lock(&space_info->lock); 1084 if (list_empty(&space_info->tickets)) { 1085 space_info->flush = 0; 1086 spin_unlock(&space_info->lock); 1087 return; 1088 } 1089 1090 if (last_tickets_id == space_info->tickets_id) { 1091 flush_state++; 1092 } else { 1093 last_tickets_id = space_info->tickets_id; 1094 flush_state = 0; 1095 } 1096 1097 if (flush_state >= ARRAY_SIZE(data_flush_states)) { 1098 if (space_info->full) { 1099 if (maybe_fail_all_tickets(fs_info, space_info)) 1100 flush_state = 0; 1101 else 1102 space_info->flush = 0; 1103 } else { 1104 flush_state = 0; 1105 } 1106 } 1107 spin_unlock(&space_info->lock); 1108 } 1109 } 1110 1111 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info) 1112 { 1113 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); 1114 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); 1115 } 1116 1117 static const enum btrfs_flush_state priority_flush_states[] = { 1118 FLUSH_DELAYED_ITEMS_NR, 1119 FLUSH_DELAYED_ITEMS, 1120 ALLOC_CHUNK, 1121 }; 1122 1123 static const enum btrfs_flush_state evict_flush_states[] = { 1124 FLUSH_DELAYED_ITEMS_NR, 1125 FLUSH_DELAYED_ITEMS, 1126 FLUSH_DELAYED_REFS_NR, 1127 FLUSH_DELAYED_REFS, 1128 FLUSH_DELALLOC, 1129 FLUSH_DELALLOC_WAIT, 1130 ALLOC_CHUNK, 1131 COMMIT_TRANS, 1132 }; 1133 1134 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, 1135 struct btrfs_space_info *space_info, 1136 struct reserve_ticket *ticket, 1137 const enum btrfs_flush_state *states, 1138 int states_nr) 1139 { 1140 u64 to_reclaim; 1141 int flush_state; 1142 1143 spin_lock(&space_info->lock); 1144 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 1145 if (!to_reclaim) { 1146 spin_unlock(&space_info->lock); 1147 return; 1148 } 1149 spin_unlock(&space_info->lock); 1150 1151 flush_state = 0; 1152 do { 1153 flush_space(fs_info, space_info, to_reclaim, states[flush_state]); 1154 flush_state++; 1155 spin_lock(&space_info->lock); 1156 if (ticket->bytes == 0) { 1157 spin_unlock(&space_info->lock); 1158 return; 1159 } 1160 spin_unlock(&space_info->lock); 1161 } while (flush_state < states_nr); 1162 } 1163 1164 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info, 1165 struct btrfs_space_info *space_info, 1166 struct reserve_ticket *ticket) 1167 { 1168 while (!space_info->full) { 1169 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE); 1170 spin_lock(&space_info->lock); 1171 if (ticket->bytes == 0) { 1172 spin_unlock(&space_info->lock); 1173 return; 1174 } 1175 spin_unlock(&space_info->lock); 1176 } 1177 } 1178 1179 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, 1180 struct btrfs_space_info *space_info, 1181 struct reserve_ticket *ticket) 1182 1183 { 1184 DEFINE_WAIT(wait); 1185 int ret = 0; 1186 1187 spin_lock(&space_info->lock); 1188 while (ticket->bytes > 0 && ticket->error == 0) { 1189 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); 1190 if (ret) { 1191 /* 1192 * Delete us from the list. After we unlock the space 1193 * info, we don't want the async reclaim job to reserve 1194 * space for this ticket. If that would happen, then the 1195 * ticket's task would not known that space was reserved 1196 * despite getting an error, resulting in a space leak 1197 * (bytes_may_use counter of our space_info). 1198 */ 1199 remove_ticket(space_info, ticket); 1200 ticket->error = -EINTR; 1201 break; 1202 } 1203 spin_unlock(&space_info->lock); 1204 1205 schedule(); 1206 1207 finish_wait(&ticket->wait, &wait); 1208 spin_lock(&space_info->lock); 1209 } 1210 spin_unlock(&space_info->lock); 1211 } 1212 1213 /** 1214 * Do the appropriate flushing and waiting for a ticket 1215 * 1216 * @fs_info: the filesystem 1217 * @space_info: space info for the reservation 1218 * @ticket: ticket for the reservation 1219 * @start_ns: timestamp when the reservation started 1220 * @orig_bytes: amount of bytes originally reserved 1221 * @flush: how much we can flush 1222 * 1223 * This does the work of figuring out how to flush for the ticket, waiting for 1224 * the reservation, and returning the appropriate error if there is one. 1225 */ 1226 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, 1227 struct btrfs_space_info *space_info, 1228 struct reserve_ticket *ticket, 1229 u64 start_ns, u64 orig_bytes, 1230 enum btrfs_reserve_flush_enum flush) 1231 { 1232 int ret; 1233 1234 switch (flush) { 1235 case BTRFS_RESERVE_FLUSH_DATA: 1236 case BTRFS_RESERVE_FLUSH_ALL: 1237 case BTRFS_RESERVE_FLUSH_ALL_STEAL: 1238 wait_reserve_ticket(fs_info, space_info, ticket); 1239 break; 1240 case BTRFS_RESERVE_FLUSH_LIMIT: 1241 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1242 priority_flush_states, 1243 ARRAY_SIZE(priority_flush_states)); 1244 break; 1245 case BTRFS_RESERVE_FLUSH_EVICT: 1246 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1247 evict_flush_states, 1248 ARRAY_SIZE(evict_flush_states)); 1249 break; 1250 case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE: 1251 priority_reclaim_data_space(fs_info, space_info, ticket); 1252 break; 1253 default: 1254 ASSERT(0); 1255 break; 1256 } 1257 1258 spin_lock(&space_info->lock); 1259 ret = ticket->error; 1260 if (ticket->bytes || ticket->error) { 1261 /* 1262 * We were a priority ticket, so we need to delete ourselves 1263 * from the list. Because we could have other priority tickets 1264 * behind us that require less space, run 1265 * btrfs_try_granting_tickets() to see if their reservations can 1266 * now be made. 1267 */ 1268 if (!list_empty(&ticket->list)) { 1269 remove_ticket(space_info, ticket); 1270 btrfs_try_granting_tickets(fs_info, space_info); 1271 } 1272 1273 if (!ret) 1274 ret = -ENOSPC; 1275 } 1276 spin_unlock(&space_info->lock); 1277 ASSERT(list_empty(&ticket->list)); 1278 /* 1279 * Check that we can't have an error set if the reservation succeeded, 1280 * as that would confuse tasks and lead them to error out without 1281 * releasing reserved space (if an error happens the expectation is that 1282 * space wasn't reserved at all). 1283 */ 1284 ASSERT(!(ticket->bytes == 0 && ticket->error)); 1285 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, 1286 start_ns, flush, ticket->error); 1287 return ret; 1288 } 1289 1290 /* 1291 * This returns true if this flush state will go through the ordinary flushing 1292 * code. 1293 */ 1294 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) 1295 { 1296 return (flush == BTRFS_RESERVE_FLUSH_ALL) || 1297 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1298 } 1299 1300 /** 1301 * Try to reserve bytes from the block_rsv's space 1302 * 1303 * @fs_info: the filesystem 1304 * @space_info: space info we want to allocate from 1305 * @orig_bytes: number of bytes we want 1306 * @flush: whether or not we can flush to make our reservation 1307 * 1308 * This will reserve orig_bytes number of bytes from the space info associated 1309 * with the block_rsv. If there is not enough space it will make an attempt to 1310 * flush out space to make room. It will do this by flushing delalloc if 1311 * possible or committing the transaction. If flush is 0 then no attempts to 1312 * regain reservations will be made and this will fail if there is not enough 1313 * space already. 1314 */ 1315 static int __reserve_bytes(struct btrfs_fs_info *fs_info, 1316 struct btrfs_space_info *space_info, u64 orig_bytes, 1317 enum btrfs_reserve_flush_enum flush) 1318 { 1319 struct work_struct *async_work; 1320 struct reserve_ticket ticket; 1321 u64 start_ns = 0; 1322 u64 used; 1323 int ret = 0; 1324 bool pending_tickets; 1325 1326 ASSERT(orig_bytes); 1327 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); 1328 1329 if (flush == BTRFS_RESERVE_FLUSH_DATA) 1330 async_work = &fs_info->async_data_reclaim_work; 1331 else 1332 async_work = &fs_info->async_reclaim_work; 1333 1334 spin_lock(&space_info->lock); 1335 ret = -ENOSPC; 1336 used = btrfs_space_info_used(space_info, true); 1337 1338 /* 1339 * We don't want NO_FLUSH allocations to jump everybody, they can 1340 * generally handle ENOSPC in a different way, so treat them the same as 1341 * normal flushers when it comes to skipping pending tickets. 1342 */ 1343 if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH)) 1344 pending_tickets = !list_empty(&space_info->tickets) || 1345 !list_empty(&space_info->priority_tickets); 1346 else 1347 pending_tickets = !list_empty(&space_info->priority_tickets); 1348 1349 /* 1350 * Carry on if we have enough space (short-circuit) OR call 1351 * can_overcommit() to ensure we can overcommit to continue. 1352 */ 1353 if (!pending_tickets && 1354 ((used + orig_bytes <= space_info->total_bytes) || 1355 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) { 1356 btrfs_space_info_update_bytes_may_use(fs_info, space_info, 1357 orig_bytes); 1358 ret = 0; 1359 } 1360 1361 /* 1362 * If we couldn't make a reservation then setup our reservation ticket 1363 * and kick the async worker if it's not already running. 1364 * 1365 * If we are a priority flusher then we just need to add our ticket to 1366 * the list and we will do our own flushing further down. 1367 */ 1368 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { 1369 ticket.bytes = orig_bytes; 1370 ticket.error = 0; 1371 space_info->reclaim_size += ticket.bytes; 1372 init_waitqueue_head(&ticket.wait); 1373 ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1374 if (trace_btrfs_reserve_ticket_enabled()) 1375 start_ns = ktime_get_ns(); 1376 1377 if (flush == BTRFS_RESERVE_FLUSH_ALL || 1378 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || 1379 flush == BTRFS_RESERVE_FLUSH_DATA) { 1380 list_add_tail(&ticket.list, &space_info->tickets); 1381 if (!space_info->flush) { 1382 space_info->flush = 1; 1383 trace_btrfs_trigger_flush(fs_info, 1384 space_info->flags, 1385 orig_bytes, flush, 1386 "enospc"); 1387 queue_work(system_unbound_wq, async_work); 1388 } 1389 } else { 1390 list_add_tail(&ticket.list, 1391 &space_info->priority_tickets); 1392 } 1393 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 1394 used += orig_bytes; 1395 /* 1396 * We will do the space reservation dance during log replay, 1397 * which means we won't have fs_info->fs_root set, so don't do 1398 * the async reclaim as we will panic. 1399 */ 1400 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && 1401 need_do_async_reclaim(fs_info, space_info, used) && 1402 !work_busy(&fs_info->async_reclaim_work)) { 1403 trace_btrfs_trigger_flush(fs_info, space_info->flags, 1404 orig_bytes, flush, "preempt"); 1405 queue_work(system_unbound_wq, 1406 &fs_info->async_reclaim_work); 1407 } 1408 } 1409 spin_unlock(&space_info->lock); 1410 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH) 1411 return ret; 1412 1413 return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns, 1414 orig_bytes, flush); 1415 } 1416 1417 /** 1418 * Trye to reserve metadata bytes from the block_rsv's space 1419 * 1420 * @root: the root we're allocating for 1421 * @block_rsv: block_rsv we're allocating for 1422 * @orig_bytes: number of bytes we want 1423 * @flush: whether or not we can flush to make our reservation 1424 * 1425 * This will reserve orig_bytes number of bytes from the space info associated 1426 * with the block_rsv. If there is not enough space it will make an attempt to 1427 * flush out space to make room. It will do this by flushing delalloc if 1428 * possible or committing the transaction. If flush is 0 then no attempts to 1429 * regain reservations will be made and this will fail if there is not enough 1430 * space already. 1431 */ 1432 int btrfs_reserve_metadata_bytes(struct btrfs_root *root, 1433 struct btrfs_block_rsv *block_rsv, 1434 u64 orig_bytes, 1435 enum btrfs_reserve_flush_enum flush) 1436 { 1437 struct btrfs_fs_info *fs_info = root->fs_info; 1438 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 1439 int ret; 1440 1441 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush); 1442 if (ret == -ENOSPC && 1443 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) { 1444 if (block_rsv != global_rsv && 1445 !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes)) 1446 ret = 0; 1447 } 1448 if (ret == -ENOSPC) { 1449 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1450 block_rsv->space_info->flags, 1451 orig_bytes, 1); 1452 1453 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1454 btrfs_dump_space_info(fs_info, block_rsv->space_info, 1455 orig_bytes, 0); 1456 } 1457 return ret; 1458 } 1459 1460 /** 1461 * Try to reserve data bytes for an allocation 1462 * 1463 * @fs_info: the filesystem 1464 * @bytes: number of bytes we need 1465 * @flush: how we are allowed to flush 1466 * 1467 * This will reserve bytes from the data space info. If there is not enough 1468 * space then we will attempt to flush space as specified by flush. 1469 */ 1470 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, 1471 enum btrfs_reserve_flush_enum flush) 1472 { 1473 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; 1474 int ret; 1475 1476 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA || 1477 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE); 1478 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); 1479 1480 ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush); 1481 if (ret == -ENOSPC) { 1482 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1483 data_sinfo->flags, bytes, 1); 1484 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1485 btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0); 1486 } 1487 return ret; 1488 } 1489