// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code. This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info. This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field. Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation. There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's. These are basically buckets for every different type of
 *   metadata reservation we have. You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size. These are the worst case calculations we use based
 *   on the number of items we will want to modify. We have one for changing
 *   items, and one for inserting new items. Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes. This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed. If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it does we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs. Each of these delayed
 *   things however hold reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode. Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes. We keep these delayed items to coalesce these operations
 *     into a single operation done on demand. These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation. We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation. Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space when
 *     our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items. Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     may_commit_transaction() is the ultimate arbiter on whether we commit the
 *     transaction or not. In order to avoid constantly churning we do all the
 *     above flushing first and then commit the transaction as the last resort.
 *     However we need to take into account things like pinned space that would
 *     be freed, plus any delayed work we may not have gotten rid of in the case
 *     of metadata.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space. This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space. If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the
 *   right thing with or without extra unallocated space.
 */
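
/*
 * Worked example of the normal-case accounting above (illustrative only,
 * with assumed numbers): a worst case reservation of num_bytes = 256K that
 * ends up allocating a single 16K tree block (extent_bytes) moves the
 * space_info counters like so:
 *
 *   reserve:           bytes_may_use  += 256K
 *   extent allocation: bytes_may_use  -= 256K, bytes_reserved += 16K
 *   insert reference:  bytes_reserved -= 16K,  bytes_used     += 16K
 */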

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable. For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
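
/*
 * Worked example for the overcommit math above (illustrative only, with
 * assumed numbers): if free_chunk_space is 10G and the metadata profile
 * is RAID1 (factor 2), only 5G of the unallocated space is usable. A
 * BTRFS_RESERVE_FLUSH_ALL reservation may then overcommit by
 * avail = 5G >> 3 = 640M, while a lesser flush level may overcommit by
 * avail = 5G >> 1 = 2.5G; the reservation succeeds if
 * used + bytes < total_bytes + avail.
 */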

static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}
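
/*
 * Example of the granting order above (illustrative only, with assumed
 * sizes): if returning space leaves room for 1M and the queues hold a
 * 512K priority ticket and a 768K normal ticket, the priority ticket is
 * granted first under BTRFS_RESERVE_NO_FLUSH overcommit rules, then the
 * normal ticket is considered under BTRFS_RESERVE_FLUSH_ALL rules. Each
 * list is only ever served from its head, so a too-large ticket at the
 * head blocks the smaller ones queued behind it.
 */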
"[readonly]" : ""); 471 spin_unlock(&cache->lock); 472 btrfs_dump_free_space(cache, bytes); 473 } 474 if (++index < BTRFS_NR_RAID_TYPES) 475 goto again; 476 up_read(&info->groups_sem); 477 } 478 479 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, 480 u64 to_reclaim) 481 { 482 u64 bytes; 483 u64 nr; 484 485 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 486 nr = div64_u64(to_reclaim, bytes); 487 if (!nr) 488 nr = 1; 489 return nr; 490 } 491 492 #define EXTENT_SIZE_PER_ITEM SZ_256K 493 494 /* 495 * shrink metadata reservation for delalloc 496 */ 497 static void shrink_delalloc(struct btrfs_fs_info *fs_info, 498 struct btrfs_space_info *space_info, 499 u64 to_reclaim, bool wait_ordered) 500 { 501 struct btrfs_trans_handle *trans; 502 u64 delalloc_bytes; 503 u64 dio_bytes; 504 u64 items; 505 long time_left; 506 int loops; 507 508 /* Calc the number of the pages we need flush for space reservation */ 509 if (to_reclaim == U64_MAX) { 510 items = U64_MAX; 511 } else { 512 /* 513 * to_reclaim is set to however much metadata we need to 514 * reclaim, but reclaiming that much data doesn't really track 515 * exactly, so increase the amount to reclaim by 2x in order to 516 * make sure we're flushing enough delalloc to hopefully reclaim 517 * some metadata reservations. 518 */ 519 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; 520 to_reclaim = items * EXTENT_SIZE_PER_ITEM; 521 } 522 523 trans = (struct btrfs_trans_handle *)current->journal_info; 524 525 delalloc_bytes = percpu_counter_sum_positive( 526 &fs_info->delalloc_bytes); 527 dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes); 528 if (delalloc_bytes == 0 && dio_bytes == 0) { 529 if (trans) 530 return; 531 if (wait_ordered) 532 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 533 return; 534 } 535 536 /* 537 * If we are doing more ordered than delalloc we need to just wait on 538 * ordered extents, otherwise we'll waste time trying to flush delalloc 539 * that likely won't give us the space back we need. 540 */ 541 if (dio_bytes > delalloc_bytes) 542 wait_ordered = true; 543 544 loops = 0; 545 while ((delalloc_bytes || dio_bytes) && loops < 3) { 546 btrfs_start_delalloc_roots(fs_info, items); 547 548 loops++; 549 if (wait_ordered && !trans) { 550 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 551 } else { 552 time_left = schedule_timeout_killable(1); 553 if (time_left) 554 break; 555 } 556 557 spin_lock(&space_info->lock); 558 if (list_empty(&space_info->tickets) && 559 list_empty(&space_info->priority_tickets)) { 560 spin_unlock(&space_info->lock); 561 break; 562 } 563 spin_unlock(&space_info->lock); 564 565 delalloc_bytes = percpu_counter_sum_positive( 566 &fs_info->delalloc_bytes); 567 dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes); 568 } 569 } 570 571 /** 572 * maybe_commit_transaction - possibly commit the transaction if its ok to 573 * @root - the root we're allocating for 574 * @bytes - the number of bytes we want to reserve 575 * @force - force the commit 576 * 577 * This will check to make sure that committing the transaction will actually 578 * get us somewhere and then commit the transaction if it does. Otherwise it 579 * will return -ENOSPC. 

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservation for delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 dio_bytes;
	u64 items;
	long time_left;
	int loops;

	/* Calculate the number of pages we need to flush for this reservation */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly, so increase the amount to reclaim by 2x in order to
		 * make sure we're flushing enough delalloc to hopefully reclaim
		 * some metadata reservations.
		 */
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
		to_reclaim = items * EXTENT_SIZE_PER_ITEM;
	}

	trans = (struct btrfs_trans_handle *)current->journal_info;

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	if (delalloc_bytes == 0 && dio_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (dio_bytes > delalloc_bytes)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || dio_bytes) && loops < 3) {
		btrfs_start_delalloc_roots(fs_info, items);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	}
}

/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @fs_info - the filesystem
 * @space_info - the space_info we're allocating for
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
	struct btrfs_trans_handle *trans;
	u64 reclaim_bytes = 0;
	u64 bytes_needed = 0;
	u64 cur_free_bytes = 0;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	cur_free_bytes = btrfs_space_info_used(space_info, true);
	if (cur_free_bytes < space_info->total_bytes)
		cur_free_bytes = space_info->total_bytes - cur_free_bytes;
	else
		cur_free_bytes = 0;

	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	if (ticket)
		bytes_needed = ticket->bytes;

	if (bytes_needed > cur_free_bytes)
		bytes_needed -= cur_free_bytes;
	else
		bytes_needed = 0;
	spin_unlock(&space_info->lock);

	if (!bytes_needed)
		return 0;

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * See if there is enough pinned space to make this reservation, or if
	 * we have block groups that are going to be freed, allowing us to
	 * possibly do a chunk allocation the next loop through.
	 */
	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
	    __percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reserve for this
	 * reservation. If the space_infos don't match (like for DATA or
	 * SYSTEM) then just go enospc, reclaiming this space won't recover any
	 * space to satisfy those reservations.
	 */
	if (space_info != delayed_rsv->space_info)
		goto enospc;

	spin_lock(&delayed_rsv->lock);
	reclaim_bytes += delayed_rsv->reserved;
	spin_unlock(&delayed_rsv->lock);

	spin_lock(&delayed_refs_rsv->lock);
	reclaim_bytes += delayed_refs_rsv->reserved;
	spin_unlock(&delayed_refs_rsv->lock);

	spin_lock(&trans_rsv->lock);
	reclaim_bytes += trans_rsv->reserved;
	spin_unlock(&trans_rsv->lock);

	if (reclaim_bytes >= bytes_needed)
		goto commit;
	bytes_needed -= reclaim_bytes;

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
		goto enospc;

commit:
	return btrfs_commit_transaction(trans);
enospc:
	btrfs_end_transaction(trans);
	return -ENOSPC;
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, space_info, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
	return;
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 expected;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space. If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (btrfs_can_overcommit(fs_info, space_info, to_reclaim,
				 BTRFS_RESERVE_FLUSH_ALL))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (btrfs_can_overcommit(fs_info, space_info, SZ_1M,
				 BTRFS_RESERVE_FLUSH_ALL))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}
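
/*
 * Worked example for the helper above (illustrative only, with assumed
 * numbers): with no queued tickets (reclaim_size == 0), no overage, and
 * 8 online CPUs we first test to_reclaim = min(8 * 1M, 16M) = 8M; if
 * that can still be overcommitted there is nothing to reclaim. Otherwise
 * on a space_info with total_bytes = 10G and used = 9.8G, expected is
 * 95% of total (9.5G) while a 1M overcommit is still possible, so
 * to_reclaim becomes 9.8G - 9.5G = ~300M, clamped to what is actually
 * held in bytes_may_use + bytes_reserved.
 */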

static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets. The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets. This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	u64 first_ticket_bytes = 0;

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (ticket->steal &&
		    steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		/*
		 * may_commit_transaction will avoid committing the transaction
		 * if it doesn't feel like the space reclaimed by the commit
		 * would result in the ticket succeeding. However if we have a
		 * smaller ticket in the queue it may be small enough to be
		 * satisfied by committing the transaction, so if any
		 * subsequent ticket is smaller than the first ticket go ahead
		 * and send us back for another loop through the enospc flushing
		 * code.
		 */
		if (first_ticket_bytes == 0)
			first_ticket_bytes = ticket->bytes;
		else if (first_ticket_bytes > ticket->bytes)
			return true;

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}
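
/*
 * Example of the progress rule above (illustrative only, with assumed
 * sizes): with a 32M ticket at the head of the queue followed by a 512K
 * ticket, failing the 32M ticket and then finding the smaller 512K one
 * makes us return true, which sends the caller back through the flushing
 * states, since a transaction commit may well satisfy the smaller
 * reservation even though it could not satisfy the 32M one.
 */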

/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space. Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim. We would rather use that than possibly create an
		 * underutilized metadata chunk. So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction. If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier. Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent. This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space. But again this space is not
 *   immediately re-usable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * FLUSH_DELAYED_REFS
 *   The above two cases generate delayed refs that will affect
 *   ->total_bytes_pinned. However this counter can be inconsistent with
 *   reality if there are outstanding delayed refs. This is because we adjust
 *   the counter based solely on the current set of delayed refs and disregard
 *   any on-disk state which might include more refs. So for example, if we
 *   have an extent with 2 references, but we only drop 1, we'll see that there
 *   is a negative delayed ref count for the extent and assume that the space
 *   will be freed, and thus increase ->total_bytes_pinned.
 *
 *   Running the delayed refs gives us the actual real view of what will be
 *   freed at the transaction commit time. This stage will not actually free
 *   space for us, it just makes sure that may_commit_transaction() has all of
 *   the information it needs to make the right decision.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by the previous
 *   two stages. We will not commit the transaction if we don't think we're
 *   likely to satisfy our request, which means if our current free space +
 *   total_bytes_pinned < reservation we will not commit. This is why the
 *   previous states are actually important, to make sure we know for sure
 *   whether committing the transaction will allow us to make progress.
 */
static const enum btrfs_flush_state data_flush_states[] = {
	FLUSH_DELALLOC_WAIT,
	RUN_DELAYED_IPUTS,
	FLUSH_DELAYED_REFS,
	COMMIT_TRANS,
};
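
/*
 * Worked example of the data flush pipeline above (illustrative only,
 * with assumed sizes): overwriting an existing 1M extent reserves 1M of
 * data space up front. FLUSH_DELALLOC_WAIT writes out the replacement
 * extent and the ordered extent drops the old 1M, creating a delayed
 * ref; FLUSH_DELAYED_REFS turns that into 1M of ->total_bytes_pinned;
 * and only COMMIT_TRANS actually makes the 1M allocatable again.
 */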

static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 last_tickets_id;
	int flush_state = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
	space_info = fs_info->data_sinfo;

	spin_lock(&space_info->lock);
	if (list_empty(&space_info->tickets)) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		last_tickets_id = space_info->tickets_id;
		spin_unlock(&space_info->lock);
	}

	while (flush_state < ARRAY_SIZE(data_flush_states)) {
		flush_space(fs_info, space_info, U64_MAX,
			    data_flush_states[flush_state]);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = 0;
		}

		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
			if (space_info->full) {
				if (maybe_fail_all_tickets(fs_info, space_info))
					flush_state = 0;
				else
					space_info->flush = 0;
			} else {
				flush_state = 0;
			}
		}
		spin_unlock(&space_info->lock);
	}
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					struct reserve_ticket *ticket)
{
	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	}
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
 * @fs_info - the fs
 * @space_info - the space_info for the reservation
 * @ticket - the ticket for the reservation
 * @flush - how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		/*
		 * We were a priority ticket, so we need to delete ourselves
		 * from the list. Because we could have other priority tickets
		 * behind us that require less space, run
		 * btrfs_try_granting_tickets() to see if their reservations can
		 * now be made.
		 */
		if (!list_empty(&ticket->list)) {
			remove_ticket(space_info, ticket);
			btrfs_try_granting_tickets(fs_info, space_info);
		}

		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	return ret;
}

/*
 * This returns true if this flush state will go through the ordinary flushing
 * code.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
	       (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

/**
 * __reserve_bytes - try to reserve bytes from the space_info's space
 * @fs_info - the filesystem
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info. If there
 * is not enough space it will make an attempt to flush out space to make
 * room. It will do this by flushing delalloc if possible or committing the
 * transaction. If flush is BTRFS_RESERVE_NO_FLUSH then no attempts to regain
 * reservations will be made and this will fail if there is not enough space
 * already.
 */
static int __reserve_bytes(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *space_info, u64 orig_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	struct work_struct *async_work;
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	if (flush == BTRFS_RESERVE_FLUSH_DATA)
		async_work = &fs_info->async_data_reclaim_work;
	else
		async_work = &fs_info->async_reclaim_work;

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info, used) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
}
1362 * 1363 * If we are a priority flusher then we just need to add our ticket to 1364 * the list and we will do our own flushing further down. 1365 */ 1366 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { 1367 ticket.bytes = orig_bytes; 1368 ticket.error = 0; 1369 space_info->reclaim_size += ticket.bytes; 1370 init_waitqueue_head(&ticket.wait); 1371 ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1372 if (flush == BTRFS_RESERVE_FLUSH_ALL || 1373 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || 1374 flush == BTRFS_RESERVE_FLUSH_DATA) { 1375 list_add_tail(&ticket.list, &space_info->tickets); 1376 if (!space_info->flush) { 1377 space_info->flush = 1; 1378 trace_btrfs_trigger_flush(fs_info, 1379 space_info->flags, 1380 orig_bytes, flush, 1381 "enospc"); 1382 queue_work(system_unbound_wq, async_work); 1383 } 1384 } else { 1385 list_add_tail(&ticket.list, 1386 &space_info->priority_tickets); 1387 } 1388 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 1389 used += orig_bytes; 1390 /* 1391 * We will do the space reservation dance during log replay, 1392 * which means we won't have fs_info->fs_root set, so don't do 1393 * the async reclaim as we will panic. 1394 */ 1395 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && 1396 need_do_async_reclaim(fs_info, space_info, used) && 1397 !work_busy(&fs_info->async_reclaim_work)) { 1398 trace_btrfs_trigger_flush(fs_info, space_info->flags, 1399 orig_bytes, flush, "preempt"); 1400 queue_work(system_unbound_wq, 1401 &fs_info->async_reclaim_work); 1402 } 1403 } 1404 spin_unlock(&space_info->lock); 1405 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH) 1406 return ret; 1407 1408 return handle_reserve_ticket(fs_info, space_info, &ticket, flush); 1409 } 1410 1411 /** 1412 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space 1413 * @root - the root we're allocating for 1414 * @block_rsv - the block_rsv we're allocating for 1415 * @orig_bytes - the number of bytes we want 1416 * @flush - whether or not we can flush to make our reservation 1417 * 1418 * This will reserve orig_bytes number of bytes from the space info associated 1419 * with the block_rsv. If there is not enough space it will make an attempt to 1420 * flush out space to make room. It will do this by flushing delalloc if 1421 * possible or committing the transaction. If flush is 0 then no attempts to 1422 * regain reservations will be made and this will fail if there is not enough 1423 * space already. 

/**
 * btrfs_reserve_data_bytes - try to reserve data bytes for an allocation
 * @fs_info - the filesystem
 * @bytes - the number of bytes we need
 * @flush - how we are allowed to flush
 *
 * This will reserve bytes from the data space info. If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	int ret;

	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);

	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
	}
	return ret;
}
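
/*
 * Minimal usage sketch for the helper above (hypothetical caller, not part
 * of this file): reserving data space ahead of a buffered write. Note the
 * ASSERTs above: BTRFS_RESERVE_FLUSH_DATA must not be used from within a
 * transaction context.
 */
#if 0	/* example only, never compiled */
	int ret = btrfs_reserve_data_bytes(fs_info, SZ_1M,
					   BTRFS_RESERVE_FLUSH_DATA);
	if (ret)
		return ret;	/* -ENOSPC or -EINTR */
#endif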