// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
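 *
 *   As a worked example (numbers invented for illustration): reserving a
 *   single 16KiB metadata extent walks the counters like so:
 *
 *     reserve:            bytes_may_use   0  -> 16K
 *     extent allocation:  bytes_may_use  16K -> 0,  bytes_reserved 0 -> 16K
 *     insert reference:   bytes_reserved 16K -> 0,  bytes_used     0 -> 16K
 *
 *   After the initial reserve every step conserves SUM(bytes_), the space
 *   simply migrates from "might be used" to "is used" as the allocation
 *   becomes real.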
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if so we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however hold reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space when
 *     our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     may_commit_transaction() is the ultimate arbiter on whether we commit the
 *     transaction or not.  In order to avoid constantly churning we do all the
 *     above flushing first and then commit the transaction as the last resort.
 *     However we need to take into account things like pinned space that would
 *     be freed, plus any delayed work we may not have gotten rid of in the case
 *     of metadata.
 *
 *   FORCE_COMMIT_TRANS
 *     For use by the preemptive flusher.  We use this to bypass the ticketing
 *     checks in may_commit_transaction, as we have more information about the
 *     overall state of the system and may want to commit the transaction ahead
 *     of actual ENOSPC conditions.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
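 *
 *   As a hedged example of the check (numbers invented): say total_bytes is
 *   8GiB of allocated metadata chunks, SUM(bytes_) is also 8GiB, and
 *   calc_available_free_space() derives avail = 256MiB from the unallocated
 *   disk.  A 128MiB reservation then overcommits fine
 *   (8G + 128M < 8G + 256M) while a 512MiB reservation does not, and has to
 *   take the ticketed flushing route instead.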
 */

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
                                 bool may_use_included)
{
        ASSERT(s_info);
        return s_info->bytes_used + s_info->bytes_reserved +
                s_info->bytes_pinned + s_info->bytes_readonly +
                s_info->bytes_zone_unusable +
                (may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        list_for_each_entry(found, head, list)
                found->full = 0;
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
        struct btrfs_space_info *space_info;
        int i;
        int ret;

        space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
        if (!space_info)
                return -ENOMEM;

        ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
                                  GFP_KERNEL);
        if (ret) {
                kfree(space_info);
                return ret;
        }

        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                INIT_LIST_HEAD(&space_info->block_groups[i]);
        init_rwsem(&space_info->groups_sem);
        spin_lock_init(&space_info->lock);
        space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
        space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
        INIT_LIST_HEAD(&space_info->ro_bgs);
        INIT_LIST_HEAD(&space_info->tickets);
        INIT_LIST_HEAD(&space_info->priority_tickets);
        space_info->clamp = 1;

        ret = btrfs_sysfs_add_space_info_type(info, space_info);
        if (ret)
                return ret;

        list_add(&space_info->list, &info->space_info);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                info->data_sinfo = space_info;

        return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
        struct btrfs_super_block *disk_super;
        u64 features;
        u64 flags;
        int mixed = 0;
        int ret;

        disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                return -EINVAL;

        features = btrfs_super_incompat_flags(disk_super);
        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
                mixed = 1;

        flags = BTRFS_BLOCK_GROUP_SYSTEM;
        ret = create_space_info(fs_info, flags);
        if (ret)
                goto out;

        if (mixed) {
                flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
                ret = create_space_info(fs_info, flags);
        } else {
                flags = BTRFS_BLOCK_GROUP_METADATA;
                ret = create_space_info(fs_info, flags);
                if (ret)
                        goto out;

                flags = BTRFS_BLOCK_GROUP_DATA;
                ret = create_space_info(fs_info, flags);
        }
out:
        return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             u64 bytes_readonly, u64 bytes_zone_unusable,
                             struct btrfs_space_info **space_info)
{
        struct btrfs_space_info *found;
        int factor;

        factor = btrfs_bg_type_to_factor(flags);

        found = btrfs_find_space_info(info, flags);
        ASSERT(found);
        spin_lock(&found->lock);
        found->total_bytes += total_bytes;
        found->disk_total += total_bytes * factor;
        found->bytes_used += bytes_used;
        found->disk_used += bytes_used * factor;
        found->bytes_readonly += bytes_readonly;
        found->bytes_zone_unusable += bytes_zone_unusable;
        if (total_bytes > 0)
                found->full = 0;
        btrfs_try_granting_tickets(info, found);
        spin_unlock(&found->lock);
        *space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
                                               u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        list_for_each_entry(found, head, list) {
                if (found->flags & flags)
                        return found;
        }
        return NULL;
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     enum btrfs_reserve_flush_enum flush)
{
        u64 profile;
        u64 avail;
        int factor;

        if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
                profile = btrfs_system_alloc_profile(fs_info);
        else
                profile = btrfs_metadata_alloc_profile(fs_info);

        avail = atomic64_read(&fs_info->free_chunk_space);

        /*
         * If we have dup, raid1 or raid10 then only half of the free
         * space is actually usable.  For raid56, the space info used
         * doesn't include the parity drive, so we don't have to
         * change the math.
         */
        factor = btrfs_bg_type_to_factor(profile);
        avail = div_u64(avail, factor);

        /*
         * If we aren't flushing all things, let us overcommit up to
         * 1/2 of the space.  If we can flush, don't let us overcommit
         * too much, let it overcommit up to 1/8 of the space.
         */
        if (flush == BTRFS_RESERVE_FLUSH_ALL)
                avail >>= 3;
        else
                avail >>= 1;
        return avail;
}
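/*
 * Illustration of the math above (assumed numbers): with 10GiB of
 * unallocated disk and a raid1 metadata profile, factor = 2 leaves 5GiB of
 * usable space.  A BTRFS_RESERVE_FLUSH_ALL caller, which can always flush
 * later, is only allowed 5G >> 3 = 640MiB of overcommit, while the flush
 * modes that cannot flush get 5G >> 1 = 2.5GiB to play with before tickets
 * start queueing.
 */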
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
                         struct btrfs_space_info *space_info, u64 bytes,
                         enum btrfs_reserve_flush_enum flush)
{
        u64 avail;
        u64 used;

        /* Don't overcommit when in mixed mode */
        if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
                return 0;

        used = btrfs_space_info_used(space_info, true);
        avail = calc_available_free_space(fs_info, space_info, flush);

        if (used + bytes < space_info->total_bytes + avail)
                return 1;
        return 0;
}

static void remove_ticket(struct btrfs_space_info *space_info,
                          struct reserve_ticket *ticket)
{
        if (!list_empty(&ticket->list)) {
                list_del_init(&ticket->list);
                ASSERT(space_info->reclaim_size >= ticket->bytes);
                space_info->reclaim_size -= ticket->bytes;
        }
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
                                struct btrfs_space_info *space_info)
{
        struct list_head *head;
        enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

        lockdep_assert_held(&space_info->lock);

        head = &space_info->priority_tickets;
again:
        while (!list_empty(head)) {
                struct reserve_ticket *ticket;
                u64 used = btrfs_space_info_used(space_info, true);

                ticket = list_first_entry(head, struct reserve_ticket, list);

                /* Check and see if our ticket can be satisfied now. */
                if ((used + ticket->bytes <= space_info->total_bytes) ||
                    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
                                         flush)) {
                        btrfs_space_info_update_bytes_may_use(fs_info,
                                                              space_info,
                                                              ticket->bytes);
                        remove_ticket(space_info, ticket);
                        ticket->bytes = 0;
                        space_info->tickets_id++;
                        wake_up(&ticket->wait);
                } else {
                        break;
                }
        }

        if (head == &space_info->priority_tickets) {
                head = &space_info->tickets;
                flush = BTRFS_RESERVE_FLUSH_ALL;
                goto again;
        }
}
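/*
 * Sketch of the expected caller pattern (illustrative, mirroring what the
 * block_rsv release paths do): anybody returning space re-runs the ticket
 * lists under the same space_info->lock the granting loop asserts:
 *
 *	spin_lock(&space_info->lock);
 *	btrfs_space_info_update_bytes_may_use(fs_info, space_info,
 *					      -num_bytes);
 *	btrfs_try_granting_tickets(fs_info, space_info);
 *	spin_unlock(&space_info->lock);
 *
 * Keeping the counter update and the granting atomic is what lets a waiting
 * ticket claim the returned space before a new reservation can race in.
 */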
#define DUMP_BLOCK_RSV(fs_info, rsv_name)                               \
do {                                                                    \
        struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;          \
        spin_lock(&__rsv->lock);                                        \
        btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",     \
                   __rsv->size, __rsv->reserved);                      \
        spin_unlock(&__rsv->lock);                                      \
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
                                    struct btrfs_space_info *info)
{
        lockdep_assert_held(&info->lock);

        btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
                   info->flags,
                   info->total_bytes - btrfs_space_info_used(info, true),
                   info->full ? "" : "not ");
        btrfs_info(fs_info,
                "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
                info->total_bytes, info->bytes_used, info->bytes_pinned,
                info->bytes_reserved, info->bytes_may_use,
                info->bytes_readonly, info->bytes_zone_unusable);

        DUMP_BLOCK_RSV(fs_info, global_block_rsv);
        DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
        DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
        DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
        DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
                           struct btrfs_space_info *info, u64 bytes,
                           int dump_block_groups)
{
        struct btrfs_block_group *cache;
        int index = 0;

        spin_lock(&info->lock);
        __btrfs_dump_space_info(fs_info, info);
        spin_unlock(&info->lock);

        if (!dump_block_groups)
                return;

        down_read(&info->groups_sem);
again:
        list_for_each_entry(cache, &info->block_groups[index], list) {
                spin_lock(&cache->lock);
                btrfs_info(fs_info,
                        "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
                        cache->start, cache->length, cache->used, cache->pinned,
                        cache->reserved, cache->zone_unusable,
                        cache->ro ? "[readonly]" : "");
                spin_unlock(&cache->lock);
                btrfs_dump_free_space(cache, bytes);
        }
        if (++index < BTRFS_NR_RAID_TYPES)
                goto again;
        up_read(&info->groups_sem);
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
                                        u64 to_reclaim)
{
        u64 bytes;
        u64 nr;

        bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
        nr = div64_u64(to_reclaim, bytes);
        if (!nr)
                nr = 1;
        return nr;
}
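/*
 * Rough feel for the numbers above (assuming a 16KiB nodesize):
 * btrfs_calc_insert_metadata_size(fs_info, 1) is the worst case cost of
 * inserting one item, 2 * 16K * BTRFS_MAX_LEVEL = 256KiB, so a request to
 * reclaim 1MiB maps to nr = 4 items.  Note how EXTENT_SIZE_PER_ITEM below
 * matches that 256KiB figure.
 */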
"[readonly]" : ""); 470 spin_unlock(&cache->lock); 471 btrfs_dump_free_space(cache, bytes); 472 } 473 if (++index < BTRFS_NR_RAID_TYPES) 474 goto again; 475 up_read(&info->groups_sem); 476 } 477 478 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, 479 u64 to_reclaim) 480 { 481 u64 bytes; 482 u64 nr; 483 484 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 485 nr = div64_u64(to_reclaim, bytes); 486 if (!nr) 487 nr = 1; 488 return nr; 489 } 490 491 #define EXTENT_SIZE_PER_ITEM SZ_256K 492 493 /* 494 * shrink metadata reservation for delalloc 495 */ 496 static void shrink_delalloc(struct btrfs_fs_info *fs_info, 497 struct btrfs_space_info *space_info, 498 u64 to_reclaim, bool wait_ordered) 499 { 500 struct btrfs_trans_handle *trans; 501 u64 delalloc_bytes; 502 u64 ordered_bytes; 503 u64 items; 504 long time_left; 505 int loops; 506 507 /* Calc the number of the pages we need flush for space reservation */ 508 if (to_reclaim == U64_MAX) { 509 items = U64_MAX; 510 } else { 511 /* 512 * to_reclaim is set to however much metadata we need to 513 * reclaim, but reclaiming that much data doesn't really track 514 * exactly, so increase the amount to reclaim by 2x in order to 515 * make sure we're flushing enough delalloc to hopefully reclaim 516 * some metadata reservations. 517 */ 518 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; 519 to_reclaim = items * EXTENT_SIZE_PER_ITEM; 520 } 521 522 trans = (struct btrfs_trans_handle *)current->journal_info; 523 524 delalloc_bytes = percpu_counter_sum_positive( 525 &fs_info->delalloc_bytes); 526 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); 527 if (delalloc_bytes == 0 && ordered_bytes == 0) 528 return; 529 530 /* 531 * If we are doing more ordered than delalloc we need to just wait on 532 * ordered extents, otherwise we'll waste time trying to flush delalloc 533 * that likely won't give us the space back we need. 534 */ 535 if (ordered_bytes > delalloc_bytes) 536 wait_ordered = true; 537 538 loops = 0; 539 while ((delalloc_bytes || ordered_bytes) && loops < 3) { 540 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT; 541 long nr_pages = min_t(u64, temp, LONG_MAX); 542 543 btrfs_start_delalloc_roots(fs_info, nr_pages, true); 544 545 loops++; 546 if (wait_ordered && !trans) { 547 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 548 } else { 549 time_left = schedule_timeout_killable(1); 550 if (time_left) 551 break; 552 } 553 554 spin_lock(&space_info->lock); 555 if (list_empty(&space_info->tickets) && 556 list_empty(&space_info->priority_tickets)) { 557 spin_unlock(&space_info->lock); 558 break; 559 } 560 spin_unlock(&space_info->lock); 561 562 delalloc_bytes = percpu_counter_sum_positive( 563 &fs_info->delalloc_bytes); 564 ordered_bytes = percpu_counter_sum_positive( 565 &fs_info->ordered_bytes); 566 } 567 } 568 569 /** 570 * Possibly commit the transaction if its ok to 571 * 572 * @fs_info: the filesystem 573 * @space_info: space_info we are checking for commit, either data or metadata 574 * 575 * This will check to make sure that committing the transaction will actually 576 * get us somewhere and then commit the transaction if it does. Otherwise it 577 * will return -ENOSPC. 
/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
                        struct btrfs_space_info *space_info, u64 num_bytes,
                        enum btrfs_flush_state state, bool for_preempt)
{
        struct btrfs_root *root = fs_info->extent_root;
        struct btrfs_trans_handle *trans;
        int nr;
        int ret = 0;

        switch (state) {
        case FLUSH_DELAYED_ITEMS_NR:
        case FLUSH_DELAYED_ITEMS:
                if (state == FLUSH_DELAYED_ITEMS_NR)
                        nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
                else
                        nr = -1;

                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_run_delayed_items_nr(trans, nr);
                btrfs_end_transaction(trans);
                break;
        case FLUSH_DELALLOC:
        case FLUSH_DELALLOC_WAIT:
                shrink_delalloc(fs_info, space_info, num_bytes,
                                state == FLUSH_DELALLOC_WAIT);
                break;
        case FLUSH_DELAYED_REFS_NR:
        case FLUSH_DELAYED_REFS:
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                if (state == FLUSH_DELAYED_REFS_NR)
                        nr = calc_reclaim_items_nr(fs_info, num_bytes);
                else
                        nr = 0;
                btrfs_run_delayed_refs(trans, nr);
                btrfs_end_transaction(trans);
                break;
        case ALLOC_CHUNK:
        case ALLOC_CHUNK_FORCE:
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_chunk_alloc(trans,
                                btrfs_get_alloc_profile(fs_info, space_info->flags),
                                (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
                                        CHUNK_ALLOC_FORCE);
                btrfs_end_transaction(trans);
                if (ret > 0 || ret == -ENOSPC)
                        ret = 0;
                break;
        case RUN_DELAYED_IPUTS:
                /*
                 * If we have pending delayed iputs then we could free up a
                 * bunch of pinned space, so make sure we run the iputs before
                 * we do our pinned bytes check below.
                 */
                btrfs_run_delayed_iputs(fs_info);
                btrfs_wait_on_delayed_iputs(fs_info);
                break;
        case COMMIT_TRANS:
                ret = may_commit_transaction(fs_info, space_info);
                break;
        case FORCE_COMMIT_TRANS:
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_commit_transaction(trans);
                break;
        default:
                ret = -ENOSPC;
                break;
        }

        trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
                                ret, for_preempt);
        return;
}
static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
                                 struct btrfs_space_info *space_info)
{
        u64 used;
        u64 avail;
        u64 to_reclaim = space_info->reclaim_size;

        lockdep_assert_held(&space_info->lock);

        avail = calc_available_free_space(fs_info, space_info,
                                          BTRFS_RESERVE_FLUSH_ALL);
        used = btrfs_space_info_used(space_info, true);

        /*
         * We may be flushing because suddenly we have less space than we had
         * before, and now we're well over-committed based on our current free
         * space.  If that's the case add in our overage so we make sure to put
         * appropriate pressure on the flushing state machine.
         */
        if (space_info->total_bytes + avail < used)
                to_reclaim += used - (space_info->total_bytes + avail);

        return to_reclaim;
}
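/*
 * Example of the overage case (invented numbers): with reclaim_size =
 * 512KiB, total_bytes = 10GiB and avail = 1GiB, but used = 12GiB because a
 * device disappeared, to_reclaim becomes 512K + (12G - 11G), i.e. just over
 * 1GiB, keeping heavy pressure on the flushers until we are back under the
 * overcommit line.
 */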
static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
                                    struct btrfs_space_info *space_info)
{
        u64 global_rsv_size = fs_info->global_block_rsv.reserved;
        u64 ordered, delalloc;
        u64 thresh = div_factor_fine(space_info->total_bytes, 98);
        u64 used;

        /* If we're just plain full then async reclaim just slows us down. */
        if ((space_info->bytes_used + space_info->bytes_reserved +
             global_rsv_size) >= thresh)
                return false;

        /*
         * We have tickets queued, bail so we don't compete with the async
         * flushers.
         */
        if (space_info->reclaim_size)
                return false;

        /*
         * If we have over half of the free space occupied by reservations or
         * pinned then we want to start flushing.
         *
         * We do not do the traditional thing here, which is to say
         *
         *   if (used >= ((total_bytes + avail) / 2))
         *     return 1;
         *
         * because this doesn't quite work how we want.  If we had more than 50%
         * of the space_info used by bytes_used and we had 0 available we'd just
         * constantly run the background flusher.  Instead we want it to kick in
         * if our reclaimable space exceeds our clamped free space.
         *
         * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
         * the following:
         *
         *   Amount of RAM        Minimum threshold       Maximum threshold
         *
         *        256GiB                     1GiB                    128GiB
         *        128GiB                   512MiB                     64GiB
         *         64GiB                   256MiB                     32GiB
         *         32GiB                   128MiB                     16GiB
         *         16GiB                    64MiB                      8GiB
         *
         * These are the range our thresholds will fall in, corresponding to how
         * much delalloc we need for the background flusher to kick in.
         */

        thresh = calc_available_free_space(fs_info, space_info,
                                           BTRFS_RESERVE_FLUSH_ALL);
        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_readonly + global_rsv_size;
        if (used < space_info->total_bytes)
                thresh += space_info->total_bytes - used;
        thresh >>= space_info->clamp;

        used = space_info->bytes_pinned;

        /*
         * If we have more ordered bytes than delalloc bytes then we're either
         * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
         * around.  Preemptive flushing is only useful in that it can free up
         * space before tickets need to wait for things to finish.  In the case
         * of ordered extents, preemptively waiting on ordered extents gets us
         * nothing, if our reservations are tied up in ordered extents we'll
         * simply have to slow down writers by forcing them to wait on ordered
         * extents.
         *
         * In the case that ordered is larger than delalloc, only include the
         * block reserves that we would actually be able to directly reclaim
         * from.  In this case if we're heavy on metadata operations this will
         * clearly be heavy enough to warrant preemptive flushing.  In the case
         * of heavy DIO or ordered reservations, preemptive flushing will just
         * waste time and cause us to slow down.
         *
         * We want to make sure we truly are maxed out on ordered however, so
         * cut ordered in half, and if it's still higher than delalloc then we
         * can keep flushing.  This is to avoid the case where we start
         * flushing, and now delalloc == ordered and we stop preemptively
         * flushing when we could still have several gigs of delalloc to flush.
         */
        ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
        delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
        if (ordered >= delalloc)
                used += fs_info->delayed_refs_rsv.reserved +
                        fs_info->delayed_block_rsv.reserved;
        else
                used += space_info->bytes_may_use - global_rsv_size;

        return (used >= thresh && !btrfs_fs_closing(fs_info) &&
                !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
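/*
 * Worked example of the clamped threshold (numbers assumed): say avail plus
 * the unused part of total_bytes comes to 64GiB of effective free space.
 * With clamp = 1 preemptive flushing starts once pinned plus reclaimable
 * reservations pass 32GiB; every time ticketed flushing is forced the clamp
 * grows, so at clamp = 8 a mere 256MiB of reclaimable space already kicks
 * the preemptive flusher.
 */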
static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
                                  struct btrfs_space_info *space_info,
                                  struct reserve_ticket *ticket)
{
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        u64 min_bytes;

        if (global_rsv->space_info != space_info)
                return false;

        spin_lock(&global_rsv->lock);
        min_bytes = div_factor(global_rsv->size, 1);
        if (global_rsv->reserved < min_bytes + ticket->bytes) {
                spin_unlock(&global_rsv->lock);
                return false;
        }
        global_rsv->reserved -= ticket->bytes;
        remove_ticket(space_info, ticket);
        ticket->bytes = 0;
        wake_up(&ticket->wait);
        space_info->tickets_id++;
        if (global_rsv->reserved < global_rsv->size)
                global_rsv->full = 0;
        spin_unlock(&global_rsv->lock);

        return true;
}
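/*
 * Note on the floor above: div_factor(size, 1) is size * 1 / 10, so a steal
 * only succeeds while it would leave at least 10% of the global reserve's
 * target size behind.  E.g. (illustrative) with a 512MiB global rsv, a
 * 16MiB ticket is stolen only while ->reserved is at least ~51MiB + 16MiB.
 */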
/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info:    fs_info for this fs
 * @space_info: the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
                                   struct btrfs_space_info *space_info)
{
        struct reserve_ticket *ticket;
        u64 tickets_id = space_info->tickets_id;
        u64 first_ticket_bytes = 0;

        if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
                btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
                __btrfs_dump_space_info(fs_info, space_info);
        }

        while (!list_empty(&space_info->tickets) &&
               tickets_id == space_info->tickets_id) {
                ticket = list_first_entry(&space_info->tickets,
                                          struct reserve_ticket, list);

                if (ticket->steal &&
                    steal_from_global_rsv(fs_info, space_info, ticket))
                        return true;

                /*
                 * may_commit_transaction will avoid committing the transaction
                 * if it doesn't feel like the space reclaimed by the commit
                 * would result in the ticket succeeding.  However if we have a
                 * smaller ticket in the queue it may be small enough to be
                 * satisfied by committing the transaction, so if any
                 * subsequent ticket is smaller than the first ticket go ahead
                 * and send us back for another loop through the enospc flushing
                 * code.
                 */
                if (first_ticket_bytes == 0)
                        first_ticket_bytes = ticket->bytes;
                else if (first_ticket_bytes > ticket->bytes)
                        return true;

                if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
                        btrfs_info(fs_info, "failing ticket with %llu bytes",
                                   ticket->bytes);

                remove_ticket(space_info, ticket);
                ticket->error = -ENOSPC;
                wake_up(&ticket->wait);

                /*
                 * We're just throwing tickets away, so more flushing may not
                 * trip over btrfs_try_granting_tickets, so we need to call it
                 * here to see if we can make progress with the next ticket in
                 * the list.
                 */
                btrfs_try_granting_tickets(fs_info, space_info);
        }
        return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        u64 to_reclaim;
        enum btrfs_flush_state flush_state;
        int commit_cycles = 0;
        u64 last_tickets_id;

        fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
        space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

        spin_lock(&space_info->lock);
        to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
        if (!to_reclaim) {
                space_info->flush = 0;
                spin_unlock(&space_info->lock);
                return;
        }
        last_tickets_id = space_info->tickets_id;
        spin_unlock(&space_info->lock);

        flush_state = FLUSH_DELAYED_ITEMS_NR;
        do {
                flush_space(fs_info, space_info, to_reclaim, flush_state, false);
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets)) {
                        space_info->flush = 0;
                        spin_unlock(&space_info->lock);
                        return;
                }
                to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
                                                              space_info);
                if (last_tickets_id == space_info->tickets_id) {
                        flush_state++;
                } else {
                        last_tickets_id = space_info->tickets_id;
                        flush_state = FLUSH_DELAYED_ITEMS_NR;
                        if (commit_cycles)
                                commit_cycles--;
                }

                /*
                 * We don't want to force a chunk allocation until we've tried
                 * pretty hard to reclaim space.  Think of the case where we
                 * freed up a bunch of space and so have a lot of pinned space
                 * to reclaim.  We would rather use that than possibly create an
                 * underutilized metadata chunk.  So if this is our first run
                 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
                 * commit the transaction.  If nothing has changed the next go
                 * around then we can force a chunk allocation.
                 */
                if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
                        flush_state++;

                if (flush_state > COMMIT_TRANS) {
                        commit_cycles++;
                        if (commit_cycles > 2) {
                                if (maybe_fail_all_tickets(fs_info, space_info)) {
                                        flush_state = FLUSH_DELAYED_ITEMS_NR;
                                        commit_cycles--;
                                } else {
                                        space_info->flush = 0;
                                }
                        } else {
                                flush_state = FLUSH_DELAYED_ITEMS_NR;
                        }
                }
                spin_unlock(&space_info->lock);
        } while (flush_state <= COMMIT_TRANS);
}
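/*
 * A sketch of the worst case walk above (hypothetical run): the states
 * advance FLUSH_DELAYED_ITEMS_NR through COMMIT_TRANS with no ticket
 * progress, commit_cycles becomes 1 and we restart from the top, this time
 * allowing ALLOC_CHUNK_FORCE.  Only once a run ends with commit_cycles > 2
 * do we give up and start failing tickets via maybe_fail_all_tickets();
 * any ticket progress along the way resets the state machine and walks
 * commit_cycles back down.
 */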
/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 80% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        struct btrfs_block_rsv *delayed_block_rsv;
        struct btrfs_block_rsv *delayed_refs_rsv;
        struct btrfs_block_rsv *global_rsv;
        struct btrfs_block_rsv *trans_rsv;
        int loops = 0;

        fs_info = container_of(work, struct btrfs_fs_info,
                               preempt_reclaim_work);
        space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
        delayed_block_rsv = &fs_info->delayed_block_rsv;
        delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        global_rsv = &fs_info->global_block_rsv;
        trans_rsv = &fs_info->trans_block_rsv;

        spin_lock(&space_info->lock);
        while (need_preemptive_reclaim(fs_info, space_info)) {
                enum btrfs_flush_state flush;
                u64 delalloc_size = 0;
                u64 to_reclaim, block_rsv_size;
                u64 global_rsv_size = global_rsv->reserved;

                loops++;

                /*
                 * We don't have a precise counter for the metadata being
                 * reserved for delalloc, so we'll approximate it by subtracting
                 * out the block rsv's space from the bytes_may_use.  If that
                 * amount is higher than the individual reserves, then we can
                 * assume it's tied up in delalloc reservations.
                 */
                block_rsv_size = global_rsv_size +
                        delayed_block_rsv->reserved +
                        delayed_refs_rsv->reserved +
                        trans_rsv->reserved;
                if (block_rsv_size < space_info->bytes_may_use)
                        delalloc_size = space_info->bytes_may_use - block_rsv_size;
                spin_unlock(&space_info->lock);

                /*
                 * We don't want to include the global_rsv in our calculation,
                 * because that's space we can't touch.  Subtract it from the
                 * block_rsv_size for the next checks.
                 */
                block_rsv_size -= global_rsv_size;

                /*
                 * We really want to avoid flushing delalloc too much, as it
                 * could result in poor allocation patterns, so only flush it if
                 * it's larger than the rest of the pools combined.
                 */
                if (delalloc_size > block_rsv_size) {
                        to_reclaim = delalloc_size;
                        flush = FLUSH_DELALLOC;
                } else if (space_info->bytes_pinned >
                           (delayed_block_rsv->reserved +
                            delayed_refs_rsv->reserved)) {
                        to_reclaim = space_info->bytes_pinned;
                        flush = FORCE_COMMIT_TRANS;
                } else if (delayed_block_rsv->reserved >
                           delayed_refs_rsv->reserved) {
                        to_reclaim = delayed_block_rsv->reserved;
                        flush = FLUSH_DELAYED_ITEMS_NR;
                } else {
                        to_reclaim = delayed_refs_rsv->reserved;
                        flush = FLUSH_DELAYED_REFS_NR;
                }

                /*
                 * We don't want to reclaim everything, just a portion, so scale
                 * down the to_reclaim by 1/4.  If it takes us down to 0,
                 * reclaim 1 item's worth.
                 */
                to_reclaim >>= 2;
                if (!to_reclaim)
                        to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
                flush_space(fs_info, space_info, to_reclaim, flush, true);
                cond_resched();
                spin_lock(&space_info->lock);
        }

        /* We only went through once, back off our clamping. */
        if (loops == 1 && !space_info->reclaim_size)
                space_info->clamp = max(1, space_info->clamp - 1);
        trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
        spin_unlock(&space_info->lock);
}
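/*
 * Example of the pool selection above (numbers invented): with
 * bytes_may_use = 3GiB, global = 512MiB, delayed items = 256MiB, delayed
 * refs = 128MiB and trans = 64MiB, the implied delalloc reservation is
 * 3G - 960M ~= 2.1GiB, which dwarfs the 448MiB of non-global reserves, so
 * we pick FLUSH_DELALLOC and ask for a quarter of it (~528MiB) this pass.
 */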
/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier.  Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent.  This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space.  But again this space is not
 *   immediately re-usable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * FLUSH_DELAYED_REFS
 *   The above two cases generate delayed refs that will affect
 *   ->total_bytes_pinned.  However this counter can be inconsistent with
 *   reality if there are outstanding delayed refs.  This is because we adjust
 *   the counter based solely on the current set of delayed refs and disregard
 *   any on-disk state which might include more refs.  So for example, if we
 *   have an extent with 2 references, but we only drop 1, we'll see that there
 *   is a negative delayed ref count for the extent and assume that the space
 *   will be freed, and thus increase ->total_bytes_pinned.
 *
 *   Running the delayed refs gives us the actual real view of what will be
 *   freed at the transaction commit time.  This stage will not actually free
 *   space for us, it just makes sure that may_commit_transaction() has all of
 *   the information it needs to make the right decision.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by the previous
 *   two stages.  We will not commit the transaction if we don't think we're
 *   likely to satisfy our request, which means if our current free space +
 *   total_bytes_pinned < reservation we will not commit.  This is why the
 *   previous states are actually important, to make sure we know for sure
 *   whether committing the transaction will allow us to make progress.
 *
 * ALLOC_CHUNK_FORCE
 *   For data we start with alloc chunk force, however we could have been full
 *   before, and then the transaction commit could have freed new block groups,
 *   so if we now have space to allocate do the force chunk allocation.
 */
static const enum btrfs_flush_state data_flush_states[] = {
        FLUSH_DELALLOC_WAIT,
        RUN_DELAYED_IPUTS,
        FLUSH_DELAYED_REFS,
        COMMIT_TRANS,
        ALLOC_CHUNK_FORCE,
};
static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        u64 last_tickets_id;
        enum btrfs_flush_state flush_state = 0;

        fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
        space_info = fs_info->data_sinfo;

        spin_lock(&space_info->lock);
        if (list_empty(&space_info->tickets)) {
                space_info->flush = 0;
                spin_unlock(&space_info->lock);
                return;
        }
        last_tickets_id = space_info->tickets_id;
        spin_unlock(&space_info->lock);

        while (!space_info->full) {
                flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets)) {
                        space_info->flush = 0;
                        spin_unlock(&space_info->lock);
                        return;
                }
                last_tickets_id = space_info->tickets_id;
                spin_unlock(&space_info->lock);
        }

        while (flush_state < ARRAY_SIZE(data_flush_states)) {
                flush_space(fs_info, space_info, U64_MAX,
                            data_flush_states[flush_state], false);
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets)) {
                        space_info->flush = 0;
                        spin_unlock(&space_info->lock);
                        return;
                }

                if (last_tickets_id == space_info->tickets_id) {
                        flush_state++;
                } else {
                        last_tickets_id = space_info->tickets_id;
                        flush_state = 0;
                }

                if (flush_state >= ARRAY_SIZE(data_flush_states)) {
                        if (space_info->full) {
                                if (maybe_fail_all_tickets(fs_info, space_info))
                                        flush_state = 0;
                                else
                                        space_info->flush = 0;
                        } else {
                                flush_state = 0;
                        }
                }
                spin_unlock(&space_info->lock);
        }
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
        INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
        INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
        INIT_WORK(&fs_info->preempt_reclaim_work,
                  btrfs_preempt_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
        FLUSH_DELAYED_ITEMS_NR,
        FLUSH_DELAYED_ITEMS,
        ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
        FLUSH_DELAYED_ITEMS_NR,
        FLUSH_DELAYED_ITEMS,
        FLUSH_DELAYED_REFS_NR,
        FLUSH_DELAYED_REFS,
        FLUSH_DELALLOC,
        FLUSH_DELALLOC_WAIT,
        ALLOC_CHUNK,
        COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
                                struct btrfs_space_info *space_info,
                                struct reserve_ticket *ticket,
                                const enum btrfs_flush_state *states,
                                int states_nr)
{
        u64 to_reclaim;
        int flush_state;

        spin_lock(&space_info->lock);
        to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
        if (!to_reclaim) {
                spin_unlock(&space_info->lock);
                return;
        }
        spin_unlock(&space_info->lock);

        flush_state = 0;
        do {
                flush_space(fs_info, space_info, to_reclaim, states[flush_state],
                            false);
                flush_state++;
                spin_lock(&space_info->lock);
                if (ticket->bytes == 0) {
                        spin_unlock(&space_info->lock);
                        return;
                }
                spin_unlock(&space_info->lock);
        } while (flush_state < states_nr);
}
static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
                                        struct btrfs_space_info *space_info,
                                        struct reserve_ticket *ticket)
{
        while (!space_info->full) {
                flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
                spin_lock(&space_info->lock);
                if (ticket->bytes == 0) {
                        spin_unlock(&space_info->lock);
                        return;
                }
                spin_unlock(&space_info->lock);
        }
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
                                struct btrfs_space_info *space_info,
                                struct reserve_ticket *ticket)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        spin_lock(&space_info->lock);
        while (ticket->bytes > 0 && ticket->error == 0) {
                ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
                if (ret) {
                        /*
                         * Delete us from the list.  After we unlock the space
                         * info, we don't want the async reclaim job to reserve
                         * space for this ticket.  If that would happen, then the
                         * ticket's task would not know that space was reserved
                         * despite getting an error, resulting in a space leak
                         * (bytes_may_use counter of our space_info).
                         */
                        remove_ticket(space_info, ticket);
                        ticket->error = -EINTR;
                        break;
                }
                spin_unlock(&space_info->lock);

                schedule();

                finish_wait(&ticket->wait, &wait);
                spin_lock(&space_info->lock);
        }
        spin_unlock(&space_info->lock);
}

/**
 * Do the appropriate flushing and waiting for a ticket
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
                                 struct btrfs_space_info *space_info,
                                 struct reserve_ticket *ticket,
                                 u64 start_ns, u64 orig_bytes,
                                 enum btrfs_reserve_flush_enum flush)
{
        int ret;

        switch (flush) {
        case BTRFS_RESERVE_FLUSH_DATA:
        case BTRFS_RESERVE_FLUSH_ALL:
        case BTRFS_RESERVE_FLUSH_ALL_STEAL:
                wait_reserve_ticket(fs_info, space_info, ticket);
                break;
        case BTRFS_RESERVE_FLUSH_LIMIT:
                priority_reclaim_metadata_space(fs_info, space_info, ticket,
                                                priority_flush_states,
                                                ARRAY_SIZE(priority_flush_states));
                break;
        case BTRFS_RESERVE_FLUSH_EVICT:
                priority_reclaim_metadata_space(fs_info, space_info, ticket,
                                                evict_flush_states,
                                                ARRAY_SIZE(evict_flush_states));
                break;
        case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
                priority_reclaim_data_space(fs_info, space_info, ticket);
                break;
        default:
                ASSERT(0);
                break;
        }

        spin_lock(&space_info->lock);
        ret = ticket->error;
        if (ticket->bytes || ticket->error) {
                /*
                 * We were a priority ticket, so we need to delete ourselves
                 * from the list.  Because we could have other priority tickets
                 * behind us that require less space, run
                 * btrfs_try_granting_tickets() to see if their reservations can
                 * now be made.
                 */
                if (!list_empty(&ticket->list)) {
                        remove_ticket(space_info, ticket);
                        btrfs_try_granting_tickets(fs_info, space_info);
                }

                if (!ret)
                        ret = -ENOSPC;
        }
        spin_unlock(&space_info->lock);
        ASSERT(list_empty(&ticket->list));
        /*
         * Check that we can't have an error set if the reservation succeeded,
         * as that would confuse tasks and lead them to error out without
         * releasing reserved space (if an error happens the expectation is that
         * space wasn't reserved at all).
         */
        ASSERT(!(ticket->bytes == 0 && ticket->error));
        trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
                                   start_ns, flush, ticket->error);
        return ret;
}
/*
 * This returns true if this flush state will go through the ordinary flushing
 * code.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
        return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
               (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
                                       struct btrfs_space_info *space_info)
{
        u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
        u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);

        /*
         * If we're heavy on ordered operations then clamping won't help us.  We
         * need to clamp specifically to keep up with dirtying buffered
         * writers, because there's not a 1:1 correlation of writing delalloc
         * and freeing space, like there is with flushing delayed refs or
         * delayed nodes.  If we're already more ordered than delalloc then
         * we're keeping up, otherwise we aren't and should probably clamp.
         */
        if (ordered < delalloc)
                space_info->clamp = min(space_info->clamp + 1, 8);
}
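/*
 * E.g. (illustrative): a buffered-write heavy workload that keeps forcing
 * tickets walks clamp from 1 towards 8, dropping the preemptive threshold
 * in need_preemptive_reclaim() from free/2 down to free/256, while the
 * preemptive worker walks it back one step per run in which it was not
 * actually needed.
 */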
/**
 * Try to reserve bytes from the block_rsv's space
 *
 * @fs_info:    the filesystem
 * @space_info: space info we want to allocate from
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_bytes(struct btrfs_fs_info *fs_info,
                           struct btrfs_space_info *space_info, u64 orig_bytes,
                           enum btrfs_reserve_flush_enum flush)
{
        struct work_struct *async_work;
        struct reserve_ticket ticket;
        u64 start_ns = 0;
        u64 used;
        int ret = 0;
        bool pending_tickets;

        ASSERT(orig_bytes);
        ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

        if (flush == BTRFS_RESERVE_FLUSH_DATA)
                async_work = &fs_info->async_data_reclaim_work;
        else
                async_work = &fs_info->async_reclaim_work;

        spin_lock(&space_info->lock);
        ret = -ENOSPC;
        used = btrfs_space_info_used(space_info, true);

        /*
         * We don't want NO_FLUSH allocations to jump everybody, they can
         * generally handle ENOSPC in a different way, so treat them the same as
         * normal flushers when it comes to skipping pending tickets.
         */
        if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
                pending_tickets = !list_empty(&space_info->tickets) ||
                        !list_empty(&space_info->priority_tickets);
        else
                pending_tickets = !list_empty(&space_info->priority_tickets);

        /*
         * Carry on if we have enough space (short-circuit) OR call
         * can_overcommit() to ensure we can overcommit to continue.
         */
        if (!pending_tickets &&
            ((used + orig_bytes <= space_info->total_bytes) ||
             btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
                btrfs_space_info_update_bytes_may_use(fs_info, space_info,
                                                      orig_bytes);
                ret = 0;
        }

        /*
         * If we couldn't make a reservation then setup our reservation ticket
         * and kick the async worker if it's not already running.
         *
         * If we are a priority flusher then we just need to add our ticket to
         * the list and we will do our own flushing further down.
         */
        if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
                ticket.bytes = orig_bytes;
                ticket.error = 0;
                space_info->reclaim_size += ticket.bytes;
                init_waitqueue_head(&ticket.wait);
                ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
                if (trace_btrfs_reserve_ticket_enabled())
                        start_ns = ktime_get_ns();

                if (flush == BTRFS_RESERVE_FLUSH_ALL ||
                    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
                    flush == BTRFS_RESERVE_FLUSH_DATA) {
                        list_add_tail(&ticket.list, &space_info->tickets);
                        if (!space_info->flush) {
                                /*
                                 * We were forced to add a reserve ticket, so
                                 * our preemptive flushing is unable to keep
                                 * up.  Clamp down on the threshold for the
                                 * preemptive flushing in order to keep up with
                                 * the workload.
                                 */
                                maybe_clamp_preempt(fs_info, space_info);

                                space_info->flush = 1;
                                trace_btrfs_trigger_flush(fs_info,
                                                          space_info->flags,
                                                          orig_bytes, flush,
                                                          "enospc");
                                queue_work(system_unbound_wq, async_work);
                        }
                } else {
                        list_add_tail(&ticket.list,
                                      &space_info->priority_tickets);
                }
        } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
                used += orig_bytes;
                /*
                 * We will do the space reservation dance during log replay,
                 * which means we won't have fs_info->fs_root set, so don't do
                 * the async reclaim as we will panic.
                 */
                if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
                    !work_busy(&fs_info->preempt_reclaim_work) &&
                    need_preemptive_reclaim(fs_info, space_info)) {
                        trace_btrfs_trigger_flush(fs_info, space_info->flags,
                                                  orig_bytes, flush, "preempt");
                        queue_work(system_unbound_wq,
                                   &fs_info->preempt_reclaim_work);
                }
        }
        spin_unlock(&space_info->lock);
        if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
                return ret;

        return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
                                     orig_bytes, flush);
}
/**
 * Try to reserve metadata bytes from the block_rsv's space
 *
 * @root:       the root we're allocating for
 * @block_rsv:  block_rsv we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
                                 struct btrfs_block_rsv *block_rsv,
                                 u64 orig_bytes,
                                 enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        int ret;

        ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
        if (ret == -ENOSPC &&
            unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
                if (block_rsv != global_rsv &&
                    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
                        ret = 0;
        }
        if (ret == -ENOSPC) {
                trace_btrfs_space_reservation(fs_info, "space_info:enospc",
                                              block_rsv->space_info->flags,
                                              orig_bytes, 1);

                if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
                        btrfs_dump_space_info(fs_info, block_rsv->space_info,
                                              orig_bytes, 0);
        }
        return ret;
}

/**
 * Try to reserve data bytes for an allocation
 *
 * @fs_info: the filesystem
 * @bytes:   number of bytes we need
 * @flush:   how we are allowed to flush
 *
 * This will reserve bytes from the data space info.  If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
                             enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
        int ret;

        ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
               flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
        ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);

        ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
        if (ret == -ENOSPC) {
                trace_btrfs_space_reservation(fs_info, "space_info:enospc",
                                              data_sinfo->flags, bytes, 1);
                if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
                        btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
        }
        return ret;
}