1 // SPDX-License-Identifier: GPL-2.0 2 3 #include "misc.h" 4 #include "ctree.h" 5 #include "space-info.h" 6 #include "sysfs.h" 7 #include "volumes.h" 8 #include "free-space-cache.h" 9 #include "ordered-data.h" 10 #include "transaction.h" 11 #include "block-group.h" 12 13 /* 14 * HOW DOES SPACE RESERVATION WORK 15 * 16 * If you want to know about delalloc specifically, there is a separate comment 17 * for that with the delalloc code. This comment is about how the whole system 18 * works generally. 19 * 20 * BASIC CONCEPTS 21 * 22 * 1) space_info. This is the ultimate arbiter of how much space we can use. 23 * There's a description of the bytes_ fields with the struct declaration, 24 * refer to that for specifics on each field. Suffice it to say that for 25 * reservations we care about total_bytes - SUM(space_info->bytes_) when 26 * determining if there is space to make an allocation. There is a space_info 27 * for METADATA, SYSTEM, and DATA areas. 28 * 29 * 2) block_rsv's. These are basically buckets for every different type of 30 * metadata reservation we have. You can see the comment in the block_rsv 31 * code on the rules for each type, but generally block_rsv->reserved is how 32 * much space is accounted for in space_info->bytes_may_use. 33 * 34 * 3) btrfs_calc*_size. These are the worst case calculations we use based 35 * on the number of items we will want to modify. We have one for changing 36 * items, and one for inserting new items. Generally we use these helpers to 37 * determine the size of the block reserves, and then use the actual bytes 38 * values to adjust the space_info counters. 39 * 40 * MAKING RESERVATIONS, THE NORMAL CASE 41 * 42 * We call into either btrfs_reserve_data_bytes() or 43 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with 44 * the num_bytes we want to reserve. 45 * 46 * ->reserve 47 * space_info->bytes_may_use += num_bytes 48 * 49 * ->extent allocation 50 * Call btrfs_add_reserved_bytes() which does 51 * space_info->bytes_may_use -= num_bytes 52 * space_info->bytes_reserved += extent_bytes 53 * 54 * ->insert reference 55 * Call btrfs_update_block_group() which does 56 * space_info->bytes_reserved -= extent_bytes 57 * space_info->bytes_used += extent_bytes 58 * 59 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority) 60 * 61 * Assume we are unable to simply make the reservation because we do not have 62 * enough space. 63 * 64 * -> __reserve_bytes 65 * create a reserve_ticket with ->bytes set to our reservation, add it to 66 * the tail of space_info->tickets, kick async flush thread 67 * 68 * ->handle_reserve_ticket 69 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set 70 * on the ticket. 71 * 72 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space 73 * Flushes various things attempting to free up space. 74 * 75 * -> btrfs_try_granting_tickets() 76 * This is called by anything that either subtracts space from 77 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the 78 * space_info->total_bytes. This loops through the ->priority_tickets and 79 * then the ->tickets list checking to see if the reservation can be 80 * completed. If it can the space is added to space_info->bytes_may_use and 81 * the ticket is woken up. 82 * 83 * -> ticket wakeup 84 * Check if ->bytes == 0; if so we got our reservation and we can carry 85 * on, if not return the appropriate error (ENOSPC, but can be EINTR if we 86 * were interrupted.)
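 *
 * As a rough sketch of the normal case from the caller's side (root,
 * block_rsv and num_bytes here are placeholders for illustration, not taken
 * from any particular call site):
 *
 *   ret = btrfs_reserve_metadata_bytes(root, block_rsv, num_bytes,
 *                                      BTRFS_RESERVE_FLUSH_ALL);
 *   if (ret)
 *       return ret;
 *
 * On success num_bytes has been added to space_info->bytes_may_use and the
 * caller can go on to allocate extents; on failure nothing is reserved and
 * ret carries the error described above.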
87 * 88 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY 89 * 90 * Same as the above, except we add ourselves to the 91 * space_info->priority_tickets, and we do not use ticket->wait, we simply 92 * call flush_space() ourselves for the states that are safe for us to call 93 * without deadlocking and hope for the best. 94 * 95 * THE FLUSHING STATES 96 * 97 * Generally speaking we will have two cases for each state, a "nice" state 98 * and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to 99 * reduce the locking overhead on the various trees, and even to keep from 100 * doing any work at all in the case of delayed refs. Each of these delayed 101 * things, however, holds reservations, and so letting them run allows us to 102 * reclaim space so we can make new reservations. 103 * 104 * FLUSH_DELAYED_ITEMS 105 * Every inode has a delayed item to update the inode. Take a simple write 106 * for example, we would update the inode item at write time to update the 107 * mtime, and then again at finish_ordered_io() time in order to update the 108 * isize or bytes. We keep these delayed items to coalesce these operations 109 * into a single operation done on demand. These are an easy way to reclaim 110 * metadata space. 111 * 112 * FLUSH_DELALLOC 113 * Look at the delalloc comment to get an idea of how much space is reserved 114 * for delayed allocation. We can reclaim some of this space simply by 115 * running delalloc, but usually we need to wait for ordered extents to 116 * reclaim the bulk of this space. 117 * 118 * FLUSH_DELAYED_REFS 119 * We have a block reserve for the outstanding delayed refs space, and every 120 * delayed ref operation holds a reservation. Running these is a quick way 121 * to reclaim space, but we want to hold this until the end because COW can 122 * churn a lot and we can avoid making some extent tree modifications if we 123 * are able to delay for as long as possible. 124 * 125 * ALLOC_CHUNK 126 * We will skip this the first time through space reservation, because of 127 * overcommit, and because we don't want to have a lot of useless metadata 128 * space when our worst case reservations will likely never come true. 129 * 130 * RUN_DELAYED_IPUTS 131 * If we're freeing inodes we're likely freeing checksums, file extent 132 * items, and extent tree items. Loads of space could be freed up by these 133 * operations, however they won't be usable until the transaction commits. 134 * 135 * COMMIT_TRANS 136 * This will commit the transaction. Historically we had a lot of logic 137 * surrounding whether or not we'd commit the transaction, but this was born 138 * out of a pre-tickets era where we could end up committing the transaction 139 * thousands of times in a row without making progress. Now thanks to our 140 * ticketing system we know if we're not making progress and can error 141 * everybody out after a few commits rather than burning the disk hoping for 142 * a different answer. 143 * 144 * OVERCOMMIT 145 * 146 * Because we hold so many reservations for metadata we will allow you to 147 * reserve more space than is currently free in the currently allocated 148 * metadata space. This only happens with metadata; data does not allow 149 * overcommitting. 150 * 151 * You can see the current logic for when we allow overcommit in 152 * btrfs_can_overcommit(), but it only applies to unallocated space. If there 153 * is no unallocated space to be had, all reservations are kept within the 154 * free space in the allocated metadata chunks.
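 *
 * As a rough worked example (the numbers are purely illustrative): with a
 * single-device SINGLE metadata profile, 8GiB of allocated metadata chunks
 * and 4GiB of unallocated device space, calc_available_free_space() lets a
 * BTRFS_RESERVE_FLUSH_ALL reservation overcommit by up to 4GiB >> 3 = 512MiB
 * beyond total_bytes, while the other flush modes may overcommit by up to
 * 4GiB >> 1 = 2GiB.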
155 * 156 * Because of overcommitting, you generally want to use the 157 * btrfs_can_overcommit() logic for metadata allocations, as it does the right 158 * thing with or without extra unallocated space. 159 */ 160 161 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, 162 bool may_use_included) 163 { 164 ASSERT(s_info); 165 return s_info->bytes_used + s_info->bytes_reserved + 166 s_info->bytes_pinned + s_info->bytes_readonly + 167 s_info->bytes_zone_unusable + 168 (may_use_included ? s_info->bytes_may_use : 0); 169 } 170 171 /* 172 * after adding space to the filesystem, we need to clear the full flags 173 * on all the space infos. 174 */ 175 void btrfs_clear_space_info_full(struct btrfs_fs_info *info) 176 { 177 struct list_head *head = &info->space_info; 178 struct btrfs_space_info *found; 179 180 list_for_each_entry(found, head, list) 181 found->full = 0; 182 } 183 184 static int create_space_info(struct btrfs_fs_info *info, u64 flags) 185 { 186 187 struct btrfs_space_info *space_info; 188 int i; 189 int ret; 190 191 space_info = kzalloc(sizeof(*space_info), GFP_NOFS); 192 if (!space_info) 193 return -ENOMEM; 194 195 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 196 INIT_LIST_HEAD(&space_info->block_groups[i]); 197 init_rwsem(&space_info->groups_sem); 198 spin_lock_init(&space_info->lock); 199 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; 200 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 201 INIT_LIST_HEAD(&space_info->ro_bgs); 202 INIT_LIST_HEAD(&space_info->tickets); 203 INIT_LIST_HEAD(&space_info->priority_tickets); 204 space_info->clamp = 1; 205 206 ret = btrfs_sysfs_add_space_info_type(info, space_info); 207 if (ret) 208 return ret; 209 210 list_add(&space_info->list, &info->space_info); 211 if (flags & BTRFS_BLOCK_GROUP_DATA) 212 info->data_sinfo = space_info; 213 214 return ret; 215 } 216 217 int btrfs_init_space_info(struct btrfs_fs_info *fs_info) 218 { 219 struct btrfs_super_block *disk_super; 220 u64 features; 221 u64 flags; 222 int mixed = 0; 223 int ret; 224 225 disk_super = fs_info->super_copy; 226 if (!btrfs_super_root(disk_super)) 227 return -EINVAL; 228 229 features = btrfs_super_incompat_flags(disk_super); 230 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 231 mixed = 1; 232 233 flags = BTRFS_BLOCK_GROUP_SYSTEM; 234 ret = create_space_info(fs_info, flags); 235 if (ret) 236 goto out; 237 238 if (mixed) { 239 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; 240 ret = create_space_info(fs_info, flags); 241 } else { 242 flags = BTRFS_BLOCK_GROUP_METADATA; 243 ret = create_space_info(fs_info, flags); 244 if (ret) 245 goto out; 246 247 flags = BTRFS_BLOCK_GROUP_DATA; 248 ret = create_space_info(fs_info, flags); 249 } 250 out: 251 return ret; 252 } 253 254 void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags, 255 u64 total_bytes, u64 bytes_used, 256 u64 bytes_readonly, u64 bytes_zone_unusable, 257 struct btrfs_space_info **space_info) 258 { 259 struct btrfs_space_info *found; 260 int factor; 261 262 factor = btrfs_bg_type_to_factor(flags); 263 264 found = btrfs_find_space_info(info, flags); 265 ASSERT(found); 266 spin_lock(&found->lock); 267 found->total_bytes += total_bytes; 268 found->disk_total += total_bytes * factor; 269 found->bytes_used += bytes_used; 270 found->disk_used += bytes_used * factor; 271 found->bytes_readonly += bytes_readonly; 272 found->bytes_zone_unusable += bytes_zone_unusable; 273 if (total_bytes > 0) 274 found->full = 0; 275 btrfs_try_granting_tickets(info, found); 276 spin_unlock(&found->lock); 
277 *space_info = found; 278 } 279 280 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, 281 u64 flags) 282 { 283 struct list_head *head = &info->space_info; 284 struct btrfs_space_info *found; 285 286 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK; 287 288 list_for_each_entry(found, head, list) { 289 if (found->flags & flags) 290 return found; 291 } 292 return NULL; 293 } 294 295 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, 296 struct btrfs_space_info *space_info, 297 enum btrfs_reserve_flush_enum flush) 298 { 299 u64 profile; 300 u64 avail; 301 int factor; 302 303 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) 304 profile = btrfs_system_alloc_profile(fs_info); 305 else 306 profile = btrfs_metadata_alloc_profile(fs_info); 307 308 avail = atomic64_read(&fs_info->free_chunk_space); 309 310 /* 311 * If we have dup, raid1 or raid10 then only half of the free 312 * space is actually usable. For raid56, the space info used 313 * doesn't include the parity drive, so we don't have to 314 * change the math 315 */ 316 factor = btrfs_bg_type_to_factor(profile); 317 avail = div_u64(avail, factor); 318 319 /* 320 * If we aren't flushing all things, let us overcommit up to 321 * 1/2th of the space. If we can flush, don't let us overcommit 322 * too much, let it overcommit up to 1/8 of the space. 323 */ 324 if (flush == BTRFS_RESERVE_FLUSH_ALL) 325 avail >>= 3; 326 else 327 avail >>= 1; 328 return avail; 329 } 330 331 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, 332 struct btrfs_space_info *space_info, u64 bytes, 333 enum btrfs_reserve_flush_enum flush) 334 { 335 u64 avail; 336 u64 used; 337 338 /* Don't overcommit when in mixed mode */ 339 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) 340 return 0; 341 342 used = btrfs_space_info_used(space_info, true); 343 avail = calc_available_free_space(fs_info, space_info, flush); 344 345 if (used + bytes < space_info->total_bytes + avail) 346 return 1; 347 return 0; 348 } 349 350 static void remove_ticket(struct btrfs_space_info *space_info, 351 struct reserve_ticket *ticket) 352 { 353 if (!list_empty(&ticket->list)) { 354 list_del_init(&ticket->list); 355 ASSERT(space_info->reclaim_size >= ticket->bytes); 356 space_info->reclaim_size -= ticket->bytes; 357 } 358 } 359 360 /* 361 * This is for space we already have accounted in space_info->bytes_may_use, so 362 * basically when we're returning space from block_rsv's. 363 */ 364 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, 365 struct btrfs_space_info *space_info) 366 { 367 struct list_head *head; 368 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; 369 370 lockdep_assert_held(&space_info->lock); 371 372 head = &space_info->priority_tickets; 373 again: 374 while (!list_empty(head)) { 375 struct reserve_ticket *ticket; 376 u64 used = btrfs_space_info_used(space_info, true); 377 378 ticket = list_first_entry(head, struct reserve_ticket, list); 379 380 /* Check and see if our ticket can be satisfied now. 
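 * That is, it either fits within the space that is currently free
 * (used + ticket->bytes <= total_bytes) or we are allowed to overcommit
 * by at least ticket->bytes.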
*/ 381 if ((used + ticket->bytes <= space_info->total_bytes) || 382 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, 383 flush)) { 384 btrfs_space_info_update_bytes_may_use(fs_info, 385 space_info, 386 ticket->bytes); 387 remove_ticket(space_info, ticket); 388 ticket->bytes = 0; 389 space_info->tickets_id++; 390 wake_up(&ticket->wait); 391 } else { 392 break; 393 } 394 } 395 396 if (head == &space_info->priority_tickets) { 397 head = &space_info->tickets; 398 flush = BTRFS_RESERVE_FLUSH_ALL; 399 goto again; 400 } 401 } 402 403 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \ 404 do { \ 405 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \ 406 spin_lock(&__rsv->lock); \ 407 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \ 408 __rsv->size, __rsv->reserved); \ 409 spin_unlock(&__rsv->lock); \ 410 } while (0) 411 412 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 413 struct btrfs_space_info *info) 414 { 415 lockdep_assert_held(&info->lock); 416 417 btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull", 418 info->flags, 419 info->total_bytes - btrfs_space_info_used(info, true), 420 info->full ? "" : "not "); 421 btrfs_info(fs_info, 422 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu", 423 info->total_bytes, info->bytes_used, info->bytes_pinned, 424 info->bytes_reserved, info->bytes_may_use, 425 info->bytes_readonly, info->bytes_zone_unusable); 426 427 DUMP_BLOCK_RSV(fs_info, global_block_rsv); 428 DUMP_BLOCK_RSV(fs_info, trans_block_rsv); 429 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv); 430 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv); 431 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); 432 433 } 434 435 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 436 struct btrfs_space_info *info, u64 bytes, 437 int dump_block_groups) 438 { 439 struct btrfs_block_group *cache; 440 int index = 0; 441 442 spin_lock(&info->lock); 443 __btrfs_dump_space_info(fs_info, info); 444 spin_unlock(&info->lock); 445 446 if (!dump_block_groups) 447 return; 448 449 down_read(&info->groups_sem); 450 again: 451 list_for_each_entry(cache, &info->block_groups[index], list) { 452 spin_lock(&cache->lock); 453 btrfs_info(fs_info, 454 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s", 455 cache->start, cache->length, cache->used, cache->pinned, 456 cache->reserved, cache->zone_unusable, 457 cache->ro ? 
"[readonly]" : ""); 458 spin_unlock(&cache->lock); 459 btrfs_dump_free_space(cache, bytes); 460 } 461 if (++index < BTRFS_NR_RAID_TYPES) 462 goto again; 463 up_read(&info->groups_sem); 464 } 465 466 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, 467 u64 to_reclaim) 468 { 469 u64 bytes; 470 u64 nr; 471 472 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 473 nr = div64_u64(to_reclaim, bytes); 474 if (!nr) 475 nr = 1; 476 return nr; 477 } 478 479 #define EXTENT_SIZE_PER_ITEM SZ_256K 480 481 /* 482 * shrink metadata reservation for delalloc 483 */ 484 static void shrink_delalloc(struct btrfs_fs_info *fs_info, 485 struct btrfs_space_info *space_info, 486 u64 to_reclaim, bool wait_ordered, 487 bool for_preempt) 488 { 489 struct btrfs_trans_handle *trans; 490 u64 delalloc_bytes; 491 u64 ordered_bytes; 492 u64 items; 493 long time_left; 494 int loops; 495 496 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); 497 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); 498 if (delalloc_bytes == 0 && ordered_bytes == 0) 499 return; 500 501 /* Calc the number of the pages we need flush for space reservation */ 502 if (to_reclaim == U64_MAX) { 503 items = U64_MAX; 504 } else { 505 /* 506 * to_reclaim is set to however much metadata we need to 507 * reclaim, but reclaiming that much data doesn't really track 508 * exactly. What we really want to do is reclaim full inode's 509 * worth of reservations, however that's not available to us 510 * here. We will take a fraction of the delalloc bytes for our 511 * flushing loops and hope for the best. Delalloc will expand 512 * the amount we write to cover an entire dirty extent, which 513 * will reclaim the metadata reservation for that range. If 514 * it's not enough subsequent flush stages will be more 515 * aggressive. 516 */ 517 to_reclaim = max(to_reclaim, delalloc_bytes >> 3); 518 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; 519 } 520 521 trans = (struct btrfs_trans_handle *)current->journal_info; 522 523 /* 524 * If we are doing more ordered than delalloc we need to just wait on 525 * ordered extents, otherwise we'll waste time trying to flush delalloc 526 * that likely won't give us the space back we need. 527 */ 528 if (ordered_bytes > delalloc_bytes && !for_preempt) 529 wait_ordered = true; 530 531 loops = 0; 532 while ((delalloc_bytes || ordered_bytes) && loops < 3) { 533 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT; 534 long nr_pages = min_t(u64, temp, LONG_MAX); 535 int async_pages; 536 537 btrfs_start_delalloc_roots(fs_info, nr_pages, true); 538 539 /* 540 * We need to make sure any outstanding async pages are now 541 * processed before we continue. This is because things like 542 * sync_inode() try to be smart and skip writing if the inode is 543 * marked clean. We don't use filemap_fwrite for flushing 544 * because we want to control how many pages we write out at a 545 * time, thus this is the only safe way to make sure we've 546 * waited for outstanding compressed workers to have started 547 * their jobs and thus have ordered extents set up properly. 548 * 549 * This exists because we do not want to wait for each 550 * individual inode to finish its async work, we simply want to 551 * start the IO on everybody, and then come back here and wait 552 * for all of the async work to catch up. Once we're done with 553 * that we know we'll have ordered extents for everything and we 554 * can decide if we wait for that or not. 
555 * 556 * If we choose to replace this in the future, make absolutely 557 * sure that the proper waiting is being done in the async case, 558 * as there have been bugs in that area before. 559 */ 560 async_pages = atomic_read(&fs_info->async_delalloc_pages); 561 if (!async_pages) 562 goto skip_async; 563 564 /* 565 * We don't want to wait forever, if we wrote less pages in this 566 * loop than we have outstanding, only wait for that number of 567 * pages, otherwise we can wait for all async pages to finish 568 * before continuing. 569 */ 570 if (async_pages > nr_pages) 571 async_pages -= nr_pages; 572 else 573 async_pages = 0; 574 wait_event(fs_info->async_submit_wait, 575 atomic_read(&fs_info->async_delalloc_pages) <= 576 async_pages); 577 skip_async: 578 loops++; 579 if (wait_ordered && !trans) { 580 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 581 } else { 582 time_left = schedule_timeout_killable(1); 583 if (time_left) 584 break; 585 } 586 587 /* 588 * If we are for preemption we just want a one-shot of delalloc 589 * flushing so we can stop flushing if we decide we don't need 590 * to anymore. 591 */ 592 if (for_preempt) 593 break; 594 595 spin_lock(&space_info->lock); 596 if (list_empty(&space_info->tickets) && 597 list_empty(&space_info->priority_tickets)) { 598 spin_unlock(&space_info->lock); 599 break; 600 } 601 spin_unlock(&space_info->lock); 602 603 delalloc_bytes = percpu_counter_sum_positive( 604 &fs_info->delalloc_bytes); 605 ordered_bytes = percpu_counter_sum_positive( 606 &fs_info->ordered_bytes); 607 } 608 } 609 610 /* 611 * Try to flush some data based on policy set by @state. This is only advisory 612 * and may fail for various reasons. The caller is supposed to examine the 613 * state of @space_info to detect the outcome. 614 */ 615 static void flush_space(struct btrfs_fs_info *fs_info, 616 struct btrfs_space_info *space_info, u64 num_bytes, 617 enum btrfs_flush_state state, bool for_preempt) 618 { 619 struct btrfs_root *root = fs_info->extent_root; 620 struct btrfs_trans_handle *trans; 621 int nr; 622 int ret = 0; 623 624 switch (state) { 625 case FLUSH_DELAYED_ITEMS_NR: 626 case FLUSH_DELAYED_ITEMS: 627 if (state == FLUSH_DELAYED_ITEMS_NR) 628 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2; 629 else 630 nr = -1; 631 632 trans = btrfs_join_transaction(root); 633 if (IS_ERR(trans)) { 634 ret = PTR_ERR(trans); 635 break; 636 } 637 ret = btrfs_run_delayed_items_nr(trans, nr); 638 btrfs_end_transaction(trans); 639 break; 640 case FLUSH_DELALLOC: 641 case FLUSH_DELALLOC_WAIT: 642 case FLUSH_DELALLOC_FULL: 643 if (state == FLUSH_DELALLOC_FULL) 644 num_bytes = U64_MAX; 645 shrink_delalloc(fs_info, space_info, num_bytes, 646 state != FLUSH_DELALLOC, for_preempt); 647 break; 648 case FLUSH_DELAYED_REFS_NR: 649 case FLUSH_DELAYED_REFS: 650 trans = btrfs_join_transaction(root); 651 if (IS_ERR(trans)) { 652 ret = PTR_ERR(trans); 653 break; 654 } 655 if (state == FLUSH_DELAYED_REFS_NR) 656 nr = calc_reclaim_items_nr(fs_info, num_bytes); 657 else 658 nr = 0; 659 btrfs_run_delayed_refs(trans, nr); 660 btrfs_end_transaction(trans); 661 break; 662 case ALLOC_CHUNK: 663 case ALLOC_CHUNK_FORCE: 664 trans = btrfs_join_transaction(root); 665 if (IS_ERR(trans)) { 666 ret = PTR_ERR(trans); 667 break; 668 } 669 ret = btrfs_chunk_alloc(trans, 670 btrfs_get_alloc_profile(fs_info, space_info->flags), 671 (state == ALLOC_CHUNK) ? 
CHUNK_ALLOC_NO_FORCE : 672 CHUNK_ALLOC_FORCE); 673 btrfs_end_transaction(trans); 674 if (ret > 0 || ret == -ENOSPC) 675 ret = 0; 676 break; 677 case RUN_DELAYED_IPUTS: 678 /* 679 * If we have pending delayed iputs then we could free up a 680 * bunch of pinned space, so make sure we run the iputs before 681 * we do our pinned bytes check below. 682 */ 683 btrfs_run_delayed_iputs(fs_info); 684 btrfs_wait_on_delayed_iputs(fs_info); 685 break; 686 case COMMIT_TRANS: 687 ASSERT(current->journal_info == NULL); 688 trans = btrfs_join_transaction(root); 689 if (IS_ERR(trans)) { 690 ret = PTR_ERR(trans); 691 break; 692 } 693 ret = btrfs_commit_transaction(trans); 694 break; 695 default: 696 ret = -ENOSPC; 697 break; 698 } 699 700 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, 701 ret, for_preempt); 702 return; 703 } 704 705 static inline u64 706 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, 707 struct btrfs_space_info *space_info) 708 { 709 u64 used; 710 u64 avail; 711 u64 to_reclaim = space_info->reclaim_size; 712 713 lockdep_assert_held(&space_info->lock); 714 715 avail = calc_available_free_space(fs_info, space_info, 716 BTRFS_RESERVE_FLUSH_ALL); 717 used = btrfs_space_info_used(space_info, true); 718 719 /* 720 * We may be flushing because suddenly we have less space than we had 721 * before, and now we're well over-committed based on our current free 722 * space. If that's the case add in our overage so we make sure to put 723 * appropriate pressure on the flushing state machine. 724 */ 725 if (space_info->total_bytes + avail < used) 726 to_reclaim += used - (space_info->total_bytes + avail); 727 728 return to_reclaim; 729 } 730 731 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, 732 struct btrfs_space_info *space_info) 733 { 734 u64 global_rsv_size = fs_info->global_block_rsv.reserved; 735 u64 ordered, delalloc; 736 u64 thresh = div_factor_fine(space_info->total_bytes, 90); 737 u64 used; 738 739 /* If we're just plain full then async reclaim just slows us down. */ 740 if ((space_info->bytes_used + space_info->bytes_reserved + 741 global_rsv_size) >= thresh) 742 return false; 743 744 used = space_info->bytes_may_use + space_info->bytes_pinned; 745 746 /* The total flushable belongs to the global rsv, don't flush. */ 747 if (global_rsv_size >= used) 748 return false; 749 750 /* 751 * 128MiB is 1/4 of the maximum global rsv size. If we have less than 752 * that devoted to other reservations then there's no sense in flushing, 753 * we don't have a lot of things that need flushing. 754 */ 755 if (used - global_rsv_size <= SZ_128M) 756 return false; 757 758 /* 759 * We have tickets queued, bail so we don't compete with the async 760 * flushers. 761 */ 762 if (space_info->reclaim_size) 763 return false; 764 765 /* 766 * If we have over half of the free space occupied by reservations or 767 * pinned then we want to start flushing. 768 * 769 * We do not do the traditional thing here, which is to say 770 * 771 * if (used >= ((total_bytes + avail) / 2)) 772 * return 1; 773 * 774 * because this doesn't quite work how we want. If we had more than 50% 775 * of the space_info used by bytes_used and we had 0 available we'd just 776 * constantly run the background flusher. Instead we want it to kick in 777 * if our reclaimable space exceeds our clamped free space. 778 * 779 * Our clamping range is 2^1 -> 2^8. 
Practically speaking that means 780 * the following: 781 * 782 * Amount of RAM Minimum threshold Maximum threshold 783 * 784 * 256GiB 1GiB 128GiB 785 * 128GiB 512MiB 64GiB 786 * 64GiB 256MiB 32GiB 787 * 32GiB 128MiB 16GiB 788 * 16GiB 64MiB 8GiB 789 * 790 * These are the range our thresholds will fall in, corresponding to how 791 * much delalloc we need for the background flusher to kick in. 792 */ 793 794 thresh = calc_available_free_space(fs_info, space_info, 795 BTRFS_RESERVE_FLUSH_ALL); 796 used = space_info->bytes_used + space_info->bytes_reserved + 797 space_info->bytes_readonly + global_rsv_size; 798 if (used < space_info->total_bytes) 799 thresh += space_info->total_bytes - used; 800 thresh >>= space_info->clamp; 801 802 used = space_info->bytes_pinned; 803 804 /* 805 * If we have more ordered bytes than delalloc bytes then we're either 806 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting 807 * around. Preemptive flushing is only useful in that it can free up 808 * space before tickets need to wait for things to finish. In the case 809 * of ordered extents, preemptively waiting on ordered extents gets us 810 * nothing, if our reservations are tied up in ordered extents we'll 811 * simply have to slow down writers by forcing them to wait on ordered 812 * extents. 813 * 814 * In the case that ordered is larger than delalloc, only include the 815 * block reserves that we would actually be able to directly reclaim 816 * from. In this case if we're heavy on metadata operations this will 817 * clearly be heavy enough to warrant preemptive flushing. In the case 818 * of heavy DIO or ordered reservations, preemptive flushing will just 819 * waste time and cause us to slow down. 820 * 821 * We want to make sure we truly are maxed out on ordered however, so 822 * cut ordered in half, and if it's still higher than delalloc then we 823 * can keep flushing. This is to avoid the case where we start 824 * flushing, and now delalloc == ordered and we stop preemptively 825 * flushing when we could still have several gigs of delalloc to flush. 
826 */ 827 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1; 828 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes); 829 if (ordered >= delalloc) 830 used += fs_info->delayed_refs_rsv.reserved + 831 fs_info->delayed_block_rsv.reserved; 832 else 833 used += space_info->bytes_may_use - global_rsv_size; 834 835 return (used >= thresh && !btrfs_fs_closing(fs_info) && 836 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); 837 } 838 839 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, 840 struct btrfs_space_info *space_info, 841 struct reserve_ticket *ticket) 842 { 843 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 844 u64 min_bytes; 845 846 if (global_rsv->space_info != space_info) 847 return false; 848 849 spin_lock(&global_rsv->lock); 850 min_bytes = div_factor(global_rsv->size, 1); 851 if (global_rsv->reserved < min_bytes + ticket->bytes) { 852 spin_unlock(&global_rsv->lock); 853 return false; 854 } 855 global_rsv->reserved -= ticket->bytes; 856 remove_ticket(space_info, ticket); 857 ticket->bytes = 0; 858 wake_up(&ticket->wait); 859 space_info->tickets_id++; 860 if (global_rsv->reserved < global_rsv->size) 861 global_rsv->full = 0; 862 spin_unlock(&global_rsv->lock); 863 864 return true; 865 } 866 867 /* 868 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets 869 * @fs_info - fs_info for this fs 870 * @space_info - the space info we were flushing 871 * 872 * We call this when we've exhausted our flushing ability and haven't made 873 * progress in satisfying tickets. The reservation code handles tickets in 874 * order, so if there is a large ticket first and then smaller ones we could 875 * very well satisfy the smaller tickets. This will attempt to wake up any 876 * tickets in the list to catch this case. 877 * 878 * This function returns true if it was able to make progress by clearing out 879 * other tickets, or if it stumbles across a ticket that was smaller than the 880 * first ticket. 881 */ 882 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, 883 struct btrfs_space_info *space_info) 884 { 885 struct reserve_ticket *ticket; 886 u64 tickets_id = space_info->tickets_id; 887 888 trace_btrfs_fail_all_tickets(fs_info, space_info); 889 890 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 891 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info"); 892 __btrfs_dump_space_info(fs_info, space_info); 893 } 894 895 while (!list_empty(&space_info->tickets) && 896 tickets_id == space_info->tickets_id) { 897 ticket = list_first_entry(&space_info->tickets, 898 struct reserve_ticket, list); 899 900 if (ticket->steal && 901 steal_from_global_rsv(fs_info, space_info, ticket)) 902 return true; 903 904 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 905 btrfs_info(fs_info, "failing ticket with %llu bytes", 906 ticket->bytes); 907 908 remove_ticket(space_info, ticket); 909 ticket->error = -ENOSPC; 910 wake_up(&ticket->wait); 911 912 /* 913 * We're just throwing tickets away, so more flushing may not 914 * trip over btrfs_try_granting_tickets, so we need to call it 915 * here to see if we can make progress with the next ticket in 916 * the list. 917 */ 918 btrfs_try_granting_tickets(fs_info, space_info); 919 } 920 return (tickets_id != space_info->tickets_id); 921 } 922 923 /* 924 * This is for normal flushers, we can wait all goddamned day if we want to. We 925 * will loop and continuously try to flush as long as we are making progress. 
926 * We count progress as clearing off tickets each time we have to loop. 927 */ 928 static void btrfs_async_reclaim_metadata_space(struct work_struct *work) 929 { 930 struct btrfs_fs_info *fs_info; 931 struct btrfs_space_info *space_info; 932 u64 to_reclaim; 933 enum btrfs_flush_state flush_state; 934 int commit_cycles = 0; 935 u64 last_tickets_id; 936 937 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); 938 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 939 940 spin_lock(&space_info->lock); 941 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 942 if (!to_reclaim) { 943 space_info->flush = 0; 944 spin_unlock(&space_info->lock); 945 return; 946 } 947 last_tickets_id = space_info->tickets_id; 948 spin_unlock(&space_info->lock); 949 950 flush_state = FLUSH_DELAYED_ITEMS_NR; 951 do { 952 flush_space(fs_info, space_info, to_reclaim, flush_state, false); 953 spin_lock(&space_info->lock); 954 if (list_empty(&space_info->tickets)) { 955 space_info->flush = 0; 956 spin_unlock(&space_info->lock); 957 return; 958 } 959 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, 960 space_info); 961 if (last_tickets_id == space_info->tickets_id) { 962 flush_state++; 963 } else { 964 last_tickets_id = space_info->tickets_id; 965 flush_state = FLUSH_DELAYED_ITEMS_NR; 966 if (commit_cycles) 967 commit_cycles--; 968 } 969 970 /* 971 * We do not want to empty the system of delalloc unless we're 972 * under heavy pressure, so allow one trip through the flushing 973 * logic before we start doing a FLUSH_DELALLOC_FULL. 974 */ 975 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles) 976 flush_state++; 977 978 /* 979 * We don't want to force a chunk allocation until we've tried 980 * pretty hard to reclaim space. Think of the case where we 981 * freed up a bunch of space and so have a lot of pinned space 982 * to reclaim. We would rather use that than possibly create a 983 * underutilized metadata chunk. So if this is our first run 984 * through the flushing state machine skip ALLOC_CHUNK_FORCE and 985 * commit the transaction. If nothing has changed the next go 986 * around then we can force a chunk allocation. 987 */ 988 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles) 989 flush_state++; 990 991 if (flush_state > COMMIT_TRANS) { 992 commit_cycles++; 993 if (commit_cycles > 2) { 994 if (maybe_fail_all_tickets(fs_info, space_info)) { 995 flush_state = FLUSH_DELAYED_ITEMS_NR; 996 commit_cycles--; 997 } else { 998 space_info->flush = 0; 999 } 1000 } else { 1001 flush_state = FLUSH_DELAYED_ITEMS_NR; 1002 } 1003 } 1004 spin_unlock(&space_info->lock); 1005 } while (flush_state <= COMMIT_TRANS); 1006 } 1007 1008 /* 1009 * This handles pre-flushing of metadata space before we get to the point that 1010 * we need to start blocking threads on tickets. The logic here is different 1011 * from the other flush paths because it doesn't rely on tickets to tell us how 1012 * much we need to flush, instead it attempts to keep us below the 80% full 1013 * watermark of space by flushing whichever reservation pool is currently the 1014 * largest. 
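 *
 * The threshold it works against is scaled down by space_info->clamp: each
 * time a reservation is forced to queue a ticket, maybe_clamp_preempt()
 * raises the clamp so this worker kicks in earlier, and if a single pass is
 * enough the clamp is relaxed again at the end of this function.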
1015 */ 1016 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work) 1017 { 1018 struct btrfs_fs_info *fs_info; 1019 struct btrfs_space_info *space_info; 1020 struct btrfs_block_rsv *delayed_block_rsv; 1021 struct btrfs_block_rsv *delayed_refs_rsv; 1022 struct btrfs_block_rsv *global_rsv; 1023 struct btrfs_block_rsv *trans_rsv; 1024 int loops = 0; 1025 1026 fs_info = container_of(work, struct btrfs_fs_info, 1027 preempt_reclaim_work); 1028 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 1029 delayed_block_rsv = &fs_info->delayed_block_rsv; 1030 delayed_refs_rsv = &fs_info->delayed_refs_rsv; 1031 global_rsv = &fs_info->global_block_rsv; 1032 trans_rsv = &fs_info->trans_block_rsv; 1033 1034 spin_lock(&space_info->lock); 1035 while (need_preemptive_reclaim(fs_info, space_info)) { 1036 enum btrfs_flush_state flush; 1037 u64 delalloc_size = 0; 1038 u64 to_reclaim, block_rsv_size; 1039 u64 global_rsv_size = global_rsv->reserved; 1040 1041 loops++; 1042 1043 /* 1044 * We don't have a precise counter for the metadata being 1045 * reserved for delalloc, so we'll approximate it by subtracting 1046 * out the block rsv's space from the bytes_may_use. If that 1047 * amount is higher than the individual reserves, then we can 1048 * assume it's tied up in delalloc reservations. 1049 */ 1050 block_rsv_size = global_rsv_size + 1051 delayed_block_rsv->reserved + 1052 delayed_refs_rsv->reserved + 1053 trans_rsv->reserved; 1054 if (block_rsv_size < space_info->bytes_may_use) 1055 delalloc_size = space_info->bytes_may_use - block_rsv_size; 1056 spin_unlock(&space_info->lock); 1057 1058 /* 1059 * We don't want to include the global_rsv in our calculation, 1060 * because that's space we can't touch. Subtract it from the 1061 * block_rsv_size for the next checks. 1062 */ 1063 block_rsv_size -= global_rsv_size; 1064 1065 /* 1066 * We really want to avoid flushing delalloc too much, as it 1067 * could result in poor allocation patterns, so only flush it if 1068 * it's larger than the rest of the pools combined. 1069 */ 1070 if (delalloc_size > block_rsv_size) { 1071 to_reclaim = delalloc_size; 1072 flush = FLUSH_DELALLOC; 1073 } else if (space_info->bytes_pinned > 1074 (delayed_block_rsv->reserved + 1075 delayed_refs_rsv->reserved)) { 1076 to_reclaim = space_info->bytes_pinned; 1077 flush = COMMIT_TRANS; 1078 } else if (delayed_block_rsv->reserved > 1079 delayed_refs_rsv->reserved) { 1080 to_reclaim = delayed_block_rsv->reserved; 1081 flush = FLUSH_DELAYED_ITEMS_NR; 1082 } else { 1083 to_reclaim = delayed_refs_rsv->reserved; 1084 flush = FLUSH_DELAYED_REFS_NR; 1085 } 1086 1087 /* 1088 * We don't want to reclaim everything, just a portion, so scale 1089 * down the to_reclaim by 1/4. If it takes us down to 0, 1090 * reclaim 1 items worth. 1091 */ 1092 to_reclaim >>= 2; 1093 if (!to_reclaim) 1094 to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1); 1095 flush_space(fs_info, space_info, to_reclaim, flush, true); 1096 cond_resched(); 1097 spin_lock(&space_info->lock); 1098 } 1099 1100 /* We only went through once, back off our clamping. */ 1101 if (loops == 1 && !space_info->reclaim_size) 1102 space_info->clamp = max(1, space_info->clamp - 1); 1103 trace_btrfs_done_preemptive_reclaim(fs_info, space_info); 1104 spin_unlock(&space_info->lock); 1105 } 1106 1107 /* 1108 * FLUSH_DELALLOC_WAIT: 1109 * Space is freed from flushing delalloc in one of two ways. 
1110 * 1111 * 1) compression is on and we allocate less space than we reserved 1112 * 2) we are overwriting existing space 1113 * 1114 * For #1 that extra space is reclaimed as soon as the delalloc pages are 1115 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent 1116 * length to ->bytes_reserved, and subtracts the reserved space from 1117 * ->bytes_may_use. 1118 * 1119 * For #2 this is trickier. Once the ordered extent runs we will drop the 1120 * extent in the range we are overwriting, which creates a delayed ref for 1121 * that freed extent. This however is not reclaimed until the transaction 1122 * commits, thus the next stages. 1123 * 1124 * RUN_DELAYED_IPUTS 1125 * If we are freeing inodes, we want to make sure all delayed iputs have 1126 * completed, because they could have been on an inode with i_nlink == 0, and 1127 * thus have been truncated and freed up space. But again this space is not 1128 * immediately re-usable, it comes in the form of a delayed ref, which must be 1129 * run and then the transaction must be committed. 1130 * 1131 * COMMIT_TRANS 1132 * This is where we reclaim all of the pinned space generated by running the 1133 * iputs 1134 * 1135 * ALLOC_CHUNK_FORCE 1136 * For data we start with alloc chunk force, however we could have been full 1137 * before, and then the transaction commit could have freed new block groups, 1138 * so if we now have space to allocate do the force chunk allocation. 1139 */ 1140 static const enum btrfs_flush_state data_flush_states[] = { 1141 FLUSH_DELALLOC_FULL, 1142 RUN_DELAYED_IPUTS, 1143 COMMIT_TRANS, 1144 ALLOC_CHUNK_FORCE, 1145 }; 1146 1147 static void btrfs_async_reclaim_data_space(struct work_struct *work) 1148 { 1149 struct btrfs_fs_info *fs_info; 1150 struct btrfs_space_info *space_info; 1151 u64 last_tickets_id; 1152 enum btrfs_flush_state flush_state = 0; 1153 1154 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); 1155 space_info = fs_info->data_sinfo; 1156 1157 spin_lock(&space_info->lock); 1158 if (list_empty(&space_info->tickets)) { 1159 space_info->flush = 0; 1160 spin_unlock(&space_info->lock); 1161 return; 1162 } 1163 last_tickets_id = space_info->tickets_id; 1164 spin_unlock(&space_info->lock); 1165 1166 while (!space_info->full) { 1167 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); 1168 spin_lock(&space_info->lock); 1169 if (list_empty(&space_info->tickets)) { 1170 space_info->flush = 0; 1171 spin_unlock(&space_info->lock); 1172 return; 1173 } 1174 last_tickets_id = space_info->tickets_id; 1175 spin_unlock(&space_info->lock); 1176 } 1177 1178 while (flush_state < ARRAY_SIZE(data_flush_states)) { 1179 flush_space(fs_info, space_info, U64_MAX, 1180 data_flush_states[flush_state], false); 1181 spin_lock(&space_info->lock); 1182 if (list_empty(&space_info->tickets)) { 1183 space_info->flush = 0; 1184 spin_unlock(&space_info->lock); 1185 return; 1186 } 1187 1188 if (last_tickets_id == space_info->tickets_id) { 1189 flush_state++; 1190 } else { 1191 last_tickets_id = space_info->tickets_id; 1192 flush_state = 0; 1193 } 1194 1195 if (flush_state >= ARRAY_SIZE(data_flush_states)) { 1196 if (space_info->full) { 1197 if (maybe_fail_all_tickets(fs_info, space_info)) 1198 flush_state = 0; 1199 else 1200 space_info->flush = 0; 1201 } else { 1202 flush_state = 0; 1203 } 1204 } 1205 spin_unlock(&space_info->lock); 1206 } 1207 } 1208 1209 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info) 1210 { 1211 INIT_WORK(&fs_info->async_reclaim_work, 
btrfs_async_reclaim_metadata_space); 1212 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); 1213 INIT_WORK(&fs_info->preempt_reclaim_work, 1214 btrfs_preempt_reclaim_metadata_space); 1215 } 1216 1217 static const enum btrfs_flush_state priority_flush_states[] = { 1218 FLUSH_DELAYED_ITEMS_NR, 1219 FLUSH_DELAYED_ITEMS, 1220 ALLOC_CHUNK, 1221 }; 1222 1223 static const enum btrfs_flush_state evict_flush_states[] = { 1224 FLUSH_DELAYED_ITEMS_NR, 1225 FLUSH_DELAYED_ITEMS, 1226 FLUSH_DELAYED_REFS_NR, 1227 FLUSH_DELAYED_REFS, 1228 FLUSH_DELALLOC, 1229 FLUSH_DELALLOC_WAIT, 1230 FLUSH_DELALLOC_FULL, 1231 ALLOC_CHUNK, 1232 COMMIT_TRANS, 1233 }; 1234 1235 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, 1236 struct btrfs_space_info *space_info, 1237 struct reserve_ticket *ticket, 1238 const enum btrfs_flush_state *states, 1239 int states_nr) 1240 { 1241 u64 to_reclaim; 1242 int flush_state; 1243 1244 spin_lock(&space_info->lock); 1245 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 1246 if (!to_reclaim) { 1247 spin_unlock(&space_info->lock); 1248 return; 1249 } 1250 spin_unlock(&space_info->lock); 1251 1252 flush_state = 0; 1253 do { 1254 flush_space(fs_info, space_info, to_reclaim, states[flush_state], 1255 false); 1256 flush_state++; 1257 spin_lock(&space_info->lock); 1258 if (ticket->bytes == 0) { 1259 spin_unlock(&space_info->lock); 1260 return; 1261 } 1262 spin_unlock(&space_info->lock); 1263 } while (flush_state < states_nr); 1264 } 1265 1266 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info, 1267 struct btrfs_space_info *space_info, 1268 struct reserve_ticket *ticket) 1269 { 1270 while (!space_info->full) { 1271 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); 1272 spin_lock(&space_info->lock); 1273 if (ticket->bytes == 0) { 1274 spin_unlock(&space_info->lock); 1275 return; 1276 } 1277 spin_unlock(&space_info->lock); 1278 } 1279 } 1280 1281 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, 1282 struct btrfs_space_info *space_info, 1283 struct reserve_ticket *ticket) 1284 1285 { 1286 DEFINE_WAIT(wait); 1287 int ret = 0; 1288 1289 spin_lock(&space_info->lock); 1290 while (ticket->bytes > 0 && ticket->error == 0) { 1291 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); 1292 if (ret) { 1293 /* 1294 * Delete us from the list. After we unlock the space 1295 * info, we don't want the async reclaim job to reserve 1296 * space for this ticket. If that would happen, then the 1297 * ticket's task would not know that space was reserved 1298 * despite getting an error, resulting in a space leak 1299 * (bytes_may_use counter of our space_info). 1300 */ 1301 remove_ticket(space_info, ticket); 1302 ticket->error = -EINTR; 1303 break; 1304 } 1305 spin_unlock(&space_info->lock); 1306 1307 schedule(); 1308 1309 finish_wait(&ticket->wait, &wait); 1310 spin_lock(&space_info->lock); 1311 } 1312 spin_unlock(&space_info->lock); 1313 } 1314 1315 /** 1316 * Do the appropriate flushing and waiting for a ticket 1317 * 1318 * @fs_info: the filesystem 1319 * @space_info: space info for the reservation 1320 * @ticket: ticket for the reservation 1321 * @start_ns: timestamp when the reservation started 1322 * @orig_bytes: amount of bytes originally reserved 1323 * @flush: how much we can flush 1324 * 1325 * This does the work of figuring out how to flush for the ticket, waiting for 1326 * the reservation, and returning the appropriate error if there is one.
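 *
 * Return: 0 if the reservation was satisfied, -ENOSPC if we could not make
 * the reservation, or -EINTR if we were interrupted while waiting for space.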
1327 */ 1328 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, 1329 struct btrfs_space_info *space_info, 1330 struct reserve_ticket *ticket, 1331 u64 start_ns, u64 orig_bytes, 1332 enum btrfs_reserve_flush_enum flush) 1333 { 1334 int ret; 1335 1336 switch (flush) { 1337 case BTRFS_RESERVE_FLUSH_DATA: 1338 case BTRFS_RESERVE_FLUSH_ALL: 1339 case BTRFS_RESERVE_FLUSH_ALL_STEAL: 1340 wait_reserve_ticket(fs_info, space_info, ticket); 1341 break; 1342 case BTRFS_RESERVE_FLUSH_LIMIT: 1343 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1344 priority_flush_states, 1345 ARRAY_SIZE(priority_flush_states)); 1346 break; 1347 case BTRFS_RESERVE_FLUSH_EVICT: 1348 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1349 evict_flush_states, 1350 ARRAY_SIZE(evict_flush_states)); 1351 break; 1352 case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE: 1353 priority_reclaim_data_space(fs_info, space_info, ticket); 1354 break; 1355 default: 1356 ASSERT(0); 1357 break; 1358 } 1359 1360 spin_lock(&space_info->lock); 1361 ret = ticket->error; 1362 if (ticket->bytes || ticket->error) { 1363 /* 1364 * We were a priority ticket, so we need to delete ourselves 1365 * from the list. Because we could have other priority tickets 1366 * behind us that require less space, run 1367 * btrfs_try_granting_tickets() to see if their reservations can 1368 * now be made. 1369 */ 1370 if (!list_empty(&ticket->list)) { 1371 remove_ticket(space_info, ticket); 1372 btrfs_try_granting_tickets(fs_info, space_info); 1373 } 1374 1375 if (!ret) 1376 ret = -ENOSPC; 1377 } 1378 spin_unlock(&space_info->lock); 1379 ASSERT(list_empty(&ticket->list)); 1380 /* 1381 * Check that we can't have an error set if the reservation succeeded, 1382 * as that would confuse tasks and lead them to error out without 1383 * releasing reserved space (if an error happens the expectation is that 1384 * space wasn't reserved at all). 1385 */ 1386 ASSERT(!(ticket->bytes == 0 && ticket->error)); 1387 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, 1388 start_ns, flush, ticket->error); 1389 return ret; 1390 } 1391 1392 /* 1393 * This returns true if this flush state will go through the ordinary flushing 1394 * code. 1395 */ 1396 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) 1397 { 1398 return (flush == BTRFS_RESERVE_FLUSH_ALL) || 1399 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1400 } 1401 1402 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info, 1403 struct btrfs_space_info *space_info) 1404 { 1405 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); 1406 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); 1407 1408 /* 1409 * If we're heavy on ordered operations then clamping won't help us. We 1410 * need to clamp specifically to keep up with dirty'ing buffered 1411 * writers, because there's not a 1:1 correlation of writing delalloc 1412 * and freeing space, like there is with flushing delayed refs or 1413 * delayed nodes. If we're already more ordered than delalloc then 1414 * we're keeping up, otherwise we aren't and should probably clamp. 
1415 */ 1416 if (ordered < delalloc) 1417 space_info->clamp = min(space_info->clamp + 1, 8); 1418 } 1419 1420 /** 1421 * Try to reserve bytes from the block_rsv's space 1422 * 1423 * @fs_info: the filesystem 1424 * @space_info: space info we want to allocate from 1425 * @orig_bytes: number of bytes we want 1426 * @flush: whether or not we can flush to make our reservation 1427 * 1428 * This will reserve orig_bytes number of bytes from the space info associated 1429 * with the block_rsv. If there is not enough space it will make an attempt to 1430 * flush out space to make room. It will do this by flushing delalloc if 1431 * possible or committing the transaction. If flush is 0 then no attempts to 1432 * regain reservations will be made and this will fail if there is not enough 1433 * space already. 1434 */ 1435 static int __reserve_bytes(struct btrfs_fs_info *fs_info, 1436 struct btrfs_space_info *space_info, u64 orig_bytes, 1437 enum btrfs_reserve_flush_enum flush) 1438 { 1439 struct work_struct *async_work; 1440 struct reserve_ticket ticket; 1441 u64 start_ns = 0; 1442 u64 used; 1443 int ret = 0; 1444 bool pending_tickets; 1445 1446 ASSERT(orig_bytes); 1447 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); 1448 1449 if (flush == BTRFS_RESERVE_FLUSH_DATA) 1450 async_work = &fs_info->async_data_reclaim_work; 1451 else 1452 async_work = &fs_info->async_reclaim_work; 1453 1454 spin_lock(&space_info->lock); 1455 ret = -ENOSPC; 1456 used = btrfs_space_info_used(space_info, true); 1457 1458 /* 1459 * We don't want NO_FLUSH allocations to jump everybody, they can 1460 * generally handle ENOSPC in a different way, so treat them the same as 1461 * normal flushers when it comes to skipping pending tickets. 1462 */ 1463 if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH)) 1464 pending_tickets = !list_empty(&space_info->tickets) || 1465 !list_empty(&space_info->priority_tickets); 1466 else 1467 pending_tickets = !list_empty(&space_info->priority_tickets); 1468 1469 /* 1470 * Carry on if we have enough space (short-circuit) OR call 1471 * can_overcommit() to ensure we can overcommit to continue. 1472 */ 1473 if (!pending_tickets && 1474 ((used + orig_bytes <= space_info->total_bytes) || 1475 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) { 1476 btrfs_space_info_update_bytes_may_use(fs_info, space_info, 1477 orig_bytes); 1478 ret = 0; 1479 } 1480 1481 /* 1482 * If we couldn't make a reservation then setup our reservation ticket 1483 * and kick the async worker if it's not already running. 1484 * 1485 * If we are a priority flusher then we just need to add our ticket to 1486 * the list and we will do our own flushing further down. 1487 */ 1488 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { 1489 ticket.bytes = orig_bytes; 1490 ticket.error = 0; 1491 space_info->reclaim_size += ticket.bytes; 1492 init_waitqueue_head(&ticket.wait); 1493 ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1494 if (trace_btrfs_reserve_ticket_enabled()) 1495 start_ns = ktime_get_ns(); 1496 1497 if (flush == BTRFS_RESERVE_FLUSH_ALL || 1498 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || 1499 flush == BTRFS_RESERVE_FLUSH_DATA) { 1500 list_add_tail(&ticket.list, &space_info->tickets); 1501 if (!space_info->flush) { 1502 /* 1503 * We were forced to add a reserve ticket, so 1504 * our preemptive flushing is unable to keep 1505 * up. Clamp down on the threshold for the 1506 * preemptive flushing in order to keep up with 1507 * the workload. 
1508 */ 1509 maybe_clamp_preempt(fs_info, space_info); 1510 1511 space_info->flush = 1; 1512 trace_btrfs_trigger_flush(fs_info, 1513 space_info->flags, 1514 orig_bytes, flush, 1515 "enospc"); 1516 queue_work(system_unbound_wq, async_work); 1517 } 1518 } else { 1519 list_add_tail(&ticket.list, 1520 &space_info->priority_tickets); 1521 } 1522 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 1523 used += orig_bytes; 1524 /* 1525 * We will do the space reservation dance during log replay, 1526 * which means we won't have fs_info->fs_root set, so don't do 1527 * the async reclaim as we will panic. 1528 */ 1529 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && 1530 !work_busy(&fs_info->preempt_reclaim_work) && 1531 need_preemptive_reclaim(fs_info, space_info)) { 1532 trace_btrfs_trigger_flush(fs_info, space_info->flags, 1533 orig_bytes, flush, "preempt"); 1534 queue_work(system_unbound_wq, 1535 &fs_info->preempt_reclaim_work); 1536 } 1537 } 1538 spin_unlock(&space_info->lock); 1539 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH) 1540 return ret; 1541 1542 return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns, 1543 orig_bytes, flush); 1544 } 1545 1546 /** 1547 * Try to reserve metadata bytes from the block_rsv's space 1548 * 1549 * @root: the root we're allocating for 1550 * @block_rsv: block_rsv we're allocating for 1551 * @orig_bytes: number of bytes we want 1552 * @flush: whether or not we can flush to make our reservation 1553 * 1554 * This will reserve orig_bytes number of bytes from the space info associated 1555 * with the block_rsv. If there is not enough space it will make an attempt to 1556 * flush out space to make room. It will do this by flushing delalloc if 1557 * possible or committing the transaction. If flush is 0 then no attempts to 1558 * regain reservations will be made and this will fail if there is not enough 1559 * space already. 1560 */ 1561 int btrfs_reserve_metadata_bytes(struct btrfs_root *root, 1562 struct btrfs_block_rsv *block_rsv, 1563 u64 orig_bytes, 1564 enum btrfs_reserve_flush_enum flush) 1565 { 1566 struct btrfs_fs_info *fs_info = root->fs_info; 1567 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 1568 int ret; 1569 1570 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush); 1571 if (ret == -ENOSPC && 1572 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) { 1573 if (block_rsv != global_rsv && 1574 !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes)) 1575 ret = 0; 1576 } 1577 if (ret == -ENOSPC) { 1578 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1579 block_rsv->space_info->flags, 1580 orig_bytes, 1); 1581 1582 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1583 btrfs_dump_space_info(fs_info, block_rsv->space_info, 1584 orig_bytes, 0); 1585 } 1586 return ret; 1587 } 1588 1589 /** 1590 * Try to reserve data bytes for an allocation 1591 * 1592 * @fs_info: the filesystem 1593 * @bytes: number of bytes we need 1594 * @flush: how we are allowed to flush 1595 * 1596 * This will reserve bytes from the data space info. If there is not enough 1597 * space then we will attempt to flush space as specified by flush.
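 *
 * Return: 0 on success, -ENOSPC if flushing could not satisfy the
 * reservation, or -EINTR if we were interrupted while waiting for space.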
1598 */ 1599 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, 1600 enum btrfs_reserve_flush_enum flush) 1601 { 1602 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; 1603 int ret; 1604 1605 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA || 1606 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE); 1607 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); 1608 1609 ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush); 1610 if (ret == -ENOSPC) { 1611 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1612 data_sinfo->flags, bytes, 1); 1613 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1614 btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0); 1615 } 1616 return ret; 1617 } 1618