// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 * 1) space_info.  This is the ultimate arbiter of how much space we can use.
 * There's a description of the bytes_ fields with the struct declaration,
 * refer to that for specifics on each field.  Suffice it to say that for
 * reservations we care about total_bytes - SUM(space_info->bytes_) when
 * determining if there is space to make an allocation.  There is a space_info
 * for METADATA, SYSTEM, and DATA areas.
 *
 * 2) block_rsv's.  These are basically buckets for every different type of
 * metadata reservation we have.  You can see the comment in the block_rsv
 * code on the rules for each type, but generally block_rsv->reserved is how
 * much space is accounted for in space_info->bytes_may_use.
 *
 * 3) btrfs_calc*_size.  These are the worst case calculations we use based
 * on the number of items we will want to modify.  We have one for changing
 * items, and one for inserting new items.  Generally we use these helpers to
 * determine the size of the block reserves, and then use the actual bytes
 * values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 * We call into either btrfs_reserve_data_bytes() or
 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 * num_bytes we want to reserve.
 *
 * ->reserve
 *   space_info->bytes_may_use += num_bytes
 *
 * ->extent allocation
 *   Call btrfs_add_reserved_bytes() which does
 *   space_info->bytes_may_use -= num_bytes
 *   space_info->bytes_reserved += extent_bytes
 *
 * ->insert reference
 *   Call btrfs_update_block_group() which does
 *   space_info->bytes_reserved -= extent_bytes
 *   space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 * Assume we are unable to simply make the reservation because we do not have
 * enough space
 *
 * -> __reserve_bytes
 *   create a reserve_ticket with ->bytes set to our reservation, add it to
 *   the tail of space_info->tickets, kick async flush thread
 *
 * ->handle_reserve_ticket
 *   wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *   on the ticket.
 *
 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *   Flushes various things attempting to free up space.
 *
 * -> btrfs_try_granting_tickets()
 *   This is called by anything that either subtracts space from
 *   space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *   space_info->total_bytes.  This loops through the ->priority_tickets and
 *   then the ->tickets list checking to see if the reservation can be
 *   completed.  If it can the space is added to space_info->bytes_may_use and
 *   the ticket is woken up.
 *
 * -> ticket wakeup
 *   Check if ->bytes == 0, if so we got our reservation and we can carry
 *   on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *   were interrupted.)
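 *
 * As a purely illustrative walk-through (the sizes are made up): if a 1MiB
 * reservation cannot be satisfied immediately, __reserve_bytes() queues a
 * ticket for 1MiB and the async worker starts walking the flush states.  Each
 * time space is released, btrfs_try_granting_tickets() checks whether 1MiB
 * now fits; once it does, bytes_may_use is bumped by 1MiB, ticket->bytes
 * drops to 0 and the waiter wakes up with its reservation made.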
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 * Same as the above, except we add ourselves to the
 * space_info->priority_tickets, and we do not use ticket->wait, we simply
 * call flush_space() ourselves for the states that are safe for us to call
 * without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 * Generally speaking we will have two cases for each state, a "nice" state
 * and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
 * reduce the locking overhead on the various trees, and even to keep from
 * doing any work at all in the case of delayed refs.  Each of these delayed
 * things however hold reservations, and so letting them run allows us to
 * reclaim space so we can make new reservations.
 *
 * FLUSH_DELAYED_ITEMS
 *   Every inode has a delayed item to update the inode.  Take a simple write
 *   for example, we would update the inode item at write time to update the
 *   mtime, and then again at finish_ordered_io() time in order to update the
 *   isize or bytes.  We keep these delayed items to coalesce these operations
 *   into a single operation done on demand.  These are an easy way to reclaim
 *   metadata space.
 *
 * FLUSH_DELALLOC
 *   Look at the delalloc comment to get an idea of how much space is reserved
 *   for delayed allocation.  We can reclaim some of this space simply by
 *   running delalloc, but usually we need to wait for ordered extents to
 *   reclaim the bulk of this space.
 *
 * FLUSH_DELAYED_REFS
 *   We have a block reserve for the outstanding delayed refs space, and every
 *   delayed ref operation holds a reservation.  Running these is a quick way
 *   to reclaim space, but we want to hold this until the end because COW can
 *   churn a lot and we can avoid making some extent tree modifications if we
 *   are able to delay for as long as possible.
 *
 * ALLOC_CHUNK
 *   We will skip this the first time through space reservation, because of
 *   overcommit and we don't want to have a lot of useless metadata space when
 *   our worst case reservations will likely never come true.
 *
 * RUN_DELAYED_IPUTS
 *   If we're freeing inodes we're likely freeing checksums, file extent
 *   items, and extent tree items.  Loads of space could be freed up by these
 *   operations, however they won't be usable until the transaction commits.
 *
 * COMMIT_TRANS
 *   This will commit the transaction.  Historically we had a lot of logic
 *   surrounding whether or not we'd commit the transaction, but this was born
 *   out of a pre-tickets era where we could end up committing the transaction
 *   thousands of times in a row without making progress.  Now thanks to our
 *   ticketing system we know if we're not making progress and can error
 *   everybody out after a few commits rather than burning the disk hoping for
 *   a different answer.
 *
 * OVERCOMMIT
 *
 * Because we hold so many reservations for metadata we will allow you to
 * reserve more space than is currently free in the currently allocated
 * metadata space.  This only happens with metadata, data does not allow
 * overcommitting.
 *
 * You can see the current logic for when we allow overcommit in
 * btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 * is no unallocated space to be had, all reservations are kept within the
 * free space in the allocated metadata chunks.
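 *
 * As a purely illustrative example (the numbers are made up): with 1GiB of
 * unallocated space left on the device and a SINGLE metadata profile,
 * calc_available_free_space() lets a BTRFS_RESERVE_FLUSH_ALL reservation
 * overcommit by up to 1GiB >> 3 = 128MiB beyond the free space in the
 * allocated metadata chunks, while the less aggressive flush levels may
 * overcommit by up to 1GiB >> 1 = 512MiB.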
 *
 * Because of overcommitting, you generally want to use the
 * btrfs_can_overcommit() logic for metadata allocations, as it does the right
 * thing with or without extra unallocated space.
 */

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
			  bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		s_info->bytes_zone_unusable +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);
	space_info->clamp = 1;

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly, u64 bytes_zone_unusable,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	found->bytes_zone_unusable += bytes_zone_unusable;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, head, list) {
		if (found->flags & flags)
			return found;
	}
	return NULL;
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
		   info->total_bytes, info->bytes_used, info->bytes_pinned,
		   info->bytes_reserved, info->bytes_may_use,
		   info->bytes_readonly, info->bytes_zone_unusable);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->zone_unusable,
			cache->ro ? "[readonly]" : "");
		spin_unlock(&cache->lock);
		btrfs_dump_free_space(cache, bytes);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM SZ_256K

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered,
			    bool for_preempt)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 ordered_bytes;
	u64 items;
	long time_left;
	int loops;

	/* Calc the number of the pages we need flush for space reservation */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly, so increase the amount to reclaim by 2x in order to
		 * make sure we're flushing enough delalloc to hopefully reclaim
		 * some metadata reservations.
		 */
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
		to_reclaim = items * EXTENT_SIZE_PER_ITEM;
	}

	trans = (struct btrfs_trans_handle *)current->journal_info;

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	if (delalloc_bytes == 0 && ordered_bytes == 0)
		return;

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (ordered_bytes > delalloc_bytes && !for_preempt)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
		long nr_pages = min_t(u64, temp, LONG_MAX);

		btrfs_start_delalloc_roots(fs_info, nr_pages, true);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		/*
		 * If we are for preemption we just want a one-shot of delalloc
		 * flushing so we can stop flushing if we decide we don't need
		 * to anymore.
		 */
		if (for_preempt)
			break;

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		ordered_bytes = percpu_counter_sum_positive(
						&fs_info->ordered_bytes);
	}
}

/*
 * Try to flush some data based on policy set by @state.  This is only advisory
 * and may fail for various reasons.  The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			enum btrfs_flush_state state, bool for_preempt)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, space_info, num_bytes,
				state == FLUSH_DELALLOC_WAIT, for_preempt);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ASSERT(current->journal_info == NULL);
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_commit_transaction(trans);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret, for_preempt);
	return;
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space.  If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
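	 *
	 * As a purely illustrative example (numbers made up): if total_bytes
	 * plus the allowed overcommit comes to 10GiB but 11GiB is already
	 * accounted as used, the 1GiB overage is added on top of the queued
	 * ticket sizes in reclaim_size.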
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	return to_reclaim;
}

static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info)
{
	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
	u64 ordered, delalloc;
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);
	u64 used;

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved +
	     global_rsv_size) >= thresh)
		return false;

	/*
	 * We have tickets queued, bail so we don't compete with the async
	 * flushers.
	 */
	if (space_info->reclaim_size)
		return false;

	/*
	 * If we have over half of the free space occupied by reservations or
	 * pinned then we want to start flushing.
	 *
	 * We do not do the traditional thing here, which is to say
	 *
	 *   if (used >= ((total_bytes + avail) / 2))
	 *     return 1;
	 *
	 * because this doesn't quite work how we want.  If we had more than 50%
	 * of the space_info used by bytes_used and we had 0 available we'd just
	 * constantly run the background flusher.  Instead we want it to kick in
	 * if our reclaimable space exceeds our clamped free space.
	 *
	 * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
	 * the following:
	 *
	 *   Amount of RAM        Minimum threshold       Maximum threshold
	 *
	 *        256GiB                     1GiB                    128GiB
	 *        128GiB                   512MiB                     64GiB
	 *         64GiB                   256MiB                     32GiB
	 *         32GiB                   128MiB                     16GiB
	 *         16GiB                    64MiB                      8GiB
	 *
	 * These are the range our thresholds will fall in, corresponding to how
	 * much delalloc we need for the background flusher to kick in.
	 */

	thresh = calc_available_free_space(fs_info, space_info,
					   BTRFS_RESERVE_FLUSH_ALL);
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_readonly + global_rsv_size;
	if (used < space_info->total_bytes)
		thresh += space_info->total_bytes - used;
	thresh >>= space_info->clamp;

	used = space_info->bytes_pinned;

	/*
	 * If we have more ordered bytes than delalloc bytes then we're either
	 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
	 * around.  Preemptive flushing is only useful in that it can free up
	 * space before tickets need to wait for things to finish.  In the case
	 * of ordered extents, preemptively waiting on ordered extents gets us
	 * nothing, if our reservations are tied up in ordered extents we'll
	 * simply have to slow down writers by forcing them to wait on ordered
	 * extents.
	 *
	 * In the case that ordered is larger than delalloc, only include the
	 * block reserves that we would actually be able to directly reclaim
	 * from.  In this case if we're heavy on metadata operations this will
	 * clearly be heavy enough to warrant preemptive flushing.  In the case
	 * of heavy DIO or ordered reservations, preemptive flushing will just
	 * waste time and cause us to slow down.
	 *
	 * We want to make sure we truly are maxed out on ordered however, so
	 * cut ordered in half, and if it's still higher than delalloc then we
	 * can keep flushing.  This is to avoid the case where we start
	 * flushing, and now delalloc == ordered and we stop preemptively
	 * flushing when we could still have several gigs of delalloc to flush.
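	 *
	 * As a purely illustrative example (numbers made up): with 4GiB of
	 * ordered bytes and 1GiB of delalloc bytes, ordered is halved to 2GiB,
	 * which is still >= delalloc, so only the delayed refs and delayed
	 * items reserves are added to the pinned bytes for the threshold
	 * comparison below.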
	 */
	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
	if (ordered >= delalloc)
		used += fs_info->delayed_refs_rsv.reserved +
			fs_info->delayed_block_rsv.reserved;
	else
		used += space_info->bytes_may_use - global_rsv_size;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	u64 first_ticket_bytes = 0;

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (ticket->steal &&
		    steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		/*
		 * may_commit_transaction will avoid committing the transaction
		 * if it doesn't feel like the space reclaimed by the commit
		 * would result in the ticket succeeding.  However if we have a
		 * smaller ticket in the queue it may be small enough to be
		 * satisfied by committing the transaction, so if any
		 * subsequent ticket is smaller than the first ticket go ahead
		 * and send us back for another loop through the enospc flushing
		 * code.
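		 *
		 * For example (sizes made up): with a 4MiB ticket at the head
		 * of the queue followed by a 256KiB ticket, returning true
		 * here sends us back through the flush states, which may well
		 * be enough to satisfy the smaller ticket even though the
		 * 4MiB one could not be.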
		 */
		if (first_ticket_bytes == 0)
			first_ticket_bytes = ticket->bytes;
		else if (first_ticket_bytes > ticket->bytes)
			return true;

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	enum btrfs_flush_state flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create an
		 * underutilized metadata chunk.  So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction.  If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 80% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv;
	struct btrfs_block_rsv *trans_rsv;
	int loops = 0;

	fs_info = container_of(work, struct btrfs_fs_info,
			       preempt_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	delayed_block_rsv = &fs_info->delayed_block_rsv;
	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	global_rsv = &fs_info->global_block_rsv;
	trans_rsv = &fs_info->trans_block_rsv;

	spin_lock(&space_info->lock);
	while (need_preemptive_reclaim(fs_info, space_info)) {
		enum btrfs_flush_state flush;
		u64 delalloc_size = 0;
		u64 to_reclaim, block_rsv_size;
		u64 global_rsv_size = global_rsv->reserved;

		loops++;

		/*
		 * We don't have a precise counter for the metadata being
		 * reserved for delalloc, so we'll approximate it by subtracting
		 * out the block rsv's space from the bytes_may_use.  If that
		 * amount is higher than the individual reserves, then we can
		 * assume it's tied up in delalloc reservations.
		 */
		block_rsv_size = global_rsv_size +
			delayed_block_rsv->reserved +
			delayed_refs_rsv->reserved +
			trans_rsv->reserved;
		if (block_rsv_size < space_info->bytes_may_use)
			delalloc_size = space_info->bytes_may_use - block_rsv_size;
		spin_unlock(&space_info->lock);

		/*
		 * We don't want to include the global_rsv in our calculation,
		 * because that's space we can't touch.  Subtract it from the
		 * block_rsv_size for the next checks.
		 */
		block_rsv_size -= global_rsv_size;

		/*
		 * We really want to avoid flushing delalloc too much, as it
		 * could result in poor allocation patterns, so only flush it if
		 * it's larger than the rest of the pools combined.
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (delayed_block_rsv->reserved +
			    delayed_refs_rsv->reserved)) {
			to_reclaim = space_info->bytes_pinned;
			flush = COMMIT_TRANS;
		} else if (delayed_block_rsv->reserved >
			   delayed_refs_rsv->reserved) {
			to_reclaim = delayed_block_rsv->reserved;
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = delayed_refs_rsv->reserved;
			flush = FLUSH_DELAYED_REFS_NR;
		}

		/*
		 * We don't want to reclaim everything, just a portion, so scale
		 * down the to_reclaim by 1/4.  If it takes us down to 0,
		 * reclaim 1 item's worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush, true);
		cond_resched();
		spin_lock(&space_info->lock);
	}

	/* We only went through once, back off our clamping. */
	if (loops == 1 && !space_info->reclaim_size)
		space_info->clamp = max(1, space_info->clamp - 1);
	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier.  Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent.  This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space.  But again this space is not
 *   immediately re-usable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * FLUSH_DELAYED_REFS
 *   The above two cases generate delayed refs that will affect
 *   ->total_bytes_pinned.  However this counter can be inconsistent with
 *   reality if there are outstanding delayed refs.  This is because we adjust
 *   the counter based solely on the current set of delayed refs and disregard
 *   any on-disk state which might include more refs.  So for example, if we
 *   have an extent with 2 references, but we only drop 1, we'll see that there
 *   is a negative delayed ref count for the extent and assume that the space
 *   will be freed, and thus increase ->total_bytes_pinned.
 *
 *   Running the delayed refs gives us the actual real view of what will be
 *   freed at the transaction commit time.  This stage will not actually free
 *   space for us, it just makes sure that may_commit_transaction() has all of
 *   the information it needs to make the right decision.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by running the
 *   iputs.
 *
 * ALLOC_CHUNK_FORCE
 *   For data we start with alloc chunk force, however we could have been full
 *   before, and then the transaction commit could have freed new block groups,
 *   so if we now have space to allocate do the force chunk allocation.
 */
static const enum btrfs_flush_state data_flush_states[] = {
	FLUSH_DELALLOC_WAIT,
	RUN_DELAYED_IPUTS,
	FLUSH_DELAYED_REFS,
	COMMIT_TRANS,
	ALLOC_CHUNK_FORCE,
};

static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 last_tickets_id;
	enum btrfs_flush_state flush_state = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
	space_info = fs_info->data_sinfo;

	spin_lock(&space_info->lock);
	if (list_empty(&space_info->tickets)) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		last_tickets_id = space_info->tickets_id;
		spin_unlock(&space_info->lock);
	}

	while (flush_state < ARRAY_SIZE(data_flush_states)) {
		flush_space(fs_info, space_info, U64_MAX,
			    data_flush_states[flush_state], false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = 0;
		}

		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
			if (space_info->full) {
				if (maybe_fail_all_tickets(fs_info, space_info))
					flush_state = 0;
				else
					space_info->flush = 0;
			} else {
				flush_state = 0;
			}
		}
		spin_unlock(&space_info->lock);
	}
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
	INIT_WORK(&fs_info->preempt_reclaim_work,
		  btrfs_preempt_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
					    struct btrfs_space_info *space_info,
					    struct reserve_ticket *ticket,
					    const enum btrfs_flush_state *states,
					    int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state],
			    false);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					struct reserve_ticket *ticket)
{
	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	}
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list.  After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket.  If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * Do the appropriate flushing and waiting for a ticket
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		/*
		 * We were a priority ticket, so we need to delete ourselves
		 * from the list.  Because we could have other priority tickets
		 * behind us that require less space, run
		 * btrfs_try_granting_tickets() to see if their reservations can
		 * now be made.
		 */
		if (!list_empty(&ticket->list)) {
			remove_ticket(space_info, ticket);
			btrfs_try_granting_tickets(fs_info, space_info);
		}

		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
				   start_ns, flush, ticket->error);
	return ret;
}

/*
 * This returns true if this flush state will go through the ordinary flushing
 * code.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
		(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *space_info)
{
	u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);

	/*
	 * If we're heavy on ordered operations then clamping won't help us.  We
	 * need to clamp specifically to keep up with dirtying buffered
	 * writers, because there's not a 1:1 correlation of writing delalloc
	 * and freeing space, like there is with flushing delayed refs or
	 * delayed nodes.  If we're already more ordered than delalloc then
	 * we're keeping up, otherwise we aren't and should probably clamp.
	 */
	if (ordered < delalloc)
		space_info->clamp = min(space_info->clamp + 1, 8);
}

/**
 * Try to reserve bytes from the block_rsv's space
 *
 * @fs_info:    the filesystem
 * @space_info: space info we want to allocate from
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int __reserve_bytes(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *space_info, u64 orig_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	struct work_struct *async_work;
	struct reserve_ticket ticket;
	u64 start_ns = 0;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	if (flush == BTRFS_RESERVE_FLUSH_DATA)
		async_work = &fs_info->async_data_reclaim_work;
	else
		async_work = &fs_info->async_reclaim_work;

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				/*
				 * We were forced to add a reserve ticket, so
				 * our preemptive flushing is unable to keep
				 * up.  Clamp down on the threshold for the
				 * preemptive flushing in order to keep up with
				 * the workload.
				 */
				maybe_clamp_preempt(fs_info, space_info);

				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    !work_busy(&fs_info->preempt_reclaim_work) &&
		    need_preemptive_reclaim(fs_info, space_info)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
				      orig_bytes, flush);
}

/**
 * Try to reserve metadata bytes from the block_rsv's space
 *
 * @root:       the root we're allocating for
 * @block_rsv:  block_rsv we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;

	ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}

/**
 * Try to reserve data bytes for an allocation
 *
 * @fs_info: the filesystem
 * @bytes:   number of bytes we need
 * @flush:   how we are allowed to flush
 *
 * This will reserve bytes from the data space info.  If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	int ret;

	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);

	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
	}
	return ret;
}