// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
#include "zoned.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code. This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 * 1) space_info. This is the ultimate arbiter of how much space we can use.
 * There's a description of the bytes_ fields with the struct declaration,
 * refer to that for specifics on each field. Suffice it to say that for
 * reservations we care about total_bytes - SUM(space_info->bytes_) when
 * determining if there is space to make an allocation. There is a space_info
 * for METADATA, SYSTEM, and DATA areas.
 *
 * 2) block_rsv's. These are basically buckets for every different type of
 * metadata reservation we have. You can see the comment in the block_rsv
 * code on the rules for each type, but generally block_rsv->reserved is how
 * much space is accounted for in space_info->bytes_may_use.
 *
 * 3) btrfs_calc*_size. These are the worst case calculations we use based
 * on the number of items we will want to modify. We have one for changing
 * items, and one for inserting new items. Generally we use these helpers to
 * determine the size of the block reserves, and then use the actual bytes
 * values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 * We call into either btrfs_reserve_data_bytes() or
 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 * the num_bytes we want to reserve.
 *
 * ->reserve
 *   space_info->bytes_may_use += num_bytes
 *
 * ->extent allocation
 *   Call btrfs_add_reserved_bytes() which does
 *   space_info->bytes_may_use -= num_bytes
 *   space_info->bytes_reserved += extent_bytes
 *
 * ->insert reference
 *   Call btrfs_update_block_group() which does
 *   space_info->bytes_reserved -= extent_bytes
 *   space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 * Assume we are unable to simply make the reservation because we do not have
 * enough space.
 *
 * -> __reserve_bytes
 *   create a reserve_ticket with ->bytes set to our reservation, add it to
 *   the tail of space_info->tickets, kick async flush thread
 *
 * ->handle_reserve_ticket
 *   wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *   on the ticket.
 *
 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *   Flushes various things attempting to free up space.
 *
 * -> btrfs_try_granting_tickets()
 *   This is called by anything that either subtracts space from
 *   space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *   space_info->total_bytes. This loops through the ->priority_tickets and
 *   then the ->tickets list checking to see if the reservation can be
 *   completed. If it can the space is added to space_info->bytes_may_use and
 *   the ticket is woken up.
 *
 * -> ticket wakeup
 *   Check if ->bytes == 0; if so we got our reservation and we can carry
 *   on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *   were interrupted.)
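 *
 * As a rough, caller-side sketch only (not lifted from any particular call
 * site; block_rsv and num_bytes here are placeholder names), a normal
 * metadata reservation built on the helpers in this file looks roughly like:
 *
 *   ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes,
 *                                      BTRFS_RESERVE_FLUSH_ALL);
 *   if (ret)
 *           return ret;   (ENOSPC, or EINTR if the wait was interrupted)
 *   on success num_bytes has been added to space_info->bytes_may_use and the
 *   caller typically adds it to its block_rsv (see the block_rsv code)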
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 * Same as the above, except we add ourselves to the
 * space_info->priority_tickets, and we do not use ticket->wait; we simply
 * call flush_space() ourselves for the states that are safe for us to call
 * without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 * Generally speaking we will have two cases for each state, a "nice" state
 * and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
 * reduce the locking overhead on the various trees, and even to keep from
 * doing any work at all in the case of delayed refs. Each of these delayed
 * things, however, holds reservations, and so letting them run allows us to
 * reclaim space so we can make new reservations.
 *
 * FLUSH_DELAYED_ITEMS
 *   Every inode has a delayed item to update the inode. Take a simple write
 *   for example: we would update the inode item at write time to update the
 *   mtime, and then again at finish_ordered_io() time in order to update the
 *   isize or bytes. We keep these delayed items to coalesce these operations
 *   into a single operation done on demand. These are an easy way to reclaim
 *   metadata space.
 *
 * FLUSH_DELALLOC
 *   Look at the delalloc comment to get an idea of how much space is reserved
 *   for delayed allocation. We can reclaim some of this space simply by
 *   running delalloc, but usually we need to wait for ordered extents to
 *   reclaim the bulk of this space.
 *
 * FLUSH_DELAYED_REFS
 *   We have a block reserve for the outstanding delayed refs space, and every
 *   delayed ref operation holds a reservation. Running these is a quick way
 *   to reclaim space, but we want to hold this until the end because COW can
 *   churn a lot and we can avoid making some extent tree modifications if we
 *   are able to delay for as long as possible.
 *
 * ALLOC_CHUNK
 *   We will skip this the first time through space reservation, because of
 *   overcommit and because we don't want to have a lot of useless metadata
 *   space when our worst case reservations will likely never come true.
 *
 * RUN_DELAYED_IPUTS
 *   If we're freeing inodes we're likely freeing checksums, file extent
 *   items, and extent tree items. Loads of space could be freed up by these
 *   operations, however they won't be usable until the transaction commits.
 *
 * COMMIT_TRANS
 *   This will commit the transaction. Historically we had a lot of logic
 *   surrounding whether or not we'd commit the transaction, but this was born
 *   out of a pre-tickets era where we could end up committing the transaction
 *   thousands of times in a row without making progress. Now thanks to our
 *   ticketing system we know if we're not making progress and can error
 *   everybody out after a few commits rather than burning the disk hoping for
 *   a different answer.
 *
 * OVERCOMMIT
 *
 * Because we hold so many reservations for metadata we will allow you to
 * reserve more space than is currently free in the currently allocated
 * metadata space. This only happens with metadata; data does not allow
 * overcommitting.
 *
 * You can see the current logic for when we allow overcommit in
 * btrfs_can_overcommit(), but it only applies to unallocated space. If there
 * is no unallocated space to be had, all reservations are kept within the
 * free space in the allocated metadata chunks.
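 *
 * As a rough sketch (the real check is btrfs_can_overcommit() further down
 * in this file), the overcommit decision amounts to:
 *
 *   used  = bytes_used + bytes_reserved + bytes_pinned + bytes_readonly +
 *           bytes_zone_unusable + bytes_may_use
 *   avail = unallocated space, divided by the raid factor of the metadata
 *           profile, then >> 3 for BTRFS_RESERVE_FLUSH_ALL or >> 1 for the
 *           other flush modes (and forced to 0 for metadata on zoned
 *           filesystems, i.e. no overcommit there)
 *   allow the reservation if used + num_bytes < writable total_bytes + avail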
 *
 * Because of overcommitting, you generally want to use the
 * btrfs_can_overcommit() logic for metadata allocations, as it does the right
 * thing with or without extra unallocated space.
 */

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		s_info->bytes_zone_unusable +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}

/*
 * Block groups with more than this value (percents) of unusable space will be
 * scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH		(75)

/*
 * Calculate chunk size depending on volume type (regular or zoned).
 */
static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
	if (btrfs_is_zoned(fs_info))
		return fs_info->zone_size;

	ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		return BTRFS_MAX_DATA_CHUNK_SIZE;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return SZ_32M;

	/* Handle BTRFS_BLOCK_GROUP_METADATA */
	if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
		return SZ_1G;

	return SZ_256M;
}

/*
 * Update default chunk size.
 */
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size)
{
	WRITE_ONCE(space_info->chunk_size, chunk_size);
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);
	space_info->clamp = 1;
	btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));

	if (btrfs_is_zoned(info))
		space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
278 goto out; 279 280 if (mixed) { 281 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; 282 ret = create_space_info(fs_info, flags); 283 } else { 284 flags = BTRFS_BLOCK_GROUP_METADATA; 285 ret = create_space_info(fs_info, flags); 286 if (ret) 287 goto out; 288 289 flags = BTRFS_BLOCK_GROUP_DATA; 290 ret = create_space_info(fs_info, flags); 291 } 292 out: 293 return ret; 294 } 295 296 void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info, 297 struct btrfs_block_group *block_group) 298 { 299 struct btrfs_space_info *found; 300 int factor, index; 301 302 factor = btrfs_bg_type_to_factor(block_group->flags); 303 304 found = btrfs_find_space_info(info, block_group->flags); 305 ASSERT(found); 306 spin_lock(&found->lock); 307 found->total_bytes += block_group->length; 308 if (block_group->zone_is_active) 309 found->active_total_bytes += block_group->length; 310 found->disk_total += block_group->length * factor; 311 found->bytes_used += block_group->used; 312 found->disk_used += block_group->used * factor; 313 found->bytes_readonly += block_group->bytes_super; 314 found->bytes_zone_unusable += block_group->zone_unusable; 315 if (block_group->length > 0) 316 found->full = 0; 317 btrfs_try_granting_tickets(info, found); 318 spin_unlock(&found->lock); 319 320 block_group->space_info = found; 321 322 index = btrfs_bg_flags_to_raid_index(block_group->flags); 323 down_write(&found->groups_sem); 324 list_add_tail(&block_group->list, &found->block_groups[index]); 325 up_write(&found->groups_sem); 326 } 327 328 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, 329 u64 flags) 330 { 331 struct list_head *head = &info->space_info; 332 struct btrfs_space_info *found; 333 334 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK; 335 336 list_for_each_entry(found, head, list) { 337 if (found->flags & flags) 338 return found; 339 } 340 return NULL; 341 } 342 343 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, 344 struct btrfs_space_info *space_info, 345 enum btrfs_reserve_flush_enum flush) 346 { 347 u64 profile; 348 u64 avail; 349 int factor; 350 351 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) 352 profile = btrfs_system_alloc_profile(fs_info); 353 else 354 profile = btrfs_metadata_alloc_profile(fs_info); 355 356 avail = atomic64_read(&fs_info->free_chunk_space); 357 358 /* 359 * If we have dup, raid1 or raid10 then only half of the free 360 * space is actually usable. For raid56, the space info used 361 * doesn't include the parity drive, so we don't have to 362 * change the math 363 */ 364 factor = btrfs_bg_type_to_factor(profile); 365 avail = div_u64(avail, factor); 366 367 /* 368 * If we aren't flushing all things, let us overcommit up to 369 * 1/2th of the space. If we can flush, don't let us overcommit 370 * too much, let it overcommit up to 1/8 of the space. 371 */ 372 if (flush == BTRFS_RESERVE_FLUSH_ALL) 373 avail >>= 3; 374 else 375 avail >>= 1; 376 return avail; 377 } 378 379 static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info, 380 struct btrfs_space_info *space_info) 381 { 382 /* 383 * On regular filesystem, all total_bytes are always writable. On zoned 384 * filesystem, there may be a limitation imposed by max_active_zones. 385 * For metadata allocation, we cannot finish an existing active block 386 * group to avoid a deadlock. Thus, we need to consider only the active 387 * groups to be writable for metadata space. 
388 */ 389 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA)) 390 return space_info->total_bytes; 391 392 return space_info->active_total_bytes; 393 } 394 395 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, 396 struct btrfs_space_info *space_info, u64 bytes, 397 enum btrfs_reserve_flush_enum flush) 398 { 399 u64 avail; 400 u64 used; 401 402 /* Don't overcommit when in mixed mode */ 403 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) 404 return 0; 405 406 used = btrfs_space_info_used(space_info, true); 407 if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA)) 408 avail = 0; 409 else 410 avail = calc_available_free_space(fs_info, space_info, flush); 411 412 if (used + bytes < writable_total_bytes(fs_info, space_info) + avail) 413 return 1; 414 return 0; 415 } 416 417 static void remove_ticket(struct btrfs_space_info *space_info, 418 struct reserve_ticket *ticket) 419 { 420 if (!list_empty(&ticket->list)) { 421 list_del_init(&ticket->list); 422 ASSERT(space_info->reclaim_size >= ticket->bytes); 423 space_info->reclaim_size -= ticket->bytes; 424 } 425 } 426 427 /* 428 * This is for space we already have accounted in space_info->bytes_may_use, so 429 * basically when we're returning space from block_rsv's. 430 */ 431 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, 432 struct btrfs_space_info *space_info) 433 { 434 struct list_head *head; 435 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; 436 437 lockdep_assert_held(&space_info->lock); 438 439 head = &space_info->priority_tickets; 440 again: 441 while (!list_empty(head)) { 442 struct reserve_ticket *ticket; 443 u64 used = btrfs_space_info_used(space_info, true); 444 445 ticket = list_first_entry(head, struct reserve_ticket, list); 446 447 /* Check and see if our ticket can be satisfied now. */ 448 if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) || 449 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, 450 flush)) { 451 btrfs_space_info_update_bytes_may_use(fs_info, 452 space_info, 453 ticket->bytes); 454 remove_ticket(space_info, ticket); 455 ticket->bytes = 0; 456 space_info->tickets_id++; 457 wake_up(&ticket->wait); 458 } else { 459 break; 460 } 461 } 462 463 if (head == &space_info->priority_tickets) { 464 head = &space_info->tickets; 465 flush = BTRFS_RESERVE_FLUSH_ALL; 466 goto again; 467 } 468 } 469 470 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \ 471 do { \ 472 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \ 473 spin_lock(&__rsv->lock); \ 474 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \ 475 __rsv->size, __rsv->reserved); \ 476 spin_unlock(&__rsv->lock); \ 477 } while (0) 478 479 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 480 struct btrfs_space_info *info) 481 { 482 lockdep_assert_held(&info->lock); 483 484 /* The free space could be negative in case of overcommit */ 485 btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull", 486 info->flags, 487 (s64)(info->total_bytes - btrfs_space_info_used(info, true)), 488 info->full ? 
"" : "not "); 489 btrfs_info(fs_info, 490 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu", 491 info->total_bytes, info->bytes_used, info->bytes_pinned, 492 info->bytes_reserved, info->bytes_may_use, 493 info->bytes_readonly, info->bytes_zone_unusable); 494 495 DUMP_BLOCK_RSV(fs_info, global_block_rsv); 496 DUMP_BLOCK_RSV(fs_info, trans_block_rsv); 497 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv); 498 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv); 499 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); 500 501 } 502 503 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 504 struct btrfs_space_info *info, u64 bytes, 505 int dump_block_groups) 506 { 507 struct btrfs_block_group *cache; 508 int index = 0; 509 510 spin_lock(&info->lock); 511 __btrfs_dump_space_info(fs_info, info); 512 spin_unlock(&info->lock); 513 514 if (!dump_block_groups) 515 return; 516 517 down_read(&info->groups_sem); 518 again: 519 list_for_each_entry(cache, &info->block_groups[index], list) { 520 spin_lock(&cache->lock); 521 btrfs_info(fs_info, 522 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s", 523 cache->start, cache->length, cache->used, cache->pinned, 524 cache->reserved, cache->zone_unusable, 525 cache->ro ? "[readonly]" : ""); 526 spin_unlock(&cache->lock); 527 btrfs_dump_free_space(cache, bytes); 528 } 529 if (++index < BTRFS_NR_RAID_TYPES) 530 goto again; 531 up_read(&info->groups_sem); 532 } 533 534 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, 535 u64 to_reclaim) 536 { 537 u64 bytes; 538 u64 nr; 539 540 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 541 nr = div64_u64(to_reclaim, bytes); 542 if (!nr) 543 nr = 1; 544 return nr; 545 } 546 547 #define EXTENT_SIZE_PER_ITEM SZ_256K 548 549 /* 550 * shrink metadata reservation for delalloc 551 */ 552 static void shrink_delalloc(struct btrfs_fs_info *fs_info, 553 struct btrfs_space_info *space_info, 554 u64 to_reclaim, bool wait_ordered, 555 bool for_preempt) 556 { 557 struct btrfs_trans_handle *trans; 558 u64 delalloc_bytes; 559 u64 ordered_bytes; 560 u64 items; 561 long time_left; 562 int loops; 563 564 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); 565 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); 566 if (delalloc_bytes == 0 && ordered_bytes == 0) 567 return; 568 569 /* Calc the number of the pages we need flush for space reservation */ 570 if (to_reclaim == U64_MAX) { 571 items = U64_MAX; 572 } else { 573 /* 574 * to_reclaim is set to however much metadata we need to 575 * reclaim, but reclaiming that much data doesn't really track 576 * exactly. What we really want to do is reclaim full inode's 577 * worth of reservations, however that's not available to us 578 * here. We will take a fraction of the delalloc bytes for our 579 * flushing loops and hope for the best. Delalloc will expand 580 * the amount we write to cover an entire dirty extent, which 581 * will reclaim the metadata reservation for that range. If 582 * it's not enough subsequent flush stages will be more 583 * aggressive. 584 */ 585 to_reclaim = max(to_reclaim, delalloc_bytes >> 3); 586 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; 587 } 588 589 trans = current->journal_info; 590 591 /* 592 * If we are doing more ordered than delalloc we need to just wait on 593 * ordered extents, otherwise we'll waste time trying to flush delalloc 594 * that likely won't give us the space back we need. 
595 */ 596 if (ordered_bytes > delalloc_bytes && !for_preempt) 597 wait_ordered = true; 598 599 loops = 0; 600 while ((delalloc_bytes || ordered_bytes) && loops < 3) { 601 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT; 602 long nr_pages = min_t(u64, temp, LONG_MAX); 603 int async_pages; 604 605 btrfs_start_delalloc_roots(fs_info, nr_pages, true); 606 607 /* 608 * We need to make sure any outstanding async pages are now 609 * processed before we continue. This is because things like 610 * sync_inode() try to be smart and skip writing if the inode is 611 * marked clean. We don't use filemap_fwrite for flushing 612 * because we want to control how many pages we write out at a 613 * time, thus this is the only safe way to make sure we've 614 * waited for outstanding compressed workers to have started 615 * their jobs and thus have ordered extents set up properly. 616 * 617 * This exists because we do not want to wait for each 618 * individual inode to finish its async work, we simply want to 619 * start the IO on everybody, and then come back here and wait 620 * for all of the async work to catch up. Once we're done with 621 * that we know we'll have ordered extents for everything and we 622 * can decide if we wait for that or not. 623 * 624 * If we choose to replace this in the future, make absolutely 625 * sure that the proper waiting is being done in the async case, 626 * as there have been bugs in that area before. 627 */ 628 async_pages = atomic_read(&fs_info->async_delalloc_pages); 629 if (!async_pages) 630 goto skip_async; 631 632 /* 633 * We don't want to wait forever, if we wrote less pages in this 634 * loop than we have outstanding, only wait for that number of 635 * pages, otherwise we can wait for all async pages to finish 636 * before continuing. 637 */ 638 if (async_pages > nr_pages) 639 async_pages -= nr_pages; 640 else 641 async_pages = 0; 642 wait_event(fs_info->async_submit_wait, 643 atomic_read(&fs_info->async_delalloc_pages) <= 644 async_pages); 645 skip_async: 646 loops++; 647 if (wait_ordered && !trans) { 648 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 649 } else { 650 time_left = schedule_timeout_killable(1); 651 if (time_left) 652 break; 653 } 654 655 /* 656 * If we are for preemption we just want a one-shot of delalloc 657 * flushing so we can stop flushing if we decide we don't need 658 * to anymore. 659 */ 660 if (for_preempt) 661 break; 662 663 spin_lock(&space_info->lock); 664 if (list_empty(&space_info->tickets) && 665 list_empty(&space_info->priority_tickets)) { 666 spin_unlock(&space_info->lock); 667 break; 668 } 669 spin_unlock(&space_info->lock); 670 671 delalloc_bytes = percpu_counter_sum_positive( 672 &fs_info->delalloc_bytes); 673 ordered_bytes = percpu_counter_sum_positive( 674 &fs_info->ordered_bytes); 675 } 676 } 677 678 /* 679 * Try to flush some data based on policy set by @state. This is only advisory 680 * and may fail for various reasons. The caller is supposed to examine the 681 * state of @space_info to detect the outcome. 
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			enum btrfs_flush_state state, bool for_preempt)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
	case FLUSH_DELALLOC_FULL:
		if (state == FLUSH_DELALLOC_FULL)
			num_bytes = U64_MAX;
		shrink_delalloc(fs_info, space_info, num_bytes,
				state != FLUSH_DELALLOC, for_preempt);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		/*
		 * For metadata space on zoned filesystem, reaching here means we
		 * don't have enough space left in active_total_bytes. Try to
		 * activate a block group first, because we may have inactive
		 * block group already allocated.
		 */
		ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
		if (ret < 0)
			break;
		else if (ret == 1)
			break;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);

		/*
		 * For metadata space on zoned filesystem, allocating a new
		 * chunk is not enough. We still need to activate the block
		 * group. Activate the newly allocated block group by (maybe)
		 * finishing a block group.
		 */
		if (ret == 1) {
			ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
			/*
			 * Revert to the original ret regardless of whether we
			 * could finish one block group or not.
			 */
			if (ret >= 0)
				ret = 1;
		}

		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
779 */ 780 btrfs_run_delayed_iputs(fs_info); 781 btrfs_wait_on_delayed_iputs(fs_info); 782 break; 783 case COMMIT_TRANS: 784 ASSERT(current->journal_info == NULL); 785 trans = btrfs_join_transaction(root); 786 if (IS_ERR(trans)) { 787 ret = PTR_ERR(trans); 788 break; 789 } 790 ret = btrfs_commit_transaction(trans); 791 break; 792 default: 793 ret = -ENOSPC; 794 break; 795 } 796 797 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, 798 ret, for_preempt); 799 return; 800 } 801 802 static inline u64 803 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, 804 struct btrfs_space_info *space_info) 805 { 806 u64 used; 807 u64 avail; 808 u64 total; 809 u64 to_reclaim = space_info->reclaim_size; 810 811 lockdep_assert_held(&space_info->lock); 812 813 avail = calc_available_free_space(fs_info, space_info, 814 BTRFS_RESERVE_FLUSH_ALL); 815 used = btrfs_space_info_used(space_info, true); 816 817 /* 818 * We may be flushing because suddenly we have less space than we had 819 * before, and now we're well over-committed based on our current free 820 * space. If that's the case add in our overage so we make sure to put 821 * appropriate pressure on the flushing state machine. 822 */ 823 total = writable_total_bytes(fs_info, space_info); 824 if (total + avail < used) 825 to_reclaim += used - (total + avail); 826 827 return to_reclaim; 828 } 829 830 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, 831 struct btrfs_space_info *space_info) 832 { 833 u64 global_rsv_size = fs_info->global_block_rsv.reserved; 834 u64 ordered, delalloc; 835 u64 total = writable_total_bytes(fs_info, space_info); 836 u64 thresh; 837 u64 used; 838 839 thresh = div_factor_fine(total, 90); 840 841 lockdep_assert_held(&space_info->lock); 842 843 /* If we're just plain full then async reclaim just slows us down. */ 844 if ((space_info->bytes_used + space_info->bytes_reserved + 845 global_rsv_size) >= thresh) 846 return false; 847 848 used = space_info->bytes_may_use + space_info->bytes_pinned; 849 850 /* The total flushable belongs to the global rsv, don't flush. */ 851 if (global_rsv_size >= used) 852 return false; 853 854 /* 855 * 128MiB is 1/4 of the maximum global rsv size. If we have less than 856 * that devoted to other reservations then there's no sense in flushing, 857 * we don't have a lot of things that need flushing. 858 */ 859 if (used - global_rsv_size <= SZ_128M) 860 return false; 861 862 /* 863 * We have tickets queued, bail so we don't compete with the async 864 * flushers. 865 */ 866 if (space_info->reclaim_size) 867 return false; 868 869 /* 870 * If we have over half of the free space occupied by reservations or 871 * pinned then we want to start flushing. 872 * 873 * We do not do the traditional thing here, which is to say 874 * 875 * if (used >= ((total_bytes + avail) / 2)) 876 * return 1; 877 * 878 * because this doesn't quite work how we want. If we had more than 50% 879 * of the space_info used by bytes_used and we had 0 available we'd just 880 * constantly run the background flusher. Instead we want it to kick in 881 * if our reclaimable space exceeds our clamped free space. 882 * 883 * Our clamping range is 2^1 -> 2^8. 
Practically speaking that means 884 * the following: 885 * 886 * Amount of RAM Minimum threshold Maximum threshold 887 * 888 * 256GiB 1GiB 128GiB 889 * 128GiB 512MiB 64GiB 890 * 64GiB 256MiB 32GiB 891 * 32GiB 128MiB 16GiB 892 * 16GiB 64MiB 8GiB 893 * 894 * These are the range our thresholds will fall in, corresponding to how 895 * much delalloc we need for the background flusher to kick in. 896 */ 897 898 thresh = calc_available_free_space(fs_info, space_info, 899 BTRFS_RESERVE_FLUSH_ALL); 900 used = space_info->bytes_used + space_info->bytes_reserved + 901 space_info->bytes_readonly + global_rsv_size; 902 if (used < total) 903 thresh += total - used; 904 thresh >>= space_info->clamp; 905 906 used = space_info->bytes_pinned; 907 908 /* 909 * If we have more ordered bytes than delalloc bytes then we're either 910 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting 911 * around. Preemptive flushing is only useful in that it can free up 912 * space before tickets need to wait for things to finish. In the case 913 * of ordered extents, preemptively waiting on ordered extents gets us 914 * nothing, if our reservations are tied up in ordered extents we'll 915 * simply have to slow down writers by forcing them to wait on ordered 916 * extents. 917 * 918 * In the case that ordered is larger than delalloc, only include the 919 * block reserves that we would actually be able to directly reclaim 920 * from. In this case if we're heavy on metadata operations this will 921 * clearly be heavy enough to warrant preemptive flushing. In the case 922 * of heavy DIO or ordered reservations, preemptive flushing will just 923 * waste time and cause us to slow down. 924 * 925 * We want to make sure we truly are maxed out on ordered however, so 926 * cut ordered in half, and if it's still higher than delalloc then we 927 * can keep flushing. This is to avoid the case where we start 928 * flushing, and now delalloc == ordered and we stop preemptively 929 * flushing when we could still have several gigs of delalloc to flush. 
930 */ 931 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1; 932 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes); 933 if (ordered >= delalloc) 934 used += fs_info->delayed_refs_rsv.reserved + 935 fs_info->delayed_block_rsv.reserved; 936 else 937 used += space_info->bytes_may_use - global_rsv_size; 938 939 return (used >= thresh && !btrfs_fs_closing(fs_info) && 940 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); 941 } 942 943 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, 944 struct btrfs_space_info *space_info, 945 struct reserve_ticket *ticket) 946 { 947 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 948 u64 min_bytes; 949 950 if (!ticket->steal) 951 return false; 952 953 if (global_rsv->space_info != space_info) 954 return false; 955 956 spin_lock(&global_rsv->lock); 957 min_bytes = div_factor(global_rsv->size, 1); 958 if (global_rsv->reserved < min_bytes + ticket->bytes) { 959 spin_unlock(&global_rsv->lock); 960 return false; 961 } 962 global_rsv->reserved -= ticket->bytes; 963 remove_ticket(space_info, ticket); 964 ticket->bytes = 0; 965 wake_up(&ticket->wait); 966 space_info->tickets_id++; 967 if (global_rsv->reserved < global_rsv->size) 968 global_rsv->full = 0; 969 spin_unlock(&global_rsv->lock); 970 971 return true; 972 } 973 974 /* 975 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets 976 * @fs_info - fs_info for this fs 977 * @space_info - the space info we were flushing 978 * 979 * We call this when we've exhausted our flushing ability and haven't made 980 * progress in satisfying tickets. The reservation code handles tickets in 981 * order, so if there is a large ticket first and then smaller ones we could 982 * very well satisfy the smaller tickets. This will attempt to wake up any 983 * tickets in the list to catch this case. 984 * 985 * This function returns true if it was able to make progress by clearing out 986 * other tickets, or if it stumbles across a ticket that was smaller than the 987 * first ticket. 988 */ 989 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, 990 struct btrfs_space_info *space_info) 991 { 992 struct reserve_ticket *ticket; 993 u64 tickets_id = space_info->tickets_id; 994 const bool aborted = BTRFS_FS_ERROR(fs_info); 995 996 trace_btrfs_fail_all_tickets(fs_info, space_info); 997 998 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 999 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info"); 1000 __btrfs_dump_space_info(fs_info, space_info); 1001 } 1002 1003 while (!list_empty(&space_info->tickets) && 1004 tickets_id == space_info->tickets_id) { 1005 ticket = list_first_entry(&space_info->tickets, 1006 struct reserve_ticket, list); 1007 1008 if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket)) 1009 return true; 1010 1011 if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1012 btrfs_info(fs_info, "failing ticket with %llu bytes", 1013 ticket->bytes); 1014 1015 remove_ticket(space_info, ticket); 1016 if (aborted) 1017 ticket->error = -EIO; 1018 else 1019 ticket->error = -ENOSPC; 1020 wake_up(&ticket->wait); 1021 1022 /* 1023 * We're just throwing tickets away, so more flushing may not 1024 * trip over btrfs_try_granting_tickets, so we need to call it 1025 * here to see if we can make progress with the next ticket in 1026 * the list. 
1027 */ 1028 if (!aborted) 1029 btrfs_try_granting_tickets(fs_info, space_info); 1030 } 1031 return (tickets_id != space_info->tickets_id); 1032 } 1033 1034 /* 1035 * This is for normal flushers, we can wait all goddamned day if we want to. We 1036 * will loop and continuously try to flush as long as we are making progress. 1037 * We count progress as clearing off tickets each time we have to loop. 1038 */ 1039 static void btrfs_async_reclaim_metadata_space(struct work_struct *work) 1040 { 1041 struct btrfs_fs_info *fs_info; 1042 struct btrfs_space_info *space_info; 1043 u64 to_reclaim; 1044 enum btrfs_flush_state flush_state; 1045 int commit_cycles = 0; 1046 u64 last_tickets_id; 1047 1048 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); 1049 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 1050 1051 spin_lock(&space_info->lock); 1052 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 1053 if (!to_reclaim) { 1054 space_info->flush = 0; 1055 spin_unlock(&space_info->lock); 1056 return; 1057 } 1058 last_tickets_id = space_info->tickets_id; 1059 spin_unlock(&space_info->lock); 1060 1061 flush_state = FLUSH_DELAYED_ITEMS_NR; 1062 do { 1063 flush_space(fs_info, space_info, to_reclaim, flush_state, false); 1064 spin_lock(&space_info->lock); 1065 if (list_empty(&space_info->tickets)) { 1066 space_info->flush = 0; 1067 spin_unlock(&space_info->lock); 1068 return; 1069 } 1070 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, 1071 space_info); 1072 if (last_tickets_id == space_info->tickets_id) { 1073 flush_state++; 1074 } else { 1075 last_tickets_id = space_info->tickets_id; 1076 flush_state = FLUSH_DELAYED_ITEMS_NR; 1077 if (commit_cycles) 1078 commit_cycles--; 1079 } 1080 1081 /* 1082 * We do not want to empty the system of delalloc unless we're 1083 * under heavy pressure, so allow one trip through the flushing 1084 * logic before we start doing a FLUSH_DELALLOC_FULL. 1085 */ 1086 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles) 1087 flush_state++; 1088 1089 /* 1090 * We don't want to force a chunk allocation until we've tried 1091 * pretty hard to reclaim space. Think of the case where we 1092 * freed up a bunch of space and so have a lot of pinned space 1093 * to reclaim. We would rather use that than possibly create a 1094 * underutilized metadata chunk. So if this is our first run 1095 * through the flushing state machine skip ALLOC_CHUNK_FORCE and 1096 * commit the transaction. If nothing has changed the next go 1097 * around then we can force a chunk allocation. 1098 */ 1099 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles) 1100 flush_state++; 1101 1102 if (flush_state > COMMIT_TRANS) { 1103 commit_cycles++; 1104 if (commit_cycles > 2) { 1105 if (maybe_fail_all_tickets(fs_info, space_info)) { 1106 flush_state = FLUSH_DELAYED_ITEMS_NR; 1107 commit_cycles--; 1108 } else { 1109 space_info->flush = 0; 1110 } 1111 } else { 1112 flush_state = FLUSH_DELAYED_ITEMS_NR; 1113 } 1114 } 1115 spin_unlock(&space_info->lock); 1116 } while (flush_state <= COMMIT_TRANS); 1117 } 1118 1119 /* 1120 * This handles pre-flushing of metadata space before we get to the point that 1121 * we need to start blocking threads on tickets. The logic here is different 1122 * from the other flush paths because it doesn't rely on tickets to tell us how 1123 * much we need to flush, instead it attempts to keep us below the 80% full 1124 * watermark of space by flushing whichever reservation pool is currently the 1125 * largest. 
1126 */ 1127 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work) 1128 { 1129 struct btrfs_fs_info *fs_info; 1130 struct btrfs_space_info *space_info; 1131 struct btrfs_block_rsv *delayed_block_rsv; 1132 struct btrfs_block_rsv *delayed_refs_rsv; 1133 struct btrfs_block_rsv *global_rsv; 1134 struct btrfs_block_rsv *trans_rsv; 1135 int loops = 0; 1136 1137 fs_info = container_of(work, struct btrfs_fs_info, 1138 preempt_reclaim_work); 1139 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 1140 delayed_block_rsv = &fs_info->delayed_block_rsv; 1141 delayed_refs_rsv = &fs_info->delayed_refs_rsv; 1142 global_rsv = &fs_info->global_block_rsv; 1143 trans_rsv = &fs_info->trans_block_rsv; 1144 1145 spin_lock(&space_info->lock); 1146 while (need_preemptive_reclaim(fs_info, space_info)) { 1147 enum btrfs_flush_state flush; 1148 u64 delalloc_size = 0; 1149 u64 to_reclaim, block_rsv_size; 1150 u64 global_rsv_size = global_rsv->reserved; 1151 1152 loops++; 1153 1154 /* 1155 * We don't have a precise counter for the metadata being 1156 * reserved for delalloc, so we'll approximate it by subtracting 1157 * out the block rsv's space from the bytes_may_use. If that 1158 * amount is higher than the individual reserves, then we can 1159 * assume it's tied up in delalloc reservations. 1160 */ 1161 block_rsv_size = global_rsv_size + 1162 delayed_block_rsv->reserved + 1163 delayed_refs_rsv->reserved + 1164 trans_rsv->reserved; 1165 if (block_rsv_size < space_info->bytes_may_use) 1166 delalloc_size = space_info->bytes_may_use - block_rsv_size; 1167 1168 /* 1169 * We don't want to include the global_rsv in our calculation, 1170 * because that's space we can't touch. Subtract it from the 1171 * block_rsv_size for the next checks. 1172 */ 1173 block_rsv_size -= global_rsv_size; 1174 1175 /* 1176 * We really want to avoid flushing delalloc too much, as it 1177 * could result in poor allocation patterns, so only flush it if 1178 * it's larger than the rest of the pools combined. 1179 */ 1180 if (delalloc_size > block_rsv_size) { 1181 to_reclaim = delalloc_size; 1182 flush = FLUSH_DELALLOC; 1183 } else if (space_info->bytes_pinned > 1184 (delayed_block_rsv->reserved + 1185 delayed_refs_rsv->reserved)) { 1186 to_reclaim = space_info->bytes_pinned; 1187 flush = COMMIT_TRANS; 1188 } else if (delayed_block_rsv->reserved > 1189 delayed_refs_rsv->reserved) { 1190 to_reclaim = delayed_block_rsv->reserved; 1191 flush = FLUSH_DELAYED_ITEMS_NR; 1192 } else { 1193 to_reclaim = delayed_refs_rsv->reserved; 1194 flush = FLUSH_DELAYED_REFS_NR; 1195 } 1196 1197 spin_unlock(&space_info->lock); 1198 1199 /* 1200 * We don't want to reclaim everything, just a portion, so scale 1201 * down the to_reclaim by 1/4. If it takes us down to 0, 1202 * reclaim 1 items worth. 1203 */ 1204 to_reclaim >>= 2; 1205 if (!to_reclaim) 1206 to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1); 1207 flush_space(fs_info, space_info, to_reclaim, flush, true); 1208 cond_resched(); 1209 spin_lock(&space_info->lock); 1210 } 1211 1212 /* We only went through once, back off our clamping. */ 1213 if (loops == 1 && !space_info->reclaim_size) 1214 space_info->clamp = max(1, space_info->clamp - 1); 1215 trace_btrfs_done_preemptive_reclaim(fs_info, space_info); 1216 spin_unlock(&space_info->lock); 1217 } 1218 1219 /* 1220 * FLUSH_DELALLOC_WAIT: 1221 * Space is freed from flushing delalloc in one of two ways. 
1222 * 1223 * 1) compression is on and we allocate less space than we reserved 1224 * 2) we are overwriting existing space 1225 * 1226 * For #1 that extra space is reclaimed as soon as the delalloc pages are 1227 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent 1228 * length to ->bytes_reserved, and subtracts the reserved space from 1229 * ->bytes_may_use. 1230 * 1231 * For #2 this is trickier. Once the ordered extent runs we will drop the 1232 * extent in the range we are overwriting, which creates a delayed ref for 1233 * that freed extent. This however is not reclaimed until the transaction 1234 * commits, thus the next stages. 1235 * 1236 * RUN_DELAYED_IPUTS 1237 * If we are freeing inodes, we want to make sure all delayed iputs have 1238 * completed, because they could have been on an inode with i_nlink == 0, and 1239 * thus have been truncated and freed up space. But again this space is not 1240 * immediately re-usable, it comes in the form of a delayed ref, which must be 1241 * run and then the transaction must be committed. 1242 * 1243 * COMMIT_TRANS 1244 * This is where we reclaim all of the pinned space generated by running the 1245 * iputs 1246 * 1247 * ALLOC_CHUNK_FORCE 1248 * For data we start with alloc chunk force, however we could have been full 1249 * before, and then the transaction commit could have freed new block groups, 1250 * so if we now have space to allocate do the force chunk allocation. 1251 */ 1252 static const enum btrfs_flush_state data_flush_states[] = { 1253 FLUSH_DELALLOC_FULL, 1254 RUN_DELAYED_IPUTS, 1255 COMMIT_TRANS, 1256 ALLOC_CHUNK_FORCE, 1257 }; 1258 1259 static void btrfs_async_reclaim_data_space(struct work_struct *work) 1260 { 1261 struct btrfs_fs_info *fs_info; 1262 struct btrfs_space_info *space_info; 1263 u64 last_tickets_id; 1264 enum btrfs_flush_state flush_state = 0; 1265 1266 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); 1267 space_info = fs_info->data_sinfo; 1268 1269 spin_lock(&space_info->lock); 1270 if (list_empty(&space_info->tickets)) { 1271 space_info->flush = 0; 1272 spin_unlock(&space_info->lock); 1273 return; 1274 } 1275 last_tickets_id = space_info->tickets_id; 1276 spin_unlock(&space_info->lock); 1277 1278 while (!space_info->full) { 1279 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); 1280 spin_lock(&space_info->lock); 1281 if (list_empty(&space_info->tickets)) { 1282 space_info->flush = 0; 1283 spin_unlock(&space_info->lock); 1284 return; 1285 } 1286 1287 /* Something happened, fail everything and bail. */ 1288 if (BTRFS_FS_ERROR(fs_info)) 1289 goto aborted_fs; 1290 last_tickets_id = space_info->tickets_id; 1291 spin_unlock(&space_info->lock); 1292 } 1293 1294 while (flush_state < ARRAY_SIZE(data_flush_states)) { 1295 flush_space(fs_info, space_info, U64_MAX, 1296 data_flush_states[flush_state], false); 1297 spin_lock(&space_info->lock); 1298 if (list_empty(&space_info->tickets)) { 1299 space_info->flush = 0; 1300 spin_unlock(&space_info->lock); 1301 return; 1302 } 1303 1304 if (last_tickets_id == space_info->tickets_id) { 1305 flush_state++; 1306 } else { 1307 last_tickets_id = space_info->tickets_id; 1308 flush_state = 0; 1309 } 1310 1311 if (flush_state >= ARRAY_SIZE(data_flush_states)) { 1312 if (space_info->full) { 1313 if (maybe_fail_all_tickets(fs_info, space_info)) 1314 flush_state = 0; 1315 else 1316 space_info->flush = 0; 1317 } else { 1318 flush_state = 0; 1319 } 1320 1321 /* Something happened, fail everything and bail. 
*/ 1322 if (BTRFS_FS_ERROR(fs_info)) 1323 goto aborted_fs; 1324 1325 } 1326 spin_unlock(&space_info->lock); 1327 } 1328 return; 1329 1330 aborted_fs: 1331 maybe_fail_all_tickets(fs_info, space_info); 1332 space_info->flush = 0; 1333 spin_unlock(&space_info->lock); 1334 } 1335 1336 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info) 1337 { 1338 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); 1339 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); 1340 INIT_WORK(&fs_info->preempt_reclaim_work, 1341 btrfs_preempt_reclaim_metadata_space); 1342 } 1343 1344 static const enum btrfs_flush_state priority_flush_states[] = { 1345 FLUSH_DELAYED_ITEMS_NR, 1346 FLUSH_DELAYED_ITEMS, 1347 ALLOC_CHUNK, 1348 }; 1349 1350 static const enum btrfs_flush_state evict_flush_states[] = { 1351 FLUSH_DELAYED_ITEMS_NR, 1352 FLUSH_DELAYED_ITEMS, 1353 FLUSH_DELAYED_REFS_NR, 1354 FLUSH_DELAYED_REFS, 1355 FLUSH_DELALLOC, 1356 FLUSH_DELALLOC_WAIT, 1357 FLUSH_DELALLOC_FULL, 1358 ALLOC_CHUNK, 1359 COMMIT_TRANS, 1360 }; 1361 1362 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, 1363 struct btrfs_space_info *space_info, 1364 struct reserve_ticket *ticket, 1365 const enum btrfs_flush_state *states, 1366 int states_nr) 1367 { 1368 u64 to_reclaim; 1369 int flush_state = 0; 1370 1371 spin_lock(&space_info->lock); 1372 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 1373 /* 1374 * This is the priority reclaim path, so to_reclaim could be >0 still 1375 * because we may have only satisfied the priority tickets and still 1376 * left non priority tickets on the list. We would then have 1377 * to_reclaim but ->bytes == 0. 1378 */ 1379 if (ticket->bytes == 0) { 1380 spin_unlock(&space_info->lock); 1381 return; 1382 } 1383 1384 while (flush_state < states_nr) { 1385 spin_unlock(&space_info->lock); 1386 flush_space(fs_info, space_info, to_reclaim, states[flush_state], 1387 false); 1388 flush_state++; 1389 spin_lock(&space_info->lock); 1390 if (ticket->bytes == 0) { 1391 spin_unlock(&space_info->lock); 1392 return; 1393 } 1394 } 1395 1396 /* Attempt to steal from the global rsv if we can. */ 1397 if (!steal_from_global_rsv(fs_info, space_info, ticket)) { 1398 ticket->error = -ENOSPC; 1399 remove_ticket(space_info, ticket); 1400 } 1401 1402 /* 1403 * We must run try_granting_tickets here because we could be a large 1404 * ticket in front of a smaller ticket that can now be satisfied with 1405 * the available space. 1406 */ 1407 btrfs_try_granting_tickets(fs_info, space_info); 1408 spin_unlock(&space_info->lock); 1409 } 1410 1411 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info, 1412 struct btrfs_space_info *space_info, 1413 struct reserve_ticket *ticket) 1414 { 1415 spin_lock(&space_info->lock); 1416 1417 /* We could have been granted before we got here. 
*/ 1418 if (ticket->bytes == 0) { 1419 spin_unlock(&space_info->lock); 1420 return; 1421 } 1422 1423 while (!space_info->full) { 1424 spin_unlock(&space_info->lock); 1425 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); 1426 spin_lock(&space_info->lock); 1427 if (ticket->bytes == 0) { 1428 spin_unlock(&space_info->lock); 1429 return; 1430 } 1431 } 1432 1433 ticket->error = -ENOSPC; 1434 remove_ticket(space_info, ticket); 1435 btrfs_try_granting_tickets(fs_info, space_info); 1436 spin_unlock(&space_info->lock); 1437 } 1438 1439 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, 1440 struct btrfs_space_info *space_info, 1441 struct reserve_ticket *ticket) 1442 1443 { 1444 DEFINE_WAIT(wait); 1445 int ret = 0; 1446 1447 spin_lock(&space_info->lock); 1448 while (ticket->bytes > 0 && ticket->error == 0) { 1449 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); 1450 if (ret) { 1451 /* 1452 * Delete us from the list. After we unlock the space 1453 * info, we don't want the async reclaim job to reserve 1454 * space for this ticket. If that would happen, then the 1455 * ticket's task would not known that space was reserved 1456 * despite getting an error, resulting in a space leak 1457 * (bytes_may_use counter of our space_info). 1458 */ 1459 remove_ticket(space_info, ticket); 1460 ticket->error = -EINTR; 1461 break; 1462 } 1463 spin_unlock(&space_info->lock); 1464 1465 schedule(); 1466 1467 finish_wait(&ticket->wait, &wait); 1468 spin_lock(&space_info->lock); 1469 } 1470 spin_unlock(&space_info->lock); 1471 } 1472 1473 /** 1474 * Do the appropriate flushing and waiting for a ticket 1475 * 1476 * @fs_info: the filesystem 1477 * @space_info: space info for the reservation 1478 * @ticket: ticket for the reservation 1479 * @start_ns: timestamp when the reservation started 1480 * @orig_bytes: amount of bytes originally reserved 1481 * @flush: how much we can flush 1482 * 1483 * This does the work of figuring out how to flush for the ticket, waiting for 1484 * the reservation, and returning the appropriate error if there is one. 1485 */ 1486 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, 1487 struct btrfs_space_info *space_info, 1488 struct reserve_ticket *ticket, 1489 u64 start_ns, u64 orig_bytes, 1490 enum btrfs_reserve_flush_enum flush) 1491 { 1492 int ret; 1493 1494 switch (flush) { 1495 case BTRFS_RESERVE_FLUSH_DATA: 1496 case BTRFS_RESERVE_FLUSH_ALL: 1497 case BTRFS_RESERVE_FLUSH_ALL_STEAL: 1498 wait_reserve_ticket(fs_info, space_info, ticket); 1499 break; 1500 case BTRFS_RESERVE_FLUSH_LIMIT: 1501 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1502 priority_flush_states, 1503 ARRAY_SIZE(priority_flush_states)); 1504 break; 1505 case BTRFS_RESERVE_FLUSH_EVICT: 1506 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1507 evict_flush_states, 1508 ARRAY_SIZE(evict_flush_states)); 1509 break; 1510 case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE: 1511 priority_reclaim_data_space(fs_info, space_info, ticket); 1512 break; 1513 default: 1514 ASSERT(0); 1515 break; 1516 } 1517 1518 ret = ticket->error; 1519 ASSERT(list_empty(&ticket->list)); 1520 /* 1521 * Check that we can't have an error set if the reservation succeeded, 1522 * as that would confuse tasks and lead them to error out without 1523 * releasing reserved space (if an error happens the expectation is that 1524 * space wasn't reserved at all). 
1525 */ 1526 ASSERT(!(ticket->bytes == 0 && ticket->error)); 1527 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, 1528 start_ns, flush, ticket->error); 1529 return ret; 1530 } 1531 1532 /* 1533 * This returns true if this flush state will go through the ordinary flushing 1534 * code. 1535 */ 1536 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) 1537 { 1538 return (flush == BTRFS_RESERVE_FLUSH_ALL) || 1539 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1540 } 1541 1542 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info, 1543 struct btrfs_space_info *space_info) 1544 { 1545 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); 1546 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); 1547 1548 /* 1549 * If we're heavy on ordered operations then clamping won't help us. We 1550 * need to clamp specifically to keep up with dirty'ing buffered 1551 * writers, because there's not a 1:1 correlation of writing delalloc 1552 * and freeing space, like there is with flushing delayed refs or 1553 * delayed nodes. If we're already more ordered than delalloc then 1554 * we're keeping up, otherwise we aren't and should probably clamp. 1555 */ 1556 if (ordered < delalloc) 1557 space_info->clamp = min(space_info->clamp + 1, 8); 1558 } 1559 1560 static inline bool can_steal(enum btrfs_reserve_flush_enum flush) 1561 { 1562 return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || 1563 flush == BTRFS_RESERVE_FLUSH_EVICT); 1564 } 1565 1566 /** 1567 * Try to reserve bytes from the block_rsv's space 1568 * 1569 * @fs_info: the filesystem 1570 * @space_info: space info we want to allocate from 1571 * @orig_bytes: number of bytes we want 1572 * @flush: whether or not we can flush to make our reservation 1573 * 1574 * This will reserve orig_bytes number of bytes from the space info associated 1575 * with the block_rsv. If there is not enough space it will make an attempt to 1576 * flush out space to make room. It will do this by flushing delalloc if 1577 * possible or committing the transaction. If flush is 0 then no attempts to 1578 * regain reservations will be made and this will fail if there is not enough 1579 * space already. 1580 */ 1581 static int __reserve_bytes(struct btrfs_fs_info *fs_info, 1582 struct btrfs_space_info *space_info, u64 orig_bytes, 1583 enum btrfs_reserve_flush_enum flush) 1584 { 1585 struct work_struct *async_work; 1586 struct reserve_ticket ticket; 1587 u64 start_ns = 0; 1588 u64 used; 1589 int ret = 0; 1590 bool pending_tickets; 1591 1592 ASSERT(orig_bytes); 1593 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); 1594 1595 if (flush == BTRFS_RESERVE_FLUSH_DATA) 1596 async_work = &fs_info->async_data_reclaim_work; 1597 else 1598 async_work = &fs_info->async_reclaim_work; 1599 1600 spin_lock(&space_info->lock); 1601 ret = -ENOSPC; 1602 used = btrfs_space_info_used(space_info, true); 1603 1604 /* 1605 * We don't want NO_FLUSH allocations to jump everybody, they can 1606 * generally handle ENOSPC in a different way, so treat them the same as 1607 * normal flushers when it comes to skipping pending tickets. 
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = can_steal(flush);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				/*
				 * We were forced to add a reserve ticket, so
				 * our preemptive flushing is unable to keep
				 * up. Clamp down on the threshold for the
				 * preemptive flushing in order to keep up with
				 * the workload.
				 */
				maybe_clamp_preempt(fs_info, space_info);

				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    !work_busy(&fs_info->preempt_reclaim_work) &&
		    need_preemptive_reclaim(fs_info, space_info)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
				     orig_bytes, flush);
}

/**
 * Try to reserve metadata bytes from the block_rsv's space
 *
 * @fs_info:    the filesystem
 * @block_rsv:  block_rsv we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction.
If flush is 0 then no attempts to 1704 * regain reservations will be made and this will fail if there is not enough 1705 * space already. 1706 */ 1707 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, 1708 struct btrfs_block_rsv *block_rsv, 1709 u64 orig_bytes, 1710 enum btrfs_reserve_flush_enum flush) 1711 { 1712 int ret; 1713 1714 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush); 1715 if (ret == -ENOSPC) { 1716 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1717 block_rsv->space_info->flags, 1718 orig_bytes, 1); 1719 1720 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1721 btrfs_dump_space_info(fs_info, block_rsv->space_info, 1722 orig_bytes, 0); 1723 } 1724 return ret; 1725 } 1726 1727 /** 1728 * Try to reserve data bytes for an allocation 1729 * 1730 * @fs_info: the filesystem 1731 * @bytes: number of bytes we need 1732 * @flush: how we are allowed to flush 1733 * 1734 * This will reserve bytes from the data space info. If there is not enough 1735 * space then we will attempt to flush space as specified by flush. 1736 */ 1737 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, 1738 enum btrfs_reserve_flush_enum flush) 1739 { 1740 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; 1741 int ret; 1742 1743 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA || 1744 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE); 1745 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); 1746 1747 ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush); 1748 if (ret == -ENOSPC) { 1749 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1750 data_sinfo->flags, bytes, 1); 1751 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1752 btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0); 1753 } 1754 return ret; 1755 } 1756