// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code. This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info. This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field. Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation. There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's. These are basically buckets for every different type of
 *   metadata reservation we have. You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size. These are the worst case calculations we use based
 *   on the number of items we will want to modify. We have one for changing
 *   items, and one for inserting new items. Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space.
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   -> handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes. This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed. If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it does we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
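 *
 *   To make the normal case above concrete, a purely illustrative walk through
 *   the counters (the 16K figure is made up for the example): reserving 16K of
 *   metadata that ends up allocated as a single 16K extent moves the counters
 *   like this:
 *
 *     ->reserve              bytes_may_use  += 16K
 *     ->extent allocation    bytes_may_use  -= 16K, bytes_reserved += 16K
 *     ->insert reference     bytes_reserved -= 16K, bytes_used     += 16K
 *
 *   In general the reservation is a worst case estimate and can be larger than
 *   the extents that end up being allocated, in which case the leftover is
 *   returned when the corresponding block_rsv is released.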
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs. Each of these delayed
 *   things, however, holds reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode. Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes. We keep these delayed items to coalesce these operations
 *     into a single operation done on demand. These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation. We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation. Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space when
 *     our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items. Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     This will commit the transaction. Historically we had a lot of logic
 *     surrounding whether or not we'd commit the transaction, but this was born
 *     out of a pre-tickets era where we could end up committing the transaction
 *     thousands of times in a row without making progress. Now thanks to our
 *     ticketing system we know if we're not making progress and can error
 *     everybody out after a few commits rather than burning the disk hoping for
 *     a different answer.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space. This only happens with metadata; data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space. If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
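 *
 *   A rough worked example of that check (illustrative numbers only): on a
 *   regular (non-zoned) filesystem with a SINGLE metadata profile, say
 *   total_bytes is 8G, the bytes_ counters sum to 7.75G and 4G of device space
 *   is still unallocated. For a BTRFS_RESERVE_FLUSH_ALL reservation the usable
 *   slack is 4G >> 3 = 512M, so a 512M reservation is allowed because
 *   7.75G + 512M < 8G + 512M, even though only 256M is actually free inside
 *   the allocated metadata chunks.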
155 * 156 * Because of overcommitting, you generally want to use the 157 * btrfs_can_overcommit() logic for metadata allocations, as it does the right 158 * thing with or without extra unallocated space. 159 */ 160 161 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, 162 bool may_use_included) 163 { 164 ASSERT(s_info); 165 return s_info->bytes_used + s_info->bytes_reserved + 166 s_info->bytes_pinned + s_info->bytes_readonly + 167 s_info->bytes_zone_unusable + 168 (may_use_included ? s_info->bytes_may_use : 0); 169 } 170 171 /* 172 * after adding space to the filesystem, we need to clear the full flags 173 * on all the space infos. 174 */ 175 void btrfs_clear_space_info_full(struct btrfs_fs_info *info) 176 { 177 struct list_head *head = &info->space_info; 178 struct btrfs_space_info *found; 179 180 list_for_each_entry(found, head, list) 181 found->full = 0; 182 } 183 184 /* 185 * Block groups with more than this value (percents) of unusable space will be 186 * scheduled for background reclaim. 187 */ 188 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75) 189 190 /* 191 * Calculate chunk size depending on volume type (regular or zoned). 192 */ 193 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags) 194 { 195 if (btrfs_is_zoned(fs_info)) 196 return fs_info->zone_size; 197 198 ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 199 200 if (flags & BTRFS_BLOCK_GROUP_DATA) 201 return SZ_1G; 202 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 203 return SZ_32M; 204 205 /* Handle BTRFS_BLOCK_GROUP_METADATA */ 206 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G) 207 return SZ_1G; 208 209 return SZ_256M; 210 } 211 212 /* 213 * Update default chunk size. 214 */ 215 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info, 216 u64 chunk_size) 217 { 218 WRITE_ONCE(space_info->chunk_size, chunk_size); 219 } 220 221 static int create_space_info(struct btrfs_fs_info *info, u64 flags) 222 { 223 224 struct btrfs_space_info *space_info; 225 int i; 226 int ret; 227 228 space_info = kzalloc(sizeof(*space_info), GFP_NOFS); 229 if (!space_info) 230 return -ENOMEM; 231 232 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 233 INIT_LIST_HEAD(&space_info->block_groups[i]); 234 init_rwsem(&space_info->groups_sem); 235 spin_lock_init(&space_info->lock); 236 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; 237 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 238 INIT_LIST_HEAD(&space_info->ro_bgs); 239 INIT_LIST_HEAD(&space_info->tickets); 240 INIT_LIST_HEAD(&space_info->priority_tickets); 241 space_info->clamp = 1; 242 btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags)); 243 244 if (btrfs_is_zoned(info)) 245 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH; 246 247 ret = btrfs_sysfs_add_space_info_type(info, space_info); 248 if (ret) 249 return ret; 250 251 list_add(&space_info->list, &info->space_info); 252 if (flags & BTRFS_BLOCK_GROUP_DATA) 253 info->data_sinfo = space_info; 254 255 return ret; 256 } 257 258 int btrfs_init_space_info(struct btrfs_fs_info *fs_info) 259 { 260 struct btrfs_super_block *disk_super; 261 u64 features; 262 u64 flags; 263 int mixed = 0; 264 int ret; 265 266 disk_super = fs_info->super_copy; 267 if (!btrfs_super_root(disk_super)) 268 return -EINVAL; 269 270 features = btrfs_super_incompat_flags(disk_super); 271 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 272 mixed = 1; 273 274 flags = BTRFS_BLOCK_GROUP_SYSTEM; 275 ret = create_space_info(fs_info, flags); 276 if (ret) 277 goto out; 278 
279 if (mixed) { 280 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; 281 ret = create_space_info(fs_info, flags); 282 } else { 283 flags = BTRFS_BLOCK_GROUP_METADATA; 284 ret = create_space_info(fs_info, flags); 285 if (ret) 286 goto out; 287 288 flags = BTRFS_BLOCK_GROUP_DATA; 289 ret = create_space_info(fs_info, flags); 290 } 291 out: 292 return ret; 293 } 294 295 void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags, 296 u64 total_bytes, u64 bytes_used, 297 u64 bytes_readonly, u64 bytes_zone_unusable, 298 bool active, struct btrfs_space_info **space_info) 299 { 300 struct btrfs_space_info *found; 301 int factor; 302 303 factor = btrfs_bg_type_to_factor(flags); 304 305 found = btrfs_find_space_info(info, flags); 306 ASSERT(found); 307 spin_lock(&found->lock); 308 found->total_bytes += total_bytes; 309 if (active) 310 found->active_total_bytes += total_bytes; 311 found->disk_total += total_bytes * factor; 312 found->bytes_used += bytes_used; 313 found->disk_used += bytes_used * factor; 314 found->bytes_readonly += bytes_readonly; 315 found->bytes_zone_unusable += bytes_zone_unusable; 316 if (total_bytes > 0) 317 found->full = 0; 318 btrfs_try_granting_tickets(info, found); 319 spin_unlock(&found->lock); 320 *space_info = found; 321 } 322 323 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, 324 u64 flags) 325 { 326 struct list_head *head = &info->space_info; 327 struct btrfs_space_info *found; 328 329 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK; 330 331 list_for_each_entry(found, head, list) { 332 if (found->flags & flags) 333 return found; 334 } 335 return NULL; 336 } 337 338 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, 339 struct btrfs_space_info *space_info, 340 enum btrfs_reserve_flush_enum flush) 341 { 342 u64 profile; 343 u64 avail; 344 int factor; 345 346 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) 347 profile = btrfs_system_alloc_profile(fs_info); 348 else 349 profile = btrfs_metadata_alloc_profile(fs_info); 350 351 avail = atomic64_read(&fs_info->free_chunk_space); 352 353 /* 354 * If we have dup, raid1 or raid10 then only half of the free 355 * space is actually usable. For raid56, the space info used 356 * doesn't include the parity drive, so we don't have to 357 * change the math 358 */ 359 factor = btrfs_bg_type_to_factor(profile); 360 avail = div_u64(avail, factor); 361 362 /* 363 * If we aren't flushing all things, let us overcommit up to 364 * 1/2th of the space. If we can flush, don't let us overcommit 365 * too much, let it overcommit up to 1/8 of the space. 366 */ 367 if (flush == BTRFS_RESERVE_FLUSH_ALL) 368 avail >>= 3; 369 else 370 avail >>= 1; 371 return avail; 372 } 373 374 static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info, 375 struct btrfs_space_info *space_info) 376 { 377 /* 378 * On regular filesystem, all total_bytes are always writable. On zoned 379 * filesystem, there may be a limitation imposed by max_active_zones. 380 * For metadata allocation, we cannot finish an existing active block 381 * group to avoid a deadlock. Thus, we need to consider only the active 382 * groups to be writable for metadata space. 
383 */ 384 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA)) 385 return space_info->total_bytes; 386 387 return space_info->active_total_bytes; 388 } 389 390 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, 391 struct btrfs_space_info *space_info, u64 bytes, 392 enum btrfs_reserve_flush_enum flush) 393 { 394 u64 avail; 395 u64 used; 396 397 /* Don't overcommit when in mixed mode */ 398 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) 399 return 0; 400 401 used = btrfs_space_info_used(space_info, true); 402 avail = calc_available_free_space(fs_info, space_info, flush); 403 404 if (used + bytes < writable_total_bytes(fs_info, space_info) + avail) 405 return 1; 406 return 0; 407 } 408 409 static void remove_ticket(struct btrfs_space_info *space_info, 410 struct reserve_ticket *ticket) 411 { 412 if (!list_empty(&ticket->list)) { 413 list_del_init(&ticket->list); 414 ASSERT(space_info->reclaim_size >= ticket->bytes); 415 space_info->reclaim_size -= ticket->bytes; 416 } 417 } 418 419 /* 420 * This is for space we already have accounted in space_info->bytes_may_use, so 421 * basically when we're returning space from block_rsv's. 422 */ 423 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, 424 struct btrfs_space_info *space_info) 425 { 426 struct list_head *head; 427 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; 428 429 lockdep_assert_held(&space_info->lock); 430 431 head = &space_info->priority_tickets; 432 again: 433 while (!list_empty(head)) { 434 struct reserve_ticket *ticket; 435 u64 used = btrfs_space_info_used(space_info, true); 436 437 ticket = list_first_entry(head, struct reserve_ticket, list); 438 439 /* Check and see if our ticket can be satisfied now. */ 440 if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) || 441 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, 442 flush)) { 443 btrfs_space_info_update_bytes_may_use(fs_info, 444 space_info, 445 ticket->bytes); 446 remove_ticket(space_info, ticket); 447 ticket->bytes = 0; 448 space_info->tickets_id++; 449 wake_up(&ticket->wait); 450 } else { 451 break; 452 } 453 } 454 455 if (head == &space_info->priority_tickets) { 456 head = &space_info->tickets; 457 flush = BTRFS_RESERVE_FLUSH_ALL; 458 goto again; 459 } 460 } 461 462 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \ 463 do { \ 464 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \ 465 spin_lock(&__rsv->lock); \ 466 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \ 467 __rsv->size, __rsv->reserved); \ 468 spin_unlock(&__rsv->lock); \ 469 } while (0) 470 471 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 472 struct btrfs_space_info *info) 473 { 474 lockdep_assert_held(&info->lock); 475 476 /* The free space could be negative in case of overcommit */ 477 btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull", 478 info->flags, 479 (s64)(info->total_bytes - btrfs_space_info_used(info, true)), 480 info->full ? 
"" : "not "); 481 btrfs_info(fs_info, 482 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu", 483 info->total_bytes, info->bytes_used, info->bytes_pinned, 484 info->bytes_reserved, info->bytes_may_use, 485 info->bytes_readonly, info->bytes_zone_unusable); 486 487 DUMP_BLOCK_RSV(fs_info, global_block_rsv); 488 DUMP_BLOCK_RSV(fs_info, trans_block_rsv); 489 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv); 490 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv); 491 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); 492 493 } 494 495 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 496 struct btrfs_space_info *info, u64 bytes, 497 int dump_block_groups) 498 { 499 struct btrfs_block_group *cache; 500 int index = 0; 501 502 spin_lock(&info->lock); 503 __btrfs_dump_space_info(fs_info, info); 504 spin_unlock(&info->lock); 505 506 if (!dump_block_groups) 507 return; 508 509 down_read(&info->groups_sem); 510 again: 511 list_for_each_entry(cache, &info->block_groups[index], list) { 512 spin_lock(&cache->lock); 513 btrfs_info(fs_info, 514 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s", 515 cache->start, cache->length, cache->used, cache->pinned, 516 cache->reserved, cache->zone_unusable, 517 cache->ro ? "[readonly]" : ""); 518 spin_unlock(&cache->lock); 519 btrfs_dump_free_space(cache, bytes); 520 } 521 if (++index < BTRFS_NR_RAID_TYPES) 522 goto again; 523 up_read(&info->groups_sem); 524 } 525 526 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, 527 u64 to_reclaim) 528 { 529 u64 bytes; 530 u64 nr; 531 532 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 533 nr = div64_u64(to_reclaim, bytes); 534 if (!nr) 535 nr = 1; 536 return nr; 537 } 538 539 #define EXTENT_SIZE_PER_ITEM SZ_256K 540 541 /* 542 * shrink metadata reservation for delalloc 543 */ 544 static void shrink_delalloc(struct btrfs_fs_info *fs_info, 545 struct btrfs_space_info *space_info, 546 u64 to_reclaim, bool wait_ordered, 547 bool for_preempt) 548 { 549 struct btrfs_trans_handle *trans; 550 u64 delalloc_bytes; 551 u64 ordered_bytes; 552 u64 items; 553 long time_left; 554 int loops; 555 556 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); 557 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); 558 if (delalloc_bytes == 0 && ordered_bytes == 0) 559 return; 560 561 /* Calc the number of the pages we need flush for space reservation */ 562 if (to_reclaim == U64_MAX) { 563 items = U64_MAX; 564 } else { 565 /* 566 * to_reclaim is set to however much metadata we need to 567 * reclaim, but reclaiming that much data doesn't really track 568 * exactly. What we really want to do is reclaim full inode's 569 * worth of reservations, however that's not available to us 570 * here. We will take a fraction of the delalloc bytes for our 571 * flushing loops and hope for the best. Delalloc will expand 572 * the amount we write to cover an entire dirty extent, which 573 * will reclaim the metadata reservation for that range. If 574 * it's not enough subsequent flush stages will be more 575 * aggressive. 576 */ 577 to_reclaim = max(to_reclaim, delalloc_bytes >> 3); 578 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; 579 } 580 581 trans = current->journal_info; 582 583 /* 584 * If we are doing more ordered than delalloc we need to just wait on 585 * ordered extents, otherwise we'll waste time trying to flush delalloc 586 * that likely won't give us the space back we need. 
587 */ 588 if (ordered_bytes > delalloc_bytes && !for_preempt) 589 wait_ordered = true; 590 591 loops = 0; 592 while ((delalloc_bytes || ordered_bytes) && loops < 3) { 593 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT; 594 long nr_pages = min_t(u64, temp, LONG_MAX); 595 int async_pages; 596 597 btrfs_start_delalloc_roots(fs_info, nr_pages, true); 598 599 /* 600 * We need to make sure any outstanding async pages are now 601 * processed before we continue. This is because things like 602 * sync_inode() try to be smart and skip writing if the inode is 603 * marked clean. We don't use filemap_fwrite for flushing 604 * because we want to control how many pages we write out at a 605 * time, thus this is the only safe way to make sure we've 606 * waited for outstanding compressed workers to have started 607 * their jobs and thus have ordered extents set up properly. 608 * 609 * This exists because we do not want to wait for each 610 * individual inode to finish its async work, we simply want to 611 * start the IO on everybody, and then come back here and wait 612 * for all of the async work to catch up. Once we're done with 613 * that we know we'll have ordered extents for everything and we 614 * can decide if we wait for that or not. 615 * 616 * If we choose to replace this in the future, make absolutely 617 * sure that the proper waiting is being done in the async case, 618 * as there have been bugs in that area before. 619 */ 620 async_pages = atomic_read(&fs_info->async_delalloc_pages); 621 if (!async_pages) 622 goto skip_async; 623 624 /* 625 * We don't want to wait forever, if we wrote less pages in this 626 * loop than we have outstanding, only wait for that number of 627 * pages, otherwise we can wait for all async pages to finish 628 * before continuing. 629 */ 630 if (async_pages > nr_pages) 631 async_pages -= nr_pages; 632 else 633 async_pages = 0; 634 wait_event(fs_info->async_submit_wait, 635 atomic_read(&fs_info->async_delalloc_pages) <= 636 async_pages); 637 skip_async: 638 loops++; 639 if (wait_ordered && !trans) { 640 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 641 } else { 642 time_left = schedule_timeout_killable(1); 643 if (time_left) 644 break; 645 } 646 647 /* 648 * If we are for preemption we just want a one-shot of delalloc 649 * flushing so we can stop flushing if we decide we don't need 650 * to anymore. 651 */ 652 if (for_preempt) 653 break; 654 655 spin_lock(&space_info->lock); 656 if (list_empty(&space_info->tickets) && 657 list_empty(&space_info->priority_tickets)) { 658 spin_unlock(&space_info->lock); 659 break; 660 } 661 spin_unlock(&space_info->lock); 662 663 delalloc_bytes = percpu_counter_sum_positive( 664 &fs_info->delalloc_bytes); 665 ordered_bytes = percpu_counter_sum_positive( 666 &fs_info->ordered_bytes); 667 } 668 } 669 670 /* 671 * Try to flush some data based on policy set by @state. This is only advisory 672 * and may fail for various reasons. The caller is supposed to examine the 673 * state of @space_info to detect the outcome. 
674 */ 675 static void flush_space(struct btrfs_fs_info *fs_info, 676 struct btrfs_space_info *space_info, u64 num_bytes, 677 enum btrfs_flush_state state, bool for_preempt) 678 { 679 struct btrfs_root *root = fs_info->tree_root; 680 struct btrfs_trans_handle *trans; 681 int nr; 682 int ret = 0; 683 684 switch (state) { 685 case FLUSH_DELAYED_ITEMS_NR: 686 case FLUSH_DELAYED_ITEMS: 687 if (state == FLUSH_DELAYED_ITEMS_NR) 688 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2; 689 else 690 nr = -1; 691 692 trans = btrfs_join_transaction(root); 693 if (IS_ERR(trans)) { 694 ret = PTR_ERR(trans); 695 break; 696 } 697 ret = btrfs_run_delayed_items_nr(trans, nr); 698 btrfs_end_transaction(trans); 699 break; 700 case FLUSH_DELALLOC: 701 case FLUSH_DELALLOC_WAIT: 702 case FLUSH_DELALLOC_FULL: 703 if (state == FLUSH_DELALLOC_FULL) 704 num_bytes = U64_MAX; 705 shrink_delalloc(fs_info, space_info, num_bytes, 706 state != FLUSH_DELALLOC, for_preempt); 707 break; 708 case FLUSH_DELAYED_REFS_NR: 709 case FLUSH_DELAYED_REFS: 710 trans = btrfs_join_transaction(root); 711 if (IS_ERR(trans)) { 712 ret = PTR_ERR(trans); 713 break; 714 } 715 if (state == FLUSH_DELAYED_REFS_NR) 716 nr = calc_reclaim_items_nr(fs_info, num_bytes); 717 else 718 nr = 0; 719 btrfs_run_delayed_refs(trans, nr); 720 btrfs_end_transaction(trans); 721 break; 722 case ALLOC_CHUNK: 723 case ALLOC_CHUNK_FORCE: 724 trans = btrfs_join_transaction(root); 725 if (IS_ERR(trans)) { 726 ret = PTR_ERR(trans); 727 break; 728 } 729 ret = btrfs_chunk_alloc(trans, 730 btrfs_get_alloc_profile(fs_info, space_info->flags), 731 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE : 732 CHUNK_ALLOC_FORCE); 733 btrfs_end_transaction(trans); 734 if (ret > 0 || ret == -ENOSPC) 735 ret = 0; 736 break; 737 case RUN_DELAYED_IPUTS: 738 /* 739 * If we have pending delayed iputs then we could free up a 740 * bunch of pinned space, so make sure we run the iputs before 741 * we do our pinned bytes check below. 742 */ 743 btrfs_run_delayed_iputs(fs_info); 744 btrfs_wait_on_delayed_iputs(fs_info); 745 break; 746 case COMMIT_TRANS: 747 ASSERT(current->journal_info == NULL); 748 trans = btrfs_join_transaction(root); 749 if (IS_ERR(trans)) { 750 ret = PTR_ERR(trans); 751 break; 752 } 753 ret = btrfs_commit_transaction(trans); 754 break; 755 default: 756 ret = -ENOSPC; 757 break; 758 } 759 760 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, 761 ret, for_preempt); 762 return; 763 } 764 765 static inline u64 766 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, 767 struct btrfs_space_info *space_info) 768 { 769 u64 used; 770 u64 avail; 771 u64 total; 772 u64 to_reclaim = space_info->reclaim_size; 773 774 lockdep_assert_held(&space_info->lock); 775 776 avail = calc_available_free_space(fs_info, space_info, 777 BTRFS_RESERVE_FLUSH_ALL); 778 used = btrfs_space_info_used(space_info, true); 779 780 /* 781 * We may be flushing because suddenly we have less space than we had 782 * before, and now we're well over-committed based on our current free 783 * space. If that's the case add in our overage so we make sure to put 784 * appropriate pressure on the flushing state machine. 
785 */ 786 total = writable_total_bytes(fs_info, space_info); 787 if (total + avail < used) 788 to_reclaim += used - (total + avail); 789 790 return to_reclaim; 791 } 792 793 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, 794 struct btrfs_space_info *space_info) 795 { 796 u64 global_rsv_size = fs_info->global_block_rsv.reserved; 797 u64 ordered, delalloc; 798 u64 total = writable_total_bytes(fs_info, space_info); 799 u64 thresh; 800 u64 used; 801 802 thresh = div_factor_fine(total, 90); 803 804 lockdep_assert_held(&space_info->lock); 805 806 /* If we're just plain full then async reclaim just slows us down. */ 807 if ((space_info->bytes_used + space_info->bytes_reserved + 808 global_rsv_size) >= thresh) 809 return false; 810 811 used = space_info->bytes_may_use + space_info->bytes_pinned; 812 813 /* The total flushable belongs to the global rsv, don't flush. */ 814 if (global_rsv_size >= used) 815 return false; 816 817 /* 818 * 128MiB is 1/4 of the maximum global rsv size. If we have less than 819 * that devoted to other reservations then there's no sense in flushing, 820 * we don't have a lot of things that need flushing. 821 */ 822 if (used - global_rsv_size <= SZ_128M) 823 return false; 824 825 /* 826 * We have tickets queued, bail so we don't compete with the async 827 * flushers. 828 */ 829 if (space_info->reclaim_size) 830 return false; 831 832 /* 833 * If we have over half of the free space occupied by reservations or 834 * pinned then we want to start flushing. 835 * 836 * We do not do the traditional thing here, which is to say 837 * 838 * if (used >= ((total_bytes + avail) / 2)) 839 * return 1; 840 * 841 * because this doesn't quite work how we want. If we had more than 50% 842 * of the space_info used by bytes_used and we had 0 available we'd just 843 * constantly run the background flusher. Instead we want it to kick in 844 * if our reclaimable space exceeds our clamped free space. 845 * 846 * Our clamping range is 2^1 -> 2^8. Practically speaking that means 847 * the following: 848 * 849 * Amount of RAM Minimum threshold Maximum threshold 850 * 851 * 256GiB 1GiB 128GiB 852 * 128GiB 512MiB 64GiB 853 * 64GiB 256MiB 32GiB 854 * 32GiB 128MiB 16GiB 855 * 16GiB 64MiB 8GiB 856 * 857 * These are the range our thresholds will fall in, corresponding to how 858 * much delalloc we need for the background flusher to kick in. 859 */ 860 861 thresh = calc_available_free_space(fs_info, space_info, 862 BTRFS_RESERVE_FLUSH_ALL); 863 used = space_info->bytes_used + space_info->bytes_reserved + 864 space_info->bytes_readonly + global_rsv_size; 865 if (used < total) 866 thresh += total - used; 867 thresh >>= space_info->clamp; 868 869 used = space_info->bytes_pinned; 870 871 /* 872 * If we have more ordered bytes than delalloc bytes then we're either 873 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting 874 * around. Preemptive flushing is only useful in that it can free up 875 * space before tickets need to wait for things to finish. In the case 876 * of ordered extents, preemptively waiting on ordered extents gets us 877 * nothing, if our reservations are tied up in ordered extents we'll 878 * simply have to slow down writers by forcing them to wait on ordered 879 * extents. 880 * 881 * In the case that ordered is larger than delalloc, only include the 882 * block reserves that we would actually be able to directly reclaim 883 * from. 
In this case if we're heavy on metadata operations this will 884 * clearly be heavy enough to warrant preemptive flushing. In the case 885 * of heavy DIO or ordered reservations, preemptive flushing will just 886 * waste time and cause us to slow down. 887 * 888 * We want to make sure we truly are maxed out on ordered however, so 889 * cut ordered in half, and if it's still higher than delalloc then we 890 * can keep flushing. This is to avoid the case where we start 891 * flushing, and now delalloc == ordered and we stop preemptively 892 * flushing when we could still have several gigs of delalloc to flush. 893 */ 894 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1; 895 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes); 896 if (ordered >= delalloc) 897 used += fs_info->delayed_refs_rsv.reserved + 898 fs_info->delayed_block_rsv.reserved; 899 else 900 used += space_info->bytes_may_use - global_rsv_size; 901 902 return (used >= thresh && !btrfs_fs_closing(fs_info) && 903 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); 904 } 905 906 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, 907 struct btrfs_space_info *space_info, 908 struct reserve_ticket *ticket) 909 { 910 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 911 u64 min_bytes; 912 913 if (!ticket->steal) 914 return false; 915 916 if (global_rsv->space_info != space_info) 917 return false; 918 919 spin_lock(&global_rsv->lock); 920 min_bytes = div_factor(global_rsv->size, 1); 921 if (global_rsv->reserved < min_bytes + ticket->bytes) { 922 spin_unlock(&global_rsv->lock); 923 return false; 924 } 925 global_rsv->reserved -= ticket->bytes; 926 remove_ticket(space_info, ticket); 927 ticket->bytes = 0; 928 wake_up(&ticket->wait); 929 space_info->tickets_id++; 930 if (global_rsv->reserved < global_rsv->size) 931 global_rsv->full = 0; 932 spin_unlock(&global_rsv->lock); 933 934 return true; 935 } 936 937 /* 938 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets 939 * @fs_info - fs_info for this fs 940 * @space_info - the space info we were flushing 941 * 942 * We call this when we've exhausted our flushing ability and haven't made 943 * progress in satisfying tickets. The reservation code handles tickets in 944 * order, so if there is a large ticket first and then smaller ones we could 945 * very well satisfy the smaller tickets. This will attempt to wake up any 946 * tickets in the list to catch this case. 947 * 948 * This function returns true if it was able to make progress by clearing out 949 * other tickets, or if it stumbles across a ticket that was smaller than the 950 * first ticket. 
951 */ 952 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, 953 struct btrfs_space_info *space_info) 954 { 955 struct reserve_ticket *ticket; 956 u64 tickets_id = space_info->tickets_id; 957 const bool aborted = BTRFS_FS_ERROR(fs_info); 958 959 trace_btrfs_fail_all_tickets(fs_info, space_info); 960 961 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 962 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info"); 963 __btrfs_dump_space_info(fs_info, space_info); 964 } 965 966 while (!list_empty(&space_info->tickets) && 967 tickets_id == space_info->tickets_id) { 968 ticket = list_first_entry(&space_info->tickets, 969 struct reserve_ticket, list); 970 971 if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket)) 972 return true; 973 974 if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 975 btrfs_info(fs_info, "failing ticket with %llu bytes", 976 ticket->bytes); 977 978 remove_ticket(space_info, ticket); 979 if (aborted) 980 ticket->error = -EIO; 981 else 982 ticket->error = -ENOSPC; 983 wake_up(&ticket->wait); 984 985 /* 986 * We're just throwing tickets away, so more flushing may not 987 * trip over btrfs_try_granting_tickets, so we need to call it 988 * here to see if we can make progress with the next ticket in 989 * the list. 990 */ 991 if (!aborted) 992 btrfs_try_granting_tickets(fs_info, space_info); 993 } 994 return (tickets_id != space_info->tickets_id); 995 } 996 997 /* 998 * This is for normal flushers, we can wait all goddamned day if we want to. We 999 * will loop and continuously try to flush as long as we are making progress. 1000 * We count progress as clearing off tickets each time we have to loop. 1001 */ 1002 static void btrfs_async_reclaim_metadata_space(struct work_struct *work) 1003 { 1004 struct btrfs_fs_info *fs_info; 1005 struct btrfs_space_info *space_info; 1006 u64 to_reclaim; 1007 enum btrfs_flush_state flush_state; 1008 int commit_cycles = 0; 1009 u64 last_tickets_id; 1010 1011 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); 1012 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 1013 1014 spin_lock(&space_info->lock); 1015 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 1016 if (!to_reclaim) { 1017 space_info->flush = 0; 1018 spin_unlock(&space_info->lock); 1019 return; 1020 } 1021 last_tickets_id = space_info->tickets_id; 1022 spin_unlock(&space_info->lock); 1023 1024 flush_state = FLUSH_DELAYED_ITEMS_NR; 1025 do { 1026 flush_space(fs_info, space_info, to_reclaim, flush_state, false); 1027 spin_lock(&space_info->lock); 1028 if (list_empty(&space_info->tickets)) { 1029 space_info->flush = 0; 1030 spin_unlock(&space_info->lock); 1031 return; 1032 } 1033 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, 1034 space_info); 1035 if (last_tickets_id == space_info->tickets_id) { 1036 flush_state++; 1037 } else { 1038 last_tickets_id = space_info->tickets_id; 1039 flush_state = FLUSH_DELAYED_ITEMS_NR; 1040 if (commit_cycles) 1041 commit_cycles--; 1042 } 1043 1044 /* 1045 * We do not want to empty the system of delalloc unless we're 1046 * under heavy pressure, so allow one trip through the flushing 1047 * logic before we start doing a FLUSH_DELALLOC_FULL. 1048 */ 1049 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles) 1050 flush_state++; 1051 1052 /* 1053 * We don't want to force a chunk allocation until we've tried 1054 * pretty hard to reclaim space. 
Think of the case where we 1055 * freed up a bunch of space and so have a lot of pinned space 1056 * to reclaim. We would rather use that than possibly create a 1057 * underutilized metadata chunk. So if this is our first run 1058 * through the flushing state machine skip ALLOC_CHUNK_FORCE and 1059 * commit the transaction. If nothing has changed the next go 1060 * around then we can force a chunk allocation. 1061 */ 1062 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles) 1063 flush_state++; 1064 1065 if (flush_state > COMMIT_TRANS) { 1066 commit_cycles++; 1067 if (commit_cycles > 2) { 1068 if (maybe_fail_all_tickets(fs_info, space_info)) { 1069 flush_state = FLUSH_DELAYED_ITEMS_NR; 1070 commit_cycles--; 1071 } else { 1072 space_info->flush = 0; 1073 } 1074 } else { 1075 flush_state = FLUSH_DELAYED_ITEMS_NR; 1076 } 1077 } 1078 spin_unlock(&space_info->lock); 1079 } while (flush_state <= COMMIT_TRANS); 1080 } 1081 1082 /* 1083 * This handles pre-flushing of metadata space before we get to the point that 1084 * we need to start blocking threads on tickets. The logic here is different 1085 * from the other flush paths because it doesn't rely on tickets to tell us how 1086 * much we need to flush, instead it attempts to keep us below the 80% full 1087 * watermark of space by flushing whichever reservation pool is currently the 1088 * largest. 1089 */ 1090 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work) 1091 { 1092 struct btrfs_fs_info *fs_info; 1093 struct btrfs_space_info *space_info; 1094 struct btrfs_block_rsv *delayed_block_rsv; 1095 struct btrfs_block_rsv *delayed_refs_rsv; 1096 struct btrfs_block_rsv *global_rsv; 1097 struct btrfs_block_rsv *trans_rsv; 1098 int loops = 0; 1099 1100 fs_info = container_of(work, struct btrfs_fs_info, 1101 preempt_reclaim_work); 1102 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 1103 delayed_block_rsv = &fs_info->delayed_block_rsv; 1104 delayed_refs_rsv = &fs_info->delayed_refs_rsv; 1105 global_rsv = &fs_info->global_block_rsv; 1106 trans_rsv = &fs_info->trans_block_rsv; 1107 1108 spin_lock(&space_info->lock); 1109 while (need_preemptive_reclaim(fs_info, space_info)) { 1110 enum btrfs_flush_state flush; 1111 u64 delalloc_size = 0; 1112 u64 to_reclaim, block_rsv_size; 1113 u64 global_rsv_size = global_rsv->reserved; 1114 1115 loops++; 1116 1117 /* 1118 * We don't have a precise counter for the metadata being 1119 * reserved for delalloc, so we'll approximate it by subtracting 1120 * out the block rsv's space from the bytes_may_use. If that 1121 * amount is higher than the individual reserves, then we can 1122 * assume it's tied up in delalloc reservations. 1123 */ 1124 block_rsv_size = global_rsv_size + 1125 delayed_block_rsv->reserved + 1126 delayed_refs_rsv->reserved + 1127 trans_rsv->reserved; 1128 if (block_rsv_size < space_info->bytes_may_use) 1129 delalloc_size = space_info->bytes_may_use - block_rsv_size; 1130 1131 /* 1132 * We don't want to include the global_rsv in our calculation, 1133 * because that's space we can't touch. Subtract it from the 1134 * block_rsv_size for the next checks. 1135 */ 1136 block_rsv_size -= global_rsv_size; 1137 1138 /* 1139 * We really want to avoid flushing delalloc too much, as it 1140 * could result in poor allocation patterns, so only flush it if 1141 * it's larger than the rest of the pools combined. 
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (delayed_block_rsv->reserved +
			    delayed_refs_rsv->reserved)) {
			to_reclaim = space_info->bytes_pinned;
			flush = COMMIT_TRANS;
		} else if (delayed_block_rsv->reserved >
			   delayed_refs_rsv->reserved) {
			to_reclaim = delayed_block_rsv->reserved;
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = delayed_refs_rsv->reserved;
			flush = FLUSH_DELAYED_REFS_NR;
		}

		spin_unlock(&space_info->lock);

		/*
		 * We don't want to reclaim everything, just a portion, so scale
		 * down the to_reclaim by 1/4. If it takes us down to 0,
		 * reclaim 1 item's worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush, true);
		cond_resched();
		spin_lock(&space_info->lock);
	}

	/* We only went through once, back off our clamping. */
	if (loops == 1 && !space_info->reclaim_size)
		space_info->clamp = max(1, space_info->clamp - 1);
	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier. Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent. This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space. But again this space is not
 *   immediately re-usable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by running the
 *   iputs.
 *
 * ALLOC_CHUNK_FORCE
 *   For data we start with alloc chunk force, however we could have been full
 *   before, and then the transaction commit could have freed new block groups,
 *   so if we now have space to allocate do the force chunk allocation.
1214 */ 1215 static const enum btrfs_flush_state data_flush_states[] = { 1216 FLUSH_DELALLOC_FULL, 1217 RUN_DELAYED_IPUTS, 1218 COMMIT_TRANS, 1219 ALLOC_CHUNK_FORCE, 1220 }; 1221 1222 static void btrfs_async_reclaim_data_space(struct work_struct *work) 1223 { 1224 struct btrfs_fs_info *fs_info; 1225 struct btrfs_space_info *space_info; 1226 u64 last_tickets_id; 1227 enum btrfs_flush_state flush_state = 0; 1228 1229 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); 1230 space_info = fs_info->data_sinfo; 1231 1232 spin_lock(&space_info->lock); 1233 if (list_empty(&space_info->tickets)) { 1234 space_info->flush = 0; 1235 spin_unlock(&space_info->lock); 1236 return; 1237 } 1238 last_tickets_id = space_info->tickets_id; 1239 spin_unlock(&space_info->lock); 1240 1241 while (!space_info->full) { 1242 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); 1243 spin_lock(&space_info->lock); 1244 if (list_empty(&space_info->tickets)) { 1245 space_info->flush = 0; 1246 spin_unlock(&space_info->lock); 1247 return; 1248 } 1249 1250 /* Something happened, fail everything and bail. */ 1251 if (BTRFS_FS_ERROR(fs_info)) 1252 goto aborted_fs; 1253 last_tickets_id = space_info->tickets_id; 1254 spin_unlock(&space_info->lock); 1255 } 1256 1257 while (flush_state < ARRAY_SIZE(data_flush_states)) { 1258 flush_space(fs_info, space_info, U64_MAX, 1259 data_flush_states[flush_state], false); 1260 spin_lock(&space_info->lock); 1261 if (list_empty(&space_info->tickets)) { 1262 space_info->flush = 0; 1263 spin_unlock(&space_info->lock); 1264 return; 1265 } 1266 1267 if (last_tickets_id == space_info->tickets_id) { 1268 flush_state++; 1269 } else { 1270 last_tickets_id = space_info->tickets_id; 1271 flush_state = 0; 1272 } 1273 1274 if (flush_state >= ARRAY_SIZE(data_flush_states)) { 1275 if (space_info->full) { 1276 if (maybe_fail_all_tickets(fs_info, space_info)) 1277 flush_state = 0; 1278 else 1279 space_info->flush = 0; 1280 } else { 1281 flush_state = 0; 1282 } 1283 1284 /* Something happened, fail everything and bail. 
*/ 1285 if (BTRFS_FS_ERROR(fs_info)) 1286 goto aborted_fs; 1287 1288 } 1289 spin_unlock(&space_info->lock); 1290 } 1291 return; 1292 1293 aborted_fs: 1294 maybe_fail_all_tickets(fs_info, space_info); 1295 space_info->flush = 0; 1296 spin_unlock(&space_info->lock); 1297 } 1298 1299 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info) 1300 { 1301 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); 1302 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); 1303 INIT_WORK(&fs_info->preempt_reclaim_work, 1304 btrfs_preempt_reclaim_metadata_space); 1305 } 1306 1307 static const enum btrfs_flush_state priority_flush_states[] = { 1308 FLUSH_DELAYED_ITEMS_NR, 1309 FLUSH_DELAYED_ITEMS, 1310 ALLOC_CHUNK, 1311 }; 1312 1313 static const enum btrfs_flush_state evict_flush_states[] = { 1314 FLUSH_DELAYED_ITEMS_NR, 1315 FLUSH_DELAYED_ITEMS, 1316 FLUSH_DELAYED_REFS_NR, 1317 FLUSH_DELAYED_REFS, 1318 FLUSH_DELALLOC, 1319 FLUSH_DELALLOC_WAIT, 1320 FLUSH_DELALLOC_FULL, 1321 ALLOC_CHUNK, 1322 COMMIT_TRANS, 1323 }; 1324 1325 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, 1326 struct btrfs_space_info *space_info, 1327 struct reserve_ticket *ticket, 1328 const enum btrfs_flush_state *states, 1329 int states_nr) 1330 { 1331 u64 to_reclaim; 1332 int flush_state = 0; 1333 1334 spin_lock(&space_info->lock); 1335 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 1336 /* 1337 * This is the priority reclaim path, so to_reclaim could be >0 still 1338 * because we may have only satisfied the priority tickets and still 1339 * left non priority tickets on the list. We would then have 1340 * to_reclaim but ->bytes == 0. 1341 */ 1342 if (ticket->bytes == 0) { 1343 spin_unlock(&space_info->lock); 1344 return; 1345 } 1346 1347 while (flush_state < states_nr) { 1348 spin_unlock(&space_info->lock); 1349 flush_space(fs_info, space_info, to_reclaim, states[flush_state], 1350 false); 1351 flush_state++; 1352 spin_lock(&space_info->lock); 1353 if (ticket->bytes == 0) { 1354 spin_unlock(&space_info->lock); 1355 return; 1356 } 1357 } 1358 1359 /* Attempt to steal from the global rsv if we can. */ 1360 if (!steal_from_global_rsv(fs_info, space_info, ticket)) { 1361 ticket->error = -ENOSPC; 1362 remove_ticket(space_info, ticket); 1363 } 1364 1365 /* 1366 * We must run try_granting_tickets here because we could be a large 1367 * ticket in front of a smaller ticket that can now be satisfied with 1368 * the available space. 1369 */ 1370 btrfs_try_granting_tickets(fs_info, space_info); 1371 spin_unlock(&space_info->lock); 1372 } 1373 1374 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info, 1375 struct btrfs_space_info *space_info, 1376 struct reserve_ticket *ticket) 1377 { 1378 spin_lock(&space_info->lock); 1379 1380 /* We could have been granted before we got here. 
	 */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (!space_info->full) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	ticket->error = -ENOSPC;
	remove_ticket(space_info, ticket);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * Do the appropriate flushing and waiting for a ticket
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	ret = ticket->error;
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
1488 */ 1489 ASSERT(!(ticket->bytes == 0 && ticket->error)); 1490 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, 1491 start_ns, flush, ticket->error); 1492 return ret; 1493 } 1494 1495 /* 1496 * This returns true if this flush state will go through the ordinary flushing 1497 * code. 1498 */ 1499 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) 1500 { 1501 return (flush == BTRFS_RESERVE_FLUSH_ALL) || 1502 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1503 } 1504 1505 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info, 1506 struct btrfs_space_info *space_info) 1507 { 1508 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); 1509 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); 1510 1511 /* 1512 * If we're heavy on ordered operations then clamping won't help us. We 1513 * need to clamp specifically to keep up with dirty'ing buffered 1514 * writers, because there's not a 1:1 correlation of writing delalloc 1515 * and freeing space, like there is with flushing delayed refs or 1516 * delayed nodes. If we're already more ordered than delalloc then 1517 * we're keeping up, otherwise we aren't and should probably clamp. 1518 */ 1519 if (ordered < delalloc) 1520 space_info->clamp = min(space_info->clamp + 1, 8); 1521 } 1522 1523 static inline bool can_steal(enum btrfs_reserve_flush_enum flush) 1524 { 1525 return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || 1526 flush == BTRFS_RESERVE_FLUSH_EVICT); 1527 } 1528 1529 /** 1530 * Try to reserve bytes from the block_rsv's space 1531 * 1532 * @fs_info: the filesystem 1533 * @space_info: space info we want to allocate from 1534 * @orig_bytes: number of bytes we want 1535 * @flush: whether or not we can flush to make our reservation 1536 * 1537 * This will reserve orig_bytes number of bytes from the space info associated 1538 * with the block_rsv. If there is not enough space it will make an attempt to 1539 * flush out space to make room. It will do this by flushing delalloc if 1540 * possible or committing the transaction. If flush is 0 then no attempts to 1541 * regain reservations will be made and this will fail if there is not enough 1542 * space already. 1543 */ 1544 static int __reserve_bytes(struct btrfs_fs_info *fs_info, 1545 struct btrfs_space_info *space_info, u64 orig_bytes, 1546 enum btrfs_reserve_flush_enum flush) 1547 { 1548 struct work_struct *async_work; 1549 struct reserve_ticket ticket; 1550 u64 start_ns = 0; 1551 u64 used; 1552 int ret = 0; 1553 bool pending_tickets; 1554 1555 ASSERT(orig_bytes); 1556 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); 1557 1558 if (flush == BTRFS_RESERVE_FLUSH_DATA) 1559 async_work = &fs_info->async_data_reclaim_work; 1560 else 1561 async_work = &fs_info->async_reclaim_work; 1562 1563 spin_lock(&space_info->lock); 1564 ret = -ENOSPC; 1565 used = btrfs_space_info_used(space_info, true); 1566 1567 /* 1568 * We don't want NO_FLUSH allocations to jump everybody, they can 1569 * generally handle ENOSPC in a different way, so treat them the same as 1570 * normal flushers when it comes to skipping pending tickets. 
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * btrfs_can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = can_steal(flush);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				/*
				 * We were forced to add a reserve ticket, so
				 * our preemptive flushing is unable to keep
				 * up. Clamp down on the threshold for the
				 * preemptive flushing in order to keep up with
				 * the workload.
				 */
				maybe_clamp_preempt(fs_info, space_info);

				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    !work_busy(&fs_info->preempt_reclaim_work) &&
		    need_preemptive_reclaim(fs_info, space_info)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
				     orig_bytes, flush);
}

/**
 * Try to reserve metadata bytes from the block_rsv's space
 *
 * @fs_info:    the filesystem
 * @block_rsv:  block_rsv we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction.
If flush is 0 then no attempts to 1667 * regain reservations will be made and this will fail if there is not enough 1668 * space already. 1669 */ 1670 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, 1671 struct btrfs_block_rsv *block_rsv, 1672 u64 orig_bytes, 1673 enum btrfs_reserve_flush_enum flush) 1674 { 1675 int ret; 1676 1677 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush); 1678 if (ret == -ENOSPC) { 1679 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1680 block_rsv->space_info->flags, 1681 orig_bytes, 1); 1682 1683 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1684 btrfs_dump_space_info(fs_info, block_rsv->space_info, 1685 orig_bytes, 0); 1686 } 1687 return ret; 1688 } 1689 1690 /** 1691 * Try to reserve data bytes for an allocation 1692 * 1693 * @fs_info: the filesystem 1694 * @bytes: number of bytes we need 1695 * @flush: how we are allowed to flush 1696 * 1697 * This will reserve bytes from the data space info. If there is not enough 1698 * space then we will attempt to flush space as specified by flush. 1699 */ 1700 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, 1701 enum btrfs_reserve_flush_enum flush) 1702 { 1703 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; 1704 int ret; 1705 1706 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA || 1707 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE); 1708 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); 1709 1710 ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush); 1711 if (ret == -ENOSPC) { 1712 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1713 data_sinfo->flags, bytes, 1); 1714 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1715 btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0); 1716 } 1717 return ret; 1718 } 1719
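
/*
 * Usage sketch (illustrative only, not an actual caller): a hypothetical
 * helper showing how the data reservation entry point above is meant to be
 * driven. The helper name and the bare error handling are made up for
 * illustration; real data reservations go through the delalloc reservation
 * helpers, which also record the reservation so it can be released again on
 * error or once the extent is allocated.
 *
 *	static int example_reserve_for_write(struct btrfs_fs_info *fs_info,
 *					     u64 len)
 *	{
 *		// Data reservations are made in sectorsize units.
 *		len = round_up(len, fs_info->sectorsize);
 *
 *		// BTRFS_RESERVE_FLUSH_DATA walks the data flush states
 *		// (delalloc, delayed iputs, transaction commit, chunk
 *		// allocation) before giving up with -ENOSPC.
 *		return btrfs_reserve_data_bytes(fs_info, len,
 *						BTRFS_RESERVE_FLUSH_DATA);
 *	}
 */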