// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a
 *   space_info for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers
 *   to determine the size of the block reserves, and then use the actual
 *   bytes values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for,
 *   with num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not
 *   have enough space.
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick the async flush thread
 *
 *   -> handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be
 *     set on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use
 *     and the ticket is woken up.
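 *
 *     For example (illustrative numbers): with tickets of 16MiB and then
 *     4MiB queued and only 8MiB becoming free, the 16MiB ticket at the head
 *     is not granted and the loop stops, so the 4MiB ticket behind it keeps
 *     waiting too: granting is strictly FIFO within each list.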
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it does we got our reservation and we can
 *     carry on, if not return the appropriate error (ENOSPC, but can be
 *     EINTR if we were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order
 *   to reduce the locking overhead on the various trees, and even to keep
 *   from doing any work at all in the case of delayed refs.  Each of these
 *   delayed things however hold reservations, and so letting them run allows
 *   us to reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple
 *     write for example, we would update the inode item at write time to
 *     update the mtime, and then again at finish_ordered_io() time in order
 *     to update the isize or bytes.  We keep these delayed items to coalesce
 *     these operations into a single operation done on demand.  These are an
 *     easy way to reclaim metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is
 *     reserved for delayed allocation.  We can reclaim some of this space
 *     simply by running delalloc, but usually we need to wait for ordered
 *     extents to reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and
 *     every delayed ref operation holds a reservation.  Running these is a
 *     quick way to reclaim space, but we want to hold this until the end
 *     because COW can churn a lot and we can avoid making some extent tree
 *     modifications if we are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space
 *     when our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by
 *     these operations, however they won't be usable until the transaction
 *     commits.
 *
 *   COMMIT_TRANS
 *     This will commit the transaction.  Historically we had a lot of logic
 *     surrounding whether or not we'd commit the transaction, but this was
 *     born out of a pre-tickets era where we could end up committing the
 *     transaction thousands of times in a row without making progress.  Now
 *     thanks to our ticketing system we know if we're not making progress
 *     and can error everybody out after a few commits rather than burning
 *     the disk hoping for a different answer.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
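 *
 *   For illustration (made-up numbers): with 8GiB of unallocated disk space
 *   and a RAID1 metadata profile (factor 2), the usable unallocated space is
 *   8GiB / 2 = 4GiB.  A BTRFS_RESERVE_FLUSH_ALL reservation may then
 *   overcommit by up to 4GiB >> 3 = 512MiB, while a non-flushing reservation
 *   may overcommit by up to 4GiB >> 1 = 2GiB, matching the shifts in
 *   calc_available_free_space().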
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If
 *   there is no unallocated space to be had, all reservations are kept
 *   within the free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the
 *   right thing with or without extra unallocated space.
 */

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		s_info->bytes_zone_unusable +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}

/*
 * Block groups with more than this value (percents) of unusable space will be
 * scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH	(75)

/*
 * Calculate chunk size depending on volume type (regular or zoned).
 */
static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
	if (btrfs_is_zoned(fs_info))
		return fs_info->zone_size;

	ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		return BTRFS_MAX_DATA_CHUNK_SIZE;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return SZ_32M;

	/* Handle BTRFS_BLOCK_GROUP_METADATA */
	if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
		return SZ_1G;

	return SZ_256M;
}

/*
 * Update default chunk size.
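 *
 * For reference (values from calc_chunk_size() above): on non-zoned
 * filesystems DATA chunks default to BTRFS_MAX_DATA_CHUNK_SIZE (10GiB),
 * SYSTEM chunks to 32MiB, and METADATA chunks to 1GiB when the filesystem is
 * larger than 50GiB (256MiB otherwise); zoned filesystems always use the
 * zone size.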
 */
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size)
{
	WRITE_ONCE(space_info->chunk_size, chunk_size);
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);
	space_info->clamp = 1;
	btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));

	if (btrfs_is_zoned(info))
		space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
				struct btrfs_block_group *block_group)
{
	struct btrfs_space_info *found;
	int factor, index;

	factor = btrfs_bg_type_to_factor(block_group->flags);

	found = btrfs_find_space_info(info, block_group->flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += block_group->length;
	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
		found->active_total_bytes += block_group->length;
	found->disk_total += block_group->length * factor;
	found->bytes_used += block_group->used;
	found->disk_used += block_group->used * factor;
	found->bytes_readonly += block_group->bytes_super;
	found->bytes_zone_unusable += block_group->zone_unusable;
	if (block_group->length > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);

	block_group->space_info = found;

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	down_write(&found->groups_sem);
	list_add_tail(&block_group->list, &found->block_groups[index]);
	up_write(&found->groups_sem);
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, head, list) {
		if (found->flags & flags)
			return found;
	}
	return NULL;
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *space_info)
{
	/*
	 * On a regular filesystem, all of total_bytes is always writable.  On
	 * a zoned filesystem, there may be a limitation imposed by
	 * max_active_zones.  For metadata allocation, we cannot finish an
	 * existing active block group to avoid a deadlock.  Thus, we need to
	 * consider only the active groups to be writable for metadata space.
	 */
	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
		return space_info->total_bytes;

	return space_info->active_total_bytes;
}

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
		avail = 0;
	else
		avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
		return 1;
	return 0;
}

static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use,
 * so basically when we're returning space from block_rsv's.
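 *
 * A typical caller is btrfs_space_info_free_bytes_may_use(), which subtracts
 * the returned bytes from ->bytes_may_use and then calls this function to
 * hand the freed space to any waiting tickets, priority tickets first and in
 * FIFO order within each list.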
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
{
	switch (space_info->flags) {
	case BTRFS_BLOCK_GROUP_SYSTEM:
		return "SYSTEM";
	case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
		return "DATA+METADATA";
	case BTRFS_BLOCK_GROUP_DATA:
		return "DATA";
	case BTRFS_BLOCK_GROUP_METADATA:
		return "METADATA";
	default:
		return "UNKNOWN";
	}
}

static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	const char *flag_str = space_info_flag_to_str(info);
	lockdep_assert_held(&info->lock);

	/* The free space could be negative in case of overcommit */
	btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
		   flag_str,
		   (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
		   info->full ?
"" : "not "); 517 btrfs_info(fs_info, 518 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu", 519 info->total_bytes, info->bytes_used, info->bytes_pinned, 520 info->bytes_reserved, info->bytes_may_use, 521 info->bytes_readonly, info->bytes_zone_unusable); 522 } 523 524 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 525 struct btrfs_space_info *info, u64 bytes, 526 int dump_block_groups) 527 { 528 struct btrfs_block_group *cache; 529 int index = 0; 530 531 spin_lock(&info->lock); 532 __btrfs_dump_space_info(fs_info, info); 533 dump_global_block_rsv(fs_info); 534 spin_unlock(&info->lock); 535 536 if (!dump_block_groups) 537 return; 538 539 down_read(&info->groups_sem); 540 again: 541 list_for_each_entry(cache, &info->block_groups[index], list) { 542 spin_lock(&cache->lock); 543 btrfs_info(fs_info, 544 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s", 545 cache->start, cache->length, cache->used, cache->pinned, 546 cache->reserved, cache->zone_unusable, 547 cache->ro ? "[readonly]" : ""); 548 spin_unlock(&cache->lock); 549 btrfs_dump_free_space(cache, bytes); 550 } 551 if (++index < BTRFS_NR_RAID_TYPES) 552 goto again; 553 up_read(&info->groups_sem); 554 } 555 556 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, 557 u64 to_reclaim) 558 { 559 u64 bytes; 560 u64 nr; 561 562 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 563 nr = div64_u64(to_reclaim, bytes); 564 if (!nr) 565 nr = 1; 566 return nr; 567 } 568 569 #define EXTENT_SIZE_PER_ITEM SZ_256K 570 571 /* 572 * shrink metadata reservation for delalloc 573 */ 574 static void shrink_delalloc(struct btrfs_fs_info *fs_info, 575 struct btrfs_space_info *space_info, 576 u64 to_reclaim, bool wait_ordered, 577 bool for_preempt) 578 { 579 struct btrfs_trans_handle *trans; 580 u64 delalloc_bytes; 581 u64 ordered_bytes; 582 u64 items; 583 long time_left; 584 int loops; 585 586 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); 587 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); 588 if (delalloc_bytes == 0 && ordered_bytes == 0) 589 return; 590 591 /* Calc the number of the pages we need flush for space reservation */ 592 if (to_reclaim == U64_MAX) { 593 items = U64_MAX; 594 } else { 595 /* 596 * to_reclaim is set to however much metadata we need to 597 * reclaim, but reclaiming that much data doesn't really track 598 * exactly. What we really want to do is reclaim full inode's 599 * worth of reservations, however that's not available to us 600 * here. We will take a fraction of the delalloc bytes for our 601 * flushing loops and hope for the best. Delalloc will expand 602 * the amount we write to cover an entire dirty extent, which 603 * will reclaim the metadata reservation for that range. If 604 * it's not enough subsequent flush stages will be more 605 * aggressive. 606 */ 607 to_reclaim = max(to_reclaim, delalloc_bytes >> 3); 608 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; 609 } 610 611 trans = current->journal_info; 612 613 /* 614 * If we are doing more ordered than delalloc we need to just wait on 615 * ordered extents, otherwise we'll waste time trying to flush delalloc 616 * that likely won't give us the space back we need. 
	 */
	if (ordered_bytes > delalloc_bytes && !for_preempt)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
		long nr_pages = min_t(u64, temp, LONG_MAX);
		int async_pages;

		btrfs_start_delalloc_roots(fs_info, nr_pages, true);

		/*
		 * We need to make sure any outstanding async pages are now
		 * processed before we continue.  This is because things like
		 * sync_inode() try to be smart and skip writing if the inode
		 * is marked clean.  We don't use filemap_fdatawrite() for
		 * flushing because we want to control how many pages we write
		 * out at a time, thus this is the only safe way to make sure
		 * we've waited for outstanding compressed workers to have
		 * started their jobs and thus have ordered extents set up
		 * properly.
		 *
		 * This exists because we do not want to wait for each
		 * individual inode to finish its async work, we simply want
		 * to start the IO on everybody, and then come back here and
		 * wait for all of the async work to catch up.  Once we're
		 * done with that we know we'll have ordered extents for
		 * everything and we can decide if we wait for that or not.
		 *
		 * If we choose to replace this in the future, make absolutely
		 * sure that the proper waiting is being done in the async
		 * case, as there have been bugs in that area before.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * We don't want to wait forever, if we wrote fewer pages in
		 * this loop than we have outstanding, only wait for that
		 * number of pages, otherwise we can wait for all async pages
		 * to finish before continuing.
		 */
		if (async_pages > nr_pages)
			async_pages -= nr_pages;
		else
			async_pages = 0;
		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   async_pages);
skip_async:
		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		/*
		 * If we are for preemption we just want a one-shot of delalloc
		 * flushing so we can stop flushing if we decide we don't need
		 * to anymore.
		 */
		if (for_preempt)
			break;

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		ordered_bytes = percpu_counter_sum_positive(
						&fs_info->ordered_bytes);
	}
}

/*
 * Try to flush some data based on policy set by @state.  This is only
 * advisory and may fail for various reasons.  The caller is supposed to
 * examine the state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			enum btrfs_flush_state state, bool for_preempt)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
	case FLUSH_DELALLOC_FULL:
		if (state == FLUSH_DELALLOC_FULL)
			num_bytes = U64_MAX;
		shrink_delalloc(fs_info, space_info, num_bytes,
				state != FLUSH_DELALLOC, for_preempt);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		/*
		 * For metadata space on a zoned filesystem, reaching here
		 * means we don't have enough space left in active_total_bytes.
		 * Try to activate a block group first, because we may have an
		 * inactive block group already allocated.
		 */
		ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
		if (ret < 0)
			break;
		else if (ret == 1)
			break;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);

		/*
		 * For metadata space on a zoned filesystem, allocating a new
		 * chunk is not enough.  We still need to activate the block
		 * group.  Activate the newly allocated block group by (maybe)
		 * finishing a block group.
		 */
		if (ret == 1) {
			ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
			/*
			 * Revert to the original ret regardless of whether we
			 * could finish one block group or not.
			 */
			if (ret >= 0)
				ret = 1;
		}

		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ASSERT(current->journal_info == NULL);
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_commit_transaction(trans);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret, for_preempt);
	return;
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 total;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space.  If that's the case add in our overage so we make sure to
	 * put appropriate pressure on the flushing state machine.
	 */
	total = writable_total_bytes(fs_info, space_info);
	if (total + avail < used)
		to_reclaim += used - (total + avail);

	return to_reclaim;
}

static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info)
{
	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
	u64 ordered, delalloc;
	u64 total = writable_total_bytes(fs_info, space_info);
	u64 thresh;
	u64 used;

	thresh = div_factor_fine(total, 90);

	lockdep_assert_held(&space_info->lock);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved +
	     global_rsv_size) >= thresh)
		return false;

	used = space_info->bytes_may_use + space_info->bytes_pinned;

	/* The total flushable belongs to the global rsv, don't flush. */
	if (global_rsv_size >= used)
		return false;

	/*
	 * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
	 * that devoted to other reservations then there's no sense in
	 * flushing, we don't have a lot of things that need flushing.
	 */
	if (used - global_rsv_size <= SZ_128M)
		return false;

	/*
	 * We have tickets queued, bail so we don't compete with the async
	 * flushers.
	 */
	if (space_info->reclaim_size)
		return false;

	/*
	 * If we have over half of the free space occupied by reservations or
	 * pinned then we want to start flushing.
	 *
	 * We do not do the traditional thing here, which is to say
	 *
	 *   if (used >= ((total_bytes + avail) / 2))
	 *     return 1;
	 *
	 * because this doesn't quite work how we want.  If we had more than
	 * 50% of the space_info used by bytes_used and we had 0 available
	 * we'd just constantly run the background flusher.  Instead we want
	 * it to kick in if our reclaimable space exceeds our clamped free
	 * space.
	 *
	 * Our clamping range is 2^1 -> 2^8.
	 * Practically speaking that means the following:
	 *
	 *   Amount of RAM        Minimum threshold       Maximum threshold
	 *
	 *        256GiB                     1GiB                  128GiB
	 *        128GiB                   512MiB                   64GiB
	 *         64GiB                   256MiB                   32GiB
	 *         32GiB                   128MiB                   16GiB
	 *         16GiB                    64MiB                    8GiB
	 *
	 * These are the ranges our thresholds will fall in, corresponding to
	 * how much delalloc we need for the background flusher to kick in.
	 */

	thresh = calc_available_free_space(fs_info, space_info,
					   BTRFS_RESERVE_FLUSH_ALL);
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_readonly + global_rsv_size;
	if (used < total)
		thresh += total - used;
	thresh >>= space_info->clamp;

	used = space_info->bytes_pinned;

	/*
	 * If we have more ordered bytes than delalloc bytes then we're either
	 * doing a lot of DIO, or we simply don't have a lot of delalloc
	 * waiting around.  Preemptive flushing is only useful in that it can
	 * free up space before tickets need to wait for things to finish.
	 * In the case of ordered extents, preemptively waiting on ordered
	 * extents gets us nothing, if our reservations are tied up in ordered
	 * extents we'll simply have to slow down writers by forcing them to
	 * wait on ordered extents.
	 *
	 * In the case that ordered is larger than delalloc, only include the
	 * block reserves that we would actually be able to directly reclaim
	 * from.  In this case if we're heavy on metadata operations this will
	 * clearly be heavy enough to warrant preemptive flushing.  In the
	 * case of heavy DIO or ordered reservations, preemptive flushing will
	 * just waste time and cause us to slow down.
	 *
	 * We want to make sure we truly are maxed out on ordered however, so
	 * cut ordered in half, and if it's still higher than delalloc then we
	 * can keep flushing.  This is to avoid the case where we start
	 * flushing, and now delalloc == ordered and we stop preemptively
	 * flushing when we could still have several gigs of delalloc to
	 * flush.
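	 *
	 * For illustration (made-up numbers): with 4GiB of ordered bytes and
	 * 3GiB of delalloc, ordered >> 1 = 2GiB is below delalloc, so we keep
	 * counting the delalloc-backed reservations (->bytes_may_use minus
	 * the global rsv) and may keep preemptively flushing; only once
	 * delalloc drops below half of ordered do we switch to counting just
	 * the delayed refs/items reserves.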
	 */
	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
	if (ordered >= delalloc)
		used += fs_info->delayed_refs_rsv.reserved +
			fs_info->delayed_block_rsv.reserved;
	else
		used += space_info->bytes_may_use - global_rsv_size;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (!ticket->steal)
		return false;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	const bool aborted = BTRFS_FS_ERROR(fs_info);

	trace_btrfs_fail_all_tickets(fs_info, space_info);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		if (aborted)
			ticket->error = -EIO;
		else
			ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		if (!aborted)
			btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.
 * We will loop and continuously try to flush as long as we are making
 * progress.  We count progress as clearing off tickets each time we have to
 * loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	enum btrfs_flush_state flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We do not want to empty the system of delalloc unless we're
		 * under heavy pressure, so allow one trip through the flushing
		 * logic before we start doing a FLUSH_DELALLOC_FULL.
		 */
		if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
			flush_state++;

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create
		 * an underutilized metadata chunk.  So if this is our first
		 * run through the flushing state machine skip
		 * ALLOC_CHUNK_FORCE and commit the transaction.  If nothing
		 * has changed the next go around then we can force a chunk
		 * allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

/*
 * This handles pre-flushing of metadata space before we get to the point
 * that we need to start blocking threads on tickets.  The logic here is
 * different from the other flush paths because it doesn't rely on tickets to
 * tell us how much we need to flush, instead it attempts to keep us below
 * the 80% full watermark of space by flushing whichever reservation pool is
 * currently the largest.
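 *
 * Roughly, each pass picks the largest pool (illustrative flow):
 *
 *   approximated delalloc reservations  -> FLUSH_DELALLOC
 *   pinned bytes                        -> COMMIT_TRANS
 *   delayed items rsv                   -> FLUSH_DELAYED_ITEMS_NR
 *   delayed refs rsv                    -> FLUSH_DELAYED_REFS_NR
 *
 * and reclaims about a quarter of it at a time.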
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv;
	struct btrfs_block_rsv *trans_rsv;
	int loops = 0;

	fs_info = container_of(work, struct btrfs_fs_info,
			       preempt_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	delayed_block_rsv = &fs_info->delayed_block_rsv;
	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	global_rsv = &fs_info->global_block_rsv;
	trans_rsv = &fs_info->trans_block_rsv;

	spin_lock(&space_info->lock);
	while (need_preemptive_reclaim(fs_info, space_info)) {
		enum btrfs_flush_state flush;
		u64 delalloc_size = 0;
		u64 to_reclaim, block_rsv_size;
		u64 global_rsv_size = global_rsv->reserved;

		loops++;

		/*
		 * We don't have a precise counter for the metadata being
		 * reserved for delalloc, so we'll approximate it by
		 * subtracting out the block rsv's space from the
		 * bytes_may_use.  If that amount is higher than the
		 * individual reserves, then we can assume it's tied up in
		 * delalloc reservations.
		 */
		block_rsv_size = global_rsv_size +
			delayed_block_rsv->reserved +
			delayed_refs_rsv->reserved +
			trans_rsv->reserved;
		if (block_rsv_size < space_info->bytes_may_use)
			delalloc_size = space_info->bytes_may_use - block_rsv_size;

		/*
		 * We don't want to include the global_rsv in our calculation,
		 * because that's space we can't touch.  Subtract it from the
		 * block_rsv_size for the next checks.
		 */
		block_rsv_size -= global_rsv_size;

		/*
		 * We really want to avoid flushing delalloc too much, as it
		 * could result in poor allocation patterns, so only flush it
		 * if it's larger than the rest of the pools combined.
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (delayed_block_rsv->reserved +
			    delayed_refs_rsv->reserved)) {
			to_reclaim = space_info->bytes_pinned;
			flush = COMMIT_TRANS;
		} else if (delayed_block_rsv->reserved >
			   delayed_refs_rsv->reserved) {
			to_reclaim = delayed_block_rsv->reserved;
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = delayed_refs_rsv->reserved;
			flush = FLUSH_DELAYED_REFS_NR;
		}

		spin_unlock(&space_info->lock);

		/*
		 * We don't want to reclaim everything, just a portion, so
		 * scale down the to_reclaim by 1/4.  If it takes us down to
		 * 0, reclaim one item's worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush, true);
		cond_resched();
		spin_lock(&space_info->lock);
	}

	/* We only went through once, back off our clamping. */
	if (loops == 1 && !space_info->reclaim_size)
		space_info->clamp = max(1, space_info->clamp - 1);
	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
1244 * 1245 * 1) compression is on and we allocate less space than we reserved 1246 * 2) we are overwriting existing space 1247 * 1248 * For #1 that extra space is reclaimed as soon as the delalloc pages are 1249 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent 1250 * length to ->bytes_reserved, and subtracts the reserved space from 1251 * ->bytes_may_use. 1252 * 1253 * For #2 this is trickier. Once the ordered extent runs we will drop the 1254 * extent in the range we are overwriting, which creates a delayed ref for 1255 * that freed extent. This however is not reclaimed until the transaction 1256 * commits, thus the next stages. 1257 * 1258 * RUN_DELAYED_IPUTS 1259 * If we are freeing inodes, we want to make sure all delayed iputs have 1260 * completed, because they could have been on an inode with i_nlink == 0, and 1261 * thus have been truncated and freed up space. But again this space is not 1262 * immediately re-usable, it comes in the form of a delayed ref, which must be 1263 * run and then the transaction must be committed. 1264 * 1265 * COMMIT_TRANS 1266 * This is where we reclaim all of the pinned space generated by running the 1267 * iputs 1268 * 1269 * ALLOC_CHUNK_FORCE 1270 * For data we start with alloc chunk force, however we could have been full 1271 * before, and then the transaction commit could have freed new block groups, 1272 * so if we now have space to allocate do the force chunk allocation. 1273 */ 1274 static const enum btrfs_flush_state data_flush_states[] = { 1275 FLUSH_DELALLOC_FULL, 1276 RUN_DELAYED_IPUTS, 1277 COMMIT_TRANS, 1278 ALLOC_CHUNK_FORCE, 1279 }; 1280 1281 static void btrfs_async_reclaim_data_space(struct work_struct *work) 1282 { 1283 struct btrfs_fs_info *fs_info; 1284 struct btrfs_space_info *space_info; 1285 u64 last_tickets_id; 1286 enum btrfs_flush_state flush_state = 0; 1287 1288 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); 1289 space_info = fs_info->data_sinfo; 1290 1291 spin_lock(&space_info->lock); 1292 if (list_empty(&space_info->tickets)) { 1293 space_info->flush = 0; 1294 spin_unlock(&space_info->lock); 1295 return; 1296 } 1297 last_tickets_id = space_info->tickets_id; 1298 spin_unlock(&space_info->lock); 1299 1300 while (!space_info->full) { 1301 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); 1302 spin_lock(&space_info->lock); 1303 if (list_empty(&space_info->tickets)) { 1304 space_info->flush = 0; 1305 spin_unlock(&space_info->lock); 1306 return; 1307 } 1308 1309 /* Something happened, fail everything and bail. */ 1310 if (BTRFS_FS_ERROR(fs_info)) 1311 goto aborted_fs; 1312 last_tickets_id = space_info->tickets_id; 1313 spin_unlock(&space_info->lock); 1314 } 1315 1316 while (flush_state < ARRAY_SIZE(data_flush_states)) { 1317 flush_space(fs_info, space_info, U64_MAX, 1318 data_flush_states[flush_state], false); 1319 spin_lock(&space_info->lock); 1320 if (list_empty(&space_info->tickets)) { 1321 space_info->flush = 0; 1322 spin_unlock(&space_info->lock); 1323 return; 1324 } 1325 1326 if (last_tickets_id == space_info->tickets_id) { 1327 flush_state++; 1328 } else { 1329 last_tickets_id = space_info->tickets_id; 1330 flush_state = 0; 1331 } 1332 1333 if (flush_state >= ARRAY_SIZE(data_flush_states)) { 1334 if (space_info->full) { 1335 if (maybe_fail_all_tickets(fs_info, space_info)) 1336 flush_state = 0; 1337 else 1338 space_info->flush = 0; 1339 } else { 1340 flush_state = 0; 1341 } 1342 1343 /* Something happened, fail everything and bail. 
			 */
			if (BTRFS_FS_ERROR(fs_info))
				goto aborted_fs;

		}
		spin_unlock(&space_info->lock);
	}
	return;

aborted_fs:
	maybe_fail_all_tickets(fs_info, space_info);
	space_info->flush = 0;
	spin_unlock(&space_info->lock);
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
	INIT_WORK(&fs_info->preempt_reclaim_work,
		  btrfs_preempt_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	FLUSH_DELALLOC_FULL,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
					    struct btrfs_space_info *space_info,
					    struct reserve_ticket *ticket,
					    const enum btrfs_flush_state *states,
					    int states_nr)
{
	u64 to_reclaim;
	int flush_state = 0;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	/*
	 * This is the priority reclaim path, so to_reclaim could be >0 still
	 * because we may have only satisfied the priority tickets and still
	 * left non priority tickets on the list.  We would then have
	 * to_reclaim but ->bytes == 0.
	 */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (flush_state < states_nr) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, to_reclaim, states[flush_state],
			    false);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	/* Attempt to steal from the global rsv if we can. */
	if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
		ticket->error = -ENOSPC;
		remove_ticket(space_info, ticket);
	}

	/*
	 * We must run try_granting_tickets here because we could be a large
	 * ticket in front of a smaller ticket that can now be satisfied with
	 * the available space.
	 */
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					struct reserve_ticket *ticket)
{
	spin_lock(&space_info->lock);

	/*
	 * We could have been granted before we got here.
	 */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (!space_info->full) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	ticket->error = -ENOSPC;
	remove_ticket(space_info, ticket);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list.  After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket.  If that would happen, then
			 * the ticket's task would not know that space was
			 * reserved despite getting an error, resulting in a
			 * space leak (bytes_may_use counter of our
			 * space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * Do the appropriate flushing and waiting for a ticket.
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting
 * for the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	ret = ticket->error;
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is
	 * that space wasn't reserved at all).
1547 */ 1548 ASSERT(!(ticket->bytes == 0 && ticket->error)); 1549 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, 1550 start_ns, flush, ticket->error); 1551 return ret; 1552 } 1553 1554 /* 1555 * This returns true if this flush state will go through the ordinary flushing 1556 * code. 1557 */ 1558 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) 1559 { 1560 return (flush == BTRFS_RESERVE_FLUSH_ALL) || 1561 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1562 } 1563 1564 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info, 1565 struct btrfs_space_info *space_info) 1566 { 1567 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); 1568 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); 1569 1570 /* 1571 * If we're heavy on ordered operations then clamping won't help us. We 1572 * need to clamp specifically to keep up with dirty'ing buffered 1573 * writers, because there's not a 1:1 correlation of writing delalloc 1574 * and freeing space, like there is with flushing delayed refs or 1575 * delayed nodes. If we're already more ordered than delalloc then 1576 * we're keeping up, otherwise we aren't and should probably clamp. 1577 */ 1578 if (ordered < delalloc) 1579 space_info->clamp = min(space_info->clamp + 1, 8); 1580 } 1581 1582 static inline bool can_steal(enum btrfs_reserve_flush_enum flush) 1583 { 1584 return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || 1585 flush == BTRFS_RESERVE_FLUSH_EVICT); 1586 } 1587 1588 /* 1589 * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want to 1590 * fail as quickly as possible. 1591 */ 1592 static inline bool can_ticket(enum btrfs_reserve_flush_enum flush) 1593 { 1594 return (flush != BTRFS_RESERVE_NO_FLUSH && 1595 flush != BTRFS_RESERVE_FLUSH_EMERGENCY); 1596 } 1597 1598 /** 1599 * Try to reserve bytes from the block_rsv's space 1600 * 1601 * @fs_info: the filesystem 1602 * @space_info: space info we want to allocate from 1603 * @orig_bytes: number of bytes we want 1604 * @flush: whether or not we can flush to make our reservation 1605 * 1606 * This will reserve orig_bytes number of bytes from the space info associated 1607 * with the block_rsv. If there is not enough space it will make an attempt to 1608 * flush out space to make room. It will do this by flushing delalloc if 1609 * possible or committing the transaction. If flush is 0 then no attempts to 1610 * regain reservations will be made and this will fail if there is not enough 1611 * space already. 1612 */ 1613 static int __reserve_bytes(struct btrfs_fs_info *fs_info, 1614 struct btrfs_space_info *space_info, u64 orig_bytes, 1615 enum btrfs_reserve_flush_enum flush) 1616 { 1617 struct work_struct *async_work; 1618 struct reserve_ticket ticket; 1619 u64 start_ns = 0; 1620 u64 used; 1621 int ret = 0; 1622 bool pending_tickets; 1623 1624 ASSERT(orig_bytes); 1625 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); 1626 1627 if (flush == BTRFS_RESERVE_FLUSH_DATA) 1628 async_work = &fs_info->async_data_reclaim_work; 1629 else 1630 async_work = &fs_info->async_reclaim_work; 1631 1632 spin_lock(&space_info->lock); 1633 ret = -ENOSPC; 1634 used = btrfs_space_info_used(space_info, true); 1635 1636 /* 1637 * We don't want NO_FLUSH allocations to jump everybody, they can 1638 * generally handle ENOSPC in a different way, so treat them the same as 1639 * normal flushers when it comes to skipping pending tickets. 
1640 */ 1641 if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH)) 1642 pending_tickets = !list_empty(&space_info->tickets) || 1643 !list_empty(&space_info->priority_tickets); 1644 else 1645 pending_tickets = !list_empty(&space_info->priority_tickets); 1646 1647 /* 1648 * Carry on if we have enough space (short-circuit) OR call 1649 * can_overcommit() to ensure we can overcommit to continue. 1650 */ 1651 if (!pending_tickets && 1652 ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) || 1653 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) { 1654 btrfs_space_info_update_bytes_may_use(fs_info, space_info, 1655 orig_bytes); 1656 ret = 0; 1657 } 1658 1659 /* 1660 * Things are dire, we need to make a reservation so we don't abort. We 1661 * will let this reservation go through as long as we have actual space 1662 * left to allocate for the block. 1663 */ 1664 if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) { 1665 used = btrfs_space_info_used(space_info, false); 1666 if (used + orig_bytes <= 1667 writable_total_bytes(fs_info, space_info)) { 1668 btrfs_space_info_update_bytes_may_use(fs_info, space_info, 1669 orig_bytes); 1670 ret = 0; 1671 } 1672 } 1673 1674 /* 1675 * If we couldn't make a reservation then setup our reservation ticket 1676 * and kick the async worker if it's not already running. 1677 * 1678 * If we are a priority flusher then we just need to add our ticket to 1679 * the list and we will do our own flushing further down. 1680 */ 1681 if (ret && can_ticket(flush)) { 1682 ticket.bytes = orig_bytes; 1683 ticket.error = 0; 1684 space_info->reclaim_size += ticket.bytes; 1685 init_waitqueue_head(&ticket.wait); 1686 ticket.steal = can_steal(flush); 1687 if (trace_btrfs_reserve_ticket_enabled()) 1688 start_ns = ktime_get_ns(); 1689 1690 if (flush == BTRFS_RESERVE_FLUSH_ALL || 1691 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || 1692 flush == BTRFS_RESERVE_FLUSH_DATA) { 1693 list_add_tail(&ticket.list, &space_info->tickets); 1694 if (!space_info->flush) { 1695 /* 1696 * We were forced to add a reserve ticket, so 1697 * our preemptive flushing is unable to keep 1698 * up. Clamp down on the threshold for the 1699 * preemptive flushing in order to keep up with 1700 * the workload. 1701 */ 1702 maybe_clamp_preempt(fs_info, space_info); 1703 1704 space_info->flush = 1; 1705 trace_btrfs_trigger_flush(fs_info, 1706 space_info->flags, 1707 orig_bytes, flush, 1708 "enospc"); 1709 queue_work(system_unbound_wq, async_work); 1710 } 1711 } else { 1712 list_add_tail(&ticket.list, 1713 &space_info->priority_tickets); 1714 } 1715 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 1716 /* 1717 * We will do the space reservation dance during log replay, 1718 * which means we won't have fs_info->fs_root set, so don't do 1719 * the async reclaim as we will panic. 
1720 */ 1721 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && 1722 !work_busy(&fs_info->preempt_reclaim_work) && 1723 need_preemptive_reclaim(fs_info, space_info)) { 1724 trace_btrfs_trigger_flush(fs_info, space_info->flags, 1725 orig_bytes, flush, "preempt"); 1726 queue_work(system_unbound_wq, 1727 &fs_info->preempt_reclaim_work); 1728 } 1729 } 1730 spin_unlock(&space_info->lock); 1731 if (!ret || !can_ticket(flush)) 1732 return ret; 1733 1734 return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns, 1735 orig_bytes, flush); 1736 } 1737 1738 /** 1739 * Trye to reserve metadata bytes from the block_rsv's space 1740 * 1741 * @fs_info: the filesystem 1742 * @block_rsv: block_rsv we're allocating for 1743 * @orig_bytes: number of bytes we want 1744 * @flush: whether or not we can flush to make our reservation 1745 * 1746 * This will reserve orig_bytes number of bytes from the space info associated 1747 * with the block_rsv. If there is not enough space it will make an attempt to 1748 * flush out space to make room. It will do this by flushing delalloc if 1749 * possible or committing the transaction. If flush is 0 then no attempts to 1750 * regain reservations will be made and this will fail if there is not enough 1751 * space already. 1752 */ 1753 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, 1754 struct btrfs_block_rsv *block_rsv, 1755 u64 orig_bytes, 1756 enum btrfs_reserve_flush_enum flush) 1757 { 1758 int ret; 1759 1760 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush); 1761 if (ret == -ENOSPC) { 1762 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1763 block_rsv->space_info->flags, 1764 orig_bytes, 1); 1765 1766 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1767 btrfs_dump_space_info(fs_info, block_rsv->space_info, 1768 orig_bytes, 0); 1769 } 1770 return ret; 1771 } 1772 1773 /** 1774 * Try to reserve data bytes for an allocation 1775 * 1776 * @fs_info: the filesystem 1777 * @bytes: number of bytes we need 1778 * @flush: how we are allowed to flush 1779 * 1780 * This will reserve bytes from the data space info. If there is not enough 1781 * space then we will attempt to flush space as specified by flush. 1782 */ 1783 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, 1784 enum btrfs_reserve_flush_enum flush) 1785 { 1786 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; 1787 int ret; 1788 1789 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA || 1790 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE || 1791 flush == BTRFS_RESERVE_NO_FLUSH); 1792 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); 1793 1794 ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush); 1795 if (ret == -ENOSPC) { 1796 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1797 data_sinfo->flags, bytes, 1); 1798 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1799 btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0); 1800 } 1801 return ret; 1802 } 1803 1804 /* Dump all the space infos when we abort a transaction due to ENOSPC. */ 1805 __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info) 1806 { 1807 struct btrfs_space_info *space_info; 1808 1809 btrfs_info(fs_info, "dumping space info:"); 1810 list_for_each_entry(space_info, &fs_info->space_info, list) { 1811 spin_lock(&space_info->lock); 1812 __btrfs_dump_space_info(fs_info, space_info); 1813 spin_unlock(&space_info->lock); 1814 } 1815 dump_global_block_rsv(fs_info); 1816 } 1817