1 // SPDX-License-Identifier: GPL-2.0 2 3 #include "misc.h" 4 #include "ctree.h" 5 #include "space-info.h" 6 #include "sysfs.h" 7 #include "volumes.h" 8 #include "free-space-cache.h" 9 #include "ordered-data.h" 10 #include "transaction.h" 11 #include "block-group.h" 12 #include "zoned.h" 13 #include "fs.h" 14 #include "accessors.h" 15 #include "extent-tree.h" 16 17 /* 18 * HOW DOES SPACE RESERVATION WORK 19 * 20 * If you want to know about delalloc specifically, there is a separate comment 21 * for that with the delalloc code. This comment is about how the whole system 22 * works generally. 23 * 24 * BASIC CONCEPTS 25 * 26 * 1) space_info. This is the ultimate arbiter of how much space we can use. 27 * There's a description of the bytes_ fields with the struct declaration, 28 * refer to that for specifics on each field. Suffice it to say that for 29 * reservations we care about total_bytes - SUM(space_info->bytes_) when 30 * determining if there is space to make an allocation. There is a space_info 31 * for METADATA, SYSTEM, and DATA areas. 32 * 33 * 2) block_rsv's. These are basically buckets for every different type of 34 * metadata reservation we have. You can see the comment in the block_rsv 35 * code on the rules for each type, but generally block_rsv->reserved is how 36 * much space is accounted for in space_info->bytes_may_use. 37 * 38 * 3) btrfs_calc*_size. These are the worst case calculations we used based 39 * on the number of items we will want to modify. We have one for changing 40 * items, and one for inserting new items. Generally we use these helpers to 41 * determine the size of the block reserves, and then use the actual bytes 42 * values to adjust the space_info counters. 43 * 44 * MAKING RESERVATIONS, THE NORMAL CASE 45 * 46 * We call into either btrfs_reserve_data_bytes() or 47 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with 48 * num_bytes we want to reserve. 49 * 50 * ->reserve 51 * space_info->bytes_may_reserve += num_bytes 52 * 53 * ->extent allocation 54 * Call btrfs_add_reserved_bytes() which does 55 * space_info->bytes_may_reserve -= num_bytes 56 * space_info->bytes_reserved += extent_bytes 57 * 58 * ->insert reference 59 * Call btrfs_update_block_group() which does 60 * space_info->bytes_reserved -= extent_bytes 61 * space_info->bytes_used += extent_bytes 62 * 63 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority) 64 * 65 * Assume we are unable to simply make the reservation because we do not have 66 * enough space 67 * 68 * -> __reserve_bytes 69 * create a reserve_ticket with ->bytes set to our reservation, add it to 70 * the tail of space_info->tickets, kick async flush thread 71 * 72 * ->handle_reserve_ticket 73 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set 74 * on the ticket. 75 * 76 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space 77 * Flushes various things attempting to free up space. 78 * 79 * -> btrfs_try_granting_tickets() 80 * This is called by anything that either subtracts space from 81 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the 82 * space_info->total_bytes. This loops through the ->priority_tickets and 83 * then the ->tickets list checking to see if the reservation can be 84 * completed. If it can the space is added to space_info->bytes_may_use and 85 * the ticket is woken up. 86 * 87 * -> ticket wakeup 88 * Check if ->bytes == 0, if it does we got our reservation and we can carry 89 * on, if not return the appropriate error (ENOSPC, but can be EINTR if we 90 * were interrupted.) 91 * 92 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY 93 * 94 * Same as the above, except we add ourselves to the 95 * space_info->priority_tickets, and we do not use ticket->wait, we simply 96 * call flush_space() ourselves for the states that are safe for us to call 97 * without deadlocking and hope for the best. 98 * 99 * THE FLUSHING STATES 100 * 101 * Generally speaking we will have two cases for each state, a "nice" state 102 * and a "ALL THE THINGS" state. In btrfs we delay a lot of work in order to 103 * reduce the locking over head on the various trees, and even to keep from 104 * doing any work at all in the case of delayed refs. Each of these delayed 105 * things however hold reservations, and so letting them run allows us to 106 * reclaim space so we can make new reservations. 107 * 108 * FLUSH_DELAYED_ITEMS 109 * Every inode has a delayed item to update the inode. Take a simple write 110 * for example, we would update the inode item at write time to update the 111 * mtime, and then again at finish_ordered_io() time in order to update the 112 * isize or bytes. We keep these delayed items to coalesce these operations 113 * into a single operation done on demand. These are an easy way to reclaim 114 * metadata space. 115 * 116 * FLUSH_DELALLOC 117 * Look at the delalloc comment to get an idea of how much space is reserved 118 * for delayed allocation. We can reclaim some of this space simply by 119 * running delalloc, but usually we need to wait for ordered extents to 120 * reclaim the bulk of this space. 121 * 122 * FLUSH_DELAYED_REFS 123 * We have a block reserve for the outstanding delayed refs space, and every 124 * delayed ref operation holds a reservation. Running these is a quick way 125 * to reclaim space, but we want to hold this until the end because COW can 126 * churn a lot and we can avoid making some extent tree modifications if we 127 * are able to delay for as long as possible. 128 * 129 * ALLOC_CHUNK 130 * We will skip this the first time through space reservation, because of 131 * overcommit and we don't want to have a lot of useless metadata space when 132 * our worst case reservations will likely never come true. 133 * 134 * RUN_DELAYED_IPUTS 135 * If we're freeing inodes we're likely freeing checksums, file extent 136 * items, and extent tree items. Loads of space could be freed up by these 137 * operations, however they won't be usable until the transaction commits. 138 * 139 * COMMIT_TRANS 140 * This will commit the transaction. Historically we had a lot of logic 141 * surrounding whether or not we'd commit the transaction, but this waits born 142 * out of a pre-tickets era where we could end up committing the transaction 143 * thousands of times in a row without making progress. Now thanks to our 144 * ticketing system we know if we're not making progress and can error 145 * everybody out after a few commits rather than burning the disk hoping for 146 * a different answer. 147 * 148 * OVERCOMMIT 149 * 150 * Because we hold so many reservations for metadata we will allow you to 151 * reserve more space than is currently free in the currently allocate 152 * metadata space. This only happens with metadata, data does not allow 153 * overcommitting. 154 * 155 * You can see the current logic for when we allow overcommit in 156 * btrfs_can_overcommit(), but it only applies to unallocated space. If there 157 * is no unallocated space to be had, all reservations are kept within the 158 * free space in the allocated metadata chunks. 159 * 160 * Because of overcommitting, you generally want to use the 161 * btrfs_can_overcommit() logic for metadata allocations, as it does the right 162 * thing with or without extra unallocated space. 163 */ 164 165 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, 166 bool may_use_included) 167 { 168 ASSERT(s_info); 169 return s_info->bytes_used + s_info->bytes_reserved + 170 s_info->bytes_pinned + s_info->bytes_readonly + 171 s_info->bytes_zone_unusable + 172 (may_use_included ? s_info->bytes_may_use : 0); 173 } 174 175 /* 176 * after adding space to the filesystem, we need to clear the full flags 177 * on all the space infos. 178 */ 179 void btrfs_clear_space_info_full(struct btrfs_fs_info *info) 180 { 181 struct list_head *head = &info->space_info; 182 struct btrfs_space_info *found; 183 184 list_for_each_entry(found, head, list) 185 found->full = 0; 186 } 187 188 /* 189 * Block groups with more than this value (percents) of unusable space will be 190 * scheduled for background reclaim. 191 */ 192 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75) 193 194 /* 195 * Calculate chunk size depending on volume type (regular or zoned). 196 */ 197 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags) 198 { 199 if (btrfs_is_zoned(fs_info)) 200 return fs_info->zone_size; 201 202 ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 203 204 if (flags & BTRFS_BLOCK_GROUP_DATA) 205 return BTRFS_MAX_DATA_CHUNK_SIZE; 206 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 207 return SZ_32M; 208 209 /* Handle BTRFS_BLOCK_GROUP_METADATA */ 210 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G) 211 return SZ_1G; 212 213 return SZ_256M; 214 } 215 216 /* 217 * Update default chunk size. 218 */ 219 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info, 220 u64 chunk_size) 221 { 222 WRITE_ONCE(space_info->chunk_size, chunk_size); 223 } 224 225 static int create_space_info(struct btrfs_fs_info *info, u64 flags) 226 { 227 228 struct btrfs_space_info *space_info; 229 int i; 230 int ret; 231 232 space_info = kzalloc(sizeof(*space_info), GFP_NOFS); 233 if (!space_info) 234 return -ENOMEM; 235 236 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 237 INIT_LIST_HEAD(&space_info->block_groups[i]); 238 init_rwsem(&space_info->groups_sem); 239 spin_lock_init(&space_info->lock); 240 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; 241 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 242 INIT_LIST_HEAD(&space_info->ro_bgs); 243 INIT_LIST_HEAD(&space_info->tickets); 244 INIT_LIST_HEAD(&space_info->priority_tickets); 245 space_info->clamp = 1; 246 btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags)); 247 248 if (btrfs_is_zoned(info)) 249 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH; 250 251 ret = btrfs_sysfs_add_space_info_type(info, space_info); 252 if (ret) 253 return ret; 254 255 list_add(&space_info->list, &info->space_info); 256 if (flags & BTRFS_BLOCK_GROUP_DATA) 257 info->data_sinfo = space_info; 258 259 return ret; 260 } 261 262 int btrfs_init_space_info(struct btrfs_fs_info *fs_info) 263 { 264 struct btrfs_super_block *disk_super; 265 u64 features; 266 u64 flags; 267 int mixed = 0; 268 int ret; 269 270 disk_super = fs_info->super_copy; 271 if (!btrfs_super_root(disk_super)) 272 return -EINVAL; 273 274 features = btrfs_super_incompat_flags(disk_super); 275 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 276 mixed = 1; 277 278 flags = BTRFS_BLOCK_GROUP_SYSTEM; 279 ret = create_space_info(fs_info, flags); 280 if (ret) 281 goto out; 282 283 if (mixed) { 284 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; 285 ret = create_space_info(fs_info, flags); 286 } else { 287 flags = BTRFS_BLOCK_GROUP_METADATA; 288 ret = create_space_info(fs_info, flags); 289 if (ret) 290 goto out; 291 292 flags = BTRFS_BLOCK_GROUP_DATA; 293 ret = create_space_info(fs_info, flags); 294 } 295 out: 296 return ret; 297 } 298 299 void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info, 300 struct btrfs_block_group *block_group) 301 { 302 struct btrfs_space_info *found; 303 int factor, index; 304 305 factor = btrfs_bg_type_to_factor(block_group->flags); 306 307 found = btrfs_find_space_info(info, block_group->flags); 308 ASSERT(found); 309 spin_lock(&found->lock); 310 found->total_bytes += block_group->length; 311 found->disk_total += block_group->length * factor; 312 found->bytes_used += block_group->used; 313 found->disk_used += block_group->used * factor; 314 found->bytes_readonly += block_group->bytes_super; 315 found->bytes_zone_unusable += block_group->zone_unusable; 316 if (block_group->length > 0) 317 found->full = 0; 318 btrfs_try_granting_tickets(info, found); 319 spin_unlock(&found->lock); 320 321 block_group->space_info = found; 322 323 index = btrfs_bg_flags_to_raid_index(block_group->flags); 324 down_write(&found->groups_sem); 325 list_add_tail(&block_group->list, &found->block_groups[index]); 326 up_write(&found->groups_sem); 327 } 328 329 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, 330 u64 flags) 331 { 332 struct list_head *head = &info->space_info; 333 struct btrfs_space_info *found; 334 335 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK; 336 337 list_for_each_entry(found, head, list) { 338 if (found->flags & flags) 339 return found; 340 } 341 return NULL; 342 } 343 344 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, 345 struct btrfs_space_info *space_info, 346 enum btrfs_reserve_flush_enum flush) 347 { 348 u64 profile; 349 u64 avail; 350 int factor; 351 352 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) 353 profile = btrfs_system_alloc_profile(fs_info); 354 else 355 profile = btrfs_metadata_alloc_profile(fs_info); 356 357 avail = atomic64_read(&fs_info->free_chunk_space); 358 359 /* 360 * If we have dup, raid1 or raid10 then only half of the free 361 * space is actually usable. For raid56, the space info used 362 * doesn't include the parity drive, so we don't have to 363 * change the math 364 */ 365 factor = btrfs_bg_type_to_factor(profile); 366 avail = div_u64(avail, factor); 367 368 /* 369 * If we aren't flushing all things, let us overcommit up to 370 * 1/2th of the space. If we can flush, don't let us overcommit 371 * too much, let it overcommit up to 1/8 of the space. 372 */ 373 if (flush == BTRFS_RESERVE_FLUSH_ALL) 374 avail >>= 3; 375 else 376 avail >>= 1; 377 return avail; 378 } 379 380 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, 381 struct btrfs_space_info *space_info, u64 bytes, 382 enum btrfs_reserve_flush_enum flush) 383 { 384 u64 avail; 385 u64 used; 386 387 /* Don't overcommit when in mixed mode */ 388 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) 389 return 0; 390 391 used = btrfs_space_info_used(space_info, true); 392 if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) && 393 (space_info->flags & BTRFS_BLOCK_GROUP_METADATA)) 394 avail = 0; 395 else 396 avail = calc_available_free_space(fs_info, space_info, flush); 397 398 if (used + bytes < space_info->total_bytes + avail) 399 return 1; 400 return 0; 401 } 402 403 static void remove_ticket(struct btrfs_space_info *space_info, 404 struct reserve_ticket *ticket) 405 { 406 if (!list_empty(&ticket->list)) { 407 list_del_init(&ticket->list); 408 ASSERT(space_info->reclaim_size >= ticket->bytes); 409 space_info->reclaim_size -= ticket->bytes; 410 } 411 } 412 413 /* 414 * This is for space we already have accounted in space_info->bytes_may_use, so 415 * basically when we're returning space from block_rsv's. 416 */ 417 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, 418 struct btrfs_space_info *space_info) 419 { 420 struct list_head *head; 421 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; 422 423 lockdep_assert_held(&space_info->lock); 424 425 head = &space_info->priority_tickets; 426 again: 427 while (!list_empty(head)) { 428 struct reserve_ticket *ticket; 429 u64 used = btrfs_space_info_used(space_info, true); 430 431 ticket = list_first_entry(head, struct reserve_ticket, list); 432 433 /* Check and see if our ticket can be satisfied now. */ 434 if ((used + ticket->bytes <= space_info->total_bytes) || 435 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, 436 flush)) { 437 btrfs_space_info_update_bytes_may_use(fs_info, 438 space_info, 439 ticket->bytes); 440 remove_ticket(space_info, ticket); 441 ticket->bytes = 0; 442 space_info->tickets_id++; 443 wake_up(&ticket->wait); 444 } else { 445 break; 446 } 447 } 448 449 if (head == &space_info->priority_tickets) { 450 head = &space_info->tickets; 451 flush = BTRFS_RESERVE_FLUSH_ALL; 452 goto again; 453 } 454 } 455 456 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \ 457 do { \ 458 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \ 459 spin_lock(&__rsv->lock); \ 460 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \ 461 __rsv->size, __rsv->reserved); \ 462 spin_unlock(&__rsv->lock); \ 463 } while (0) 464 465 static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info) 466 { 467 switch (space_info->flags) { 468 case BTRFS_BLOCK_GROUP_SYSTEM: 469 return "SYSTEM"; 470 case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA: 471 return "DATA+METADATA"; 472 case BTRFS_BLOCK_GROUP_DATA: 473 return "DATA"; 474 case BTRFS_BLOCK_GROUP_METADATA: 475 return "METADATA"; 476 default: 477 return "UNKNOWN"; 478 } 479 } 480 481 static void dump_global_block_rsv(struct btrfs_fs_info *fs_info) 482 { 483 DUMP_BLOCK_RSV(fs_info, global_block_rsv); 484 DUMP_BLOCK_RSV(fs_info, trans_block_rsv); 485 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv); 486 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv); 487 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); 488 } 489 490 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 491 struct btrfs_space_info *info) 492 { 493 const char *flag_str = space_info_flag_to_str(info); 494 lockdep_assert_held(&info->lock); 495 496 /* The free space could be negative in case of overcommit */ 497 btrfs_info(fs_info, "space_info %s has %lld free, is %sfull", 498 flag_str, 499 (s64)(info->total_bytes - btrfs_space_info_used(info, true)), 500 info->full ? "" : "not "); 501 btrfs_info(fs_info, 502 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu", 503 info->total_bytes, info->bytes_used, info->bytes_pinned, 504 info->bytes_reserved, info->bytes_may_use, 505 info->bytes_readonly, info->bytes_zone_unusable); 506 } 507 508 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 509 struct btrfs_space_info *info, u64 bytes, 510 int dump_block_groups) 511 { 512 struct btrfs_block_group *cache; 513 u64 total_avail = 0; 514 int index = 0; 515 516 spin_lock(&info->lock); 517 __btrfs_dump_space_info(fs_info, info); 518 dump_global_block_rsv(fs_info); 519 spin_unlock(&info->lock); 520 521 if (!dump_block_groups) 522 return; 523 524 down_read(&info->groups_sem); 525 again: 526 list_for_each_entry(cache, &info->block_groups[index], list) { 527 u64 avail; 528 529 spin_lock(&cache->lock); 530 avail = cache->length - cache->used - cache->pinned - 531 cache->reserved - cache->delalloc_bytes - 532 cache->bytes_super - cache->zone_unusable; 533 btrfs_info(fs_info, 534 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s", 535 cache->start, cache->length, cache->used, cache->pinned, 536 cache->reserved, cache->delalloc_bytes, 537 cache->bytes_super, cache->zone_unusable, 538 avail, cache->ro ? "[readonly]" : ""); 539 spin_unlock(&cache->lock); 540 btrfs_dump_free_space(cache, bytes); 541 total_avail += avail; 542 } 543 if (++index < BTRFS_NR_RAID_TYPES) 544 goto again; 545 up_read(&info->groups_sem); 546 547 btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail); 548 } 549 550 static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info, 551 u64 to_reclaim) 552 { 553 u64 bytes; 554 u64 nr; 555 556 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 557 nr = div64_u64(to_reclaim, bytes); 558 if (!nr) 559 nr = 1; 560 return nr; 561 } 562 563 static inline u64 calc_delayed_refs_nr(const struct btrfs_fs_info *fs_info, 564 u64 to_reclaim) 565 { 566 const u64 bytes = btrfs_calc_delayed_ref_bytes(fs_info, 1); 567 u64 nr; 568 569 nr = div64_u64(to_reclaim, bytes); 570 if (!nr) 571 nr = 1; 572 return nr; 573 } 574 575 #define EXTENT_SIZE_PER_ITEM SZ_256K 576 577 /* 578 * shrink metadata reservation for delalloc 579 */ 580 static void shrink_delalloc(struct btrfs_fs_info *fs_info, 581 struct btrfs_space_info *space_info, 582 u64 to_reclaim, bool wait_ordered, 583 bool for_preempt) 584 { 585 struct btrfs_trans_handle *trans; 586 u64 delalloc_bytes; 587 u64 ordered_bytes; 588 u64 items; 589 long time_left; 590 int loops; 591 592 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); 593 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); 594 if (delalloc_bytes == 0 && ordered_bytes == 0) 595 return; 596 597 /* Calc the number of the pages we need flush for space reservation */ 598 if (to_reclaim == U64_MAX) { 599 items = U64_MAX; 600 } else { 601 /* 602 * to_reclaim is set to however much metadata we need to 603 * reclaim, but reclaiming that much data doesn't really track 604 * exactly. What we really want to do is reclaim full inode's 605 * worth of reservations, however that's not available to us 606 * here. We will take a fraction of the delalloc bytes for our 607 * flushing loops and hope for the best. Delalloc will expand 608 * the amount we write to cover an entire dirty extent, which 609 * will reclaim the metadata reservation for that range. If 610 * it's not enough subsequent flush stages will be more 611 * aggressive. 612 */ 613 to_reclaim = max(to_reclaim, delalloc_bytes >> 3); 614 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; 615 } 616 617 trans = current->journal_info; 618 619 /* 620 * If we are doing more ordered than delalloc we need to just wait on 621 * ordered extents, otherwise we'll waste time trying to flush delalloc 622 * that likely won't give us the space back we need. 623 */ 624 if (ordered_bytes > delalloc_bytes && !for_preempt) 625 wait_ordered = true; 626 627 loops = 0; 628 while ((delalloc_bytes || ordered_bytes) && loops < 3) { 629 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT; 630 long nr_pages = min_t(u64, temp, LONG_MAX); 631 int async_pages; 632 633 btrfs_start_delalloc_roots(fs_info, nr_pages, true); 634 635 /* 636 * We need to make sure any outstanding async pages are now 637 * processed before we continue. This is because things like 638 * sync_inode() try to be smart and skip writing if the inode is 639 * marked clean. We don't use filemap_fwrite for flushing 640 * because we want to control how many pages we write out at a 641 * time, thus this is the only safe way to make sure we've 642 * waited for outstanding compressed workers to have started 643 * their jobs and thus have ordered extents set up properly. 644 * 645 * This exists because we do not want to wait for each 646 * individual inode to finish its async work, we simply want to 647 * start the IO on everybody, and then come back here and wait 648 * for all of the async work to catch up. Once we're done with 649 * that we know we'll have ordered extents for everything and we 650 * can decide if we wait for that or not. 651 * 652 * If we choose to replace this in the future, make absolutely 653 * sure that the proper waiting is being done in the async case, 654 * as there have been bugs in that area before. 655 */ 656 async_pages = atomic_read(&fs_info->async_delalloc_pages); 657 if (!async_pages) 658 goto skip_async; 659 660 /* 661 * We don't want to wait forever, if we wrote less pages in this 662 * loop than we have outstanding, only wait for that number of 663 * pages, otherwise we can wait for all async pages to finish 664 * before continuing. 665 */ 666 if (async_pages > nr_pages) 667 async_pages -= nr_pages; 668 else 669 async_pages = 0; 670 wait_event(fs_info->async_submit_wait, 671 atomic_read(&fs_info->async_delalloc_pages) <= 672 async_pages); 673 skip_async: 674 loops++; 675 if (wait_ordered && !trans) { 676 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 677 } else { 678 time_left = schedule_timeout_killable(1); 679 if (time_left) 680 break; 681 } 682 683 /* 684 * If we are for preemption we just want a one-shot of delalloc 685 * flushing so we can stop flushing if we decide we don't need 686 * to anymore. 687 */ 688 if (for_preempt) 689 break; 690 691 spin_lock(&space_info->lock); 692 if (list_empty(&space_info->tickets) && 693 list_empty(&space_info->priority_tickets)) { 694 spin_unlock(&space_info->lock); 695 break; 696 } 697 spin_unlock(&space_info->lock); 698 699 delalloc_bytes = percpu_counter_sum_positive( 700 &fs_info->delalloc_bytes); 701 ordered_bytes = percpu_counter_sum_positive( 702 &fs_info->ordered_bytes); 703 } 704 } 705 706 /* 707 * Try to flush some data based on policy set by @state. This is only advisory 708 * and may fail for various reasons. The caller is supposed to examine the 709 * state of @space_info to detect the outcome. 710 */ 711 static void flush_space(struct btrfs_fs_info *fs_info, 712 struct btrfs_space_info *space_info, u64 num_bytes, 713 enum btrfs_flush_state state, bool for_preempt) 714 { 715 struct btrfs_root *root = fs_info->tree_root; 716 struct btrfs_trans_handle *trans; 717 int nr; 718 int ret = 0; 719 720 switch (state) { 721 case FLUSH_DELAYED_ITEMS_NR: 722 case FLUSH_DELAYED_ITEMS: 723 if (state == FLUSH_DELAYED_ITEMS_NR) 724 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2; 725 else 726 nr = -1; 727 728 trans = btrfs_join_transaction_nostart(root); 729 if (IS_ERR(trans)) { 730 ret = PTR_ERR(trans); 731 if (ret == -ENOENT) 732 ret = 0; 733 break; 734 } 735 ret = btrfs_run_delayed_items_nr(trans, nr); 736 btrfs_end_transaction(trans); 737 break; 738 case FLUSH_DELALLOC: 739 case FLUSH_DELALLOC_WAIT: 740 case FLUSH_DELALLOC_FULL: 741 if (state == FLUSH_DELALLOC_FULL) 742 num_bytes = U64_MAX; 743 shrink_delalloc(fs_info, space_info, num_bytes, 744 state != FLUSH_DELALLOC, for_preempt); 745 break; 746 case FLUSH_DELAYED_REFS_NR: 747 case FLUSH_DELAYED_REFS: 748 trans = btrfs_join_transaction_nostart(root); 749 if (IS_ERR(trans)) { 750 ret = PTR_ERR(trans); 751 if (ret == -ENOENT) 752 ret = 0; 753 break; 754 } 755 if (state == FLUSH_DELAYED_REFS_NR) 756 nr = calc_delayed_refs_nr(fs_info, num_bytes); 757 else 758 nr = 0; 759 btrfs_run_delayed_refs(trans, nr); 760 btrfs_end_transaction(trans); 761 break; 762 case ALLOC_CHUNK: 763 case ALLOC_CHUNK_FORCE: 764 /* 765 * For metadata space on zoned filesystem, reaching here means we 766 * don't have enough space left in active_total_bytes. Try to 767 * activate a block group first, because we may have inactive 768 * block group already allocated. 769 */ 770 ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false); 771 if (ret < 0) 772 break; 773 else if (ret == 1) 774 break; 775 776 trans = btrfs_join_transaction(root); 777 if (IS_ERR(trans)) { 778 ret = PTR_ERR(trans); 779 break; 780 } 781 ret = btrfs_chunk_alloc(trans, 782 btrfs_get_alloc_profile(fs_info, space_info->flags), 783 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE : 784 CHUNK_ALLOC_FORCE); 785 btrfs_end_transaction(trans); 786 787 /* 788 * For metadata space on zoned filesystem, allocating a new chunk 789 * is not enough. We still need to activate the block * group. 790 * Active the newly allocated block group by (maybe) finishing 791 * a block group. 792 */ 793 if (ret == 1) { 794 ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true); 795 /* 796 * Revert to the original ret regardless we could finish 797 * one block group or not. 798 */ 799 if (ret >= 0) 800 ret = 1; 801 } 802 803 if (ret > 0 || ret == -ENOSPC) 804 ret = 0; 805 break; 806 case RUN_DELAYED_IPUTS: 807 /* 808 * If we have pending delayed iputs then we could free up a 809 * bunch of pinned space, so make sure we run the iputs before 810 * we do our pinned bytes check below. 811 */ 812 btrfs_run_delayed_iputs(fs_info); 813 btrfs_wait_on_delayed_iputs(fs_info); 814 break; 815 case COMMIT_TRANS: 816 ASSERT(current->journal_info == NULL); 817 trans = btrfs_join_transaction(root); 818 if (IS_ERR(trans)) { 819 ret = PTR_ERR(trans); 820 break; 821 } 822 ret = btrfs_commit_transaction(trans); 823 break; 824 default: 825 ret = -ENOSPC; 826 break; 827 } 828 829 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, 830 ret, for_preempt); 831 return; 832 } 833 834 static inline u64 835 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, 836 struct btrfs_space_info *space_info) 837 { 838 u64 used; 839 u64 avail; 840 u64 to_reclaim = space_info->reclaim_size; 841 842 lockdep_assert_held(&space_info->lock); 843 844 avail = calc_available_free_space(fs_info, space_info, 845 BTRFS_RESERVE_FLUSH_ALL); 846 used = btrfs_space_info_used(space_info, true); 847 848 /* 849 * We may be flushing because suddenly we have less space than we had 850 * before, and now we're well over-committed based on our current free 851 * space. If that's the case add in our overage so we make sure to put 852 * appropriate pressure on the flushing state machine. 853 */ 854 if (space_info->total_bytes + avail < used) 855 to_reclaim += used - (space_info->total_bytes + avail); 856 857 return to_reclaim; 858 } 859 860 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, 861 struct btrfs_space_info *space_info) 862 { 863 u64 global_rsv_size = fs_info->global_block_rsv.reserved; 864 u64 ordered, delalloc; 865 u64 thresh; 866 u64 used; 867 868 thresh = mult_perc(space_info->total_bytes, 90); 869 870 lockdep_assert_held(&space_info->lock); 871 872 /* If we're just plain full then async reclaim just slows us down. */ 873 if ((space_info->bytes_used + space_info->bytes_reserved + 874 global_rsv_size) >= thresh) 875 return false; 876 877 used = space_info->bytes_may_use + space_info->bytes_pinned; 878 879 /* The total flushable belongs to the global rsv, don't flush. */ 880 if (global_rsv_size >= used) 881 return false; 882 883 /* 884 * 128MiB is 1/4 of the maximum global rsv size. If we have less than 885 * that devoted to other reservations then there's no sense in flushing, 886 * we don't have a lot of things that need flushing. 887 */ 888 if (used - global_rsv_size <= SZ_128M) 889 return false; 890 891 /* 892 * We have tickets queued, bail so we don't compete with the async 893 * flushers. 894 */ 895 if (space_info->reclaim_size) 896 return false; 897 898 /* 899 * If we have over half of the free space occupied by reservations or 900 * pinned then we want to start flushing. 901 * 902 * We do not do the traditional thing here, which is to say 903 * 904 * if (used >= ((total_bytes + avail) / 2)) 905 * return 1; 906 * 907 * because this doesn't quite work how we want. If we had more than 50% 908 * of the space_info used by bytes_used and we had 0 available we'd just 909 * constantly run the background flusher. Instead we want it to kick in 910 * if our reclaimable space exceeds our clamped free space. 911 * 912 * Our clamping range is 2^1 -> 2^8. Practically speaking that means 913 * the following: 914 * 915 * Amount of RAM Minimum threshold Maximum threshold 916 * 917 * 256GiB 1GiB 128GiB 918 * 128GiB 512MiB 64GiB 919 * 64GiB 256MiB 32GiB 920 * 32GiB 128MiB 16GiB 921 * 16GiB 64MiB 8GiB 922 * 923 * These are the range our thresholds will fall in, corresponding to how 924 * much delalloc we need for the background flusher to kick in. 925 */ 926 927 thresh = calc_available_free_space(fs_info, space_info, 928 BTRFS_RESERVE_FLUSH_ALL); 929 used = space_info->bytes_used + space_info->bytes_reserved + 930 space_info->bytes_readonly + global_rsv_size; 931 if (used < space_info->total_bytes) 932 thresh += space_info->total_bytes - used; 933 thresh >>= space_info->clamp; 934 935 used = space_info->bytes_pinned; 936 937 /* 938 * If we have more ordered bytes than delalloc bytes then we're either 939 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting 940 * around. Preemptive flushing is only useful in that it can free up 941 * space before tickets need to wait for things to finish. In the case 942 * of ordered extents, preemptively waiting on ordered extents gets us 943 * nothing, if our reservations are tied up in ordered extents we'll 944 * simply have to slow down writers by forcing them to wait on ordered 945 * extents. 946 * 947 * In the case that ordered is larger than delalloc, only include the 948 * block reserves that we would actually be able to directly reclaim 949 * from. In this case if we're heavy on metadata operations this will 950 * clearly be heavy enough to warrant preemptive flushing. In the case 951 * of heavy DIO or ordered reservations, preemptive flushing will just 952 * waste time and cause us to slow down. 953 * 954 * We want to make sure we truly are maxed out on ordered however, so 955 * cut ordered in half, and if it's still higher than delalloc then we 956 * can keep flushing. This is to avoid the case where we start 957 * flushing, and now delalloc == ordered and we stop preemptively 958 * flushing when we could still have several gigs of delalloc to flush. 959 */ 960 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1; 961 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes); 962 if (ordered >= delalloc) 963 used += fs_info->delayed_refs_rsv.reserved + 964 fs_info->delayed_block_rsv.reserved; 965 else 966 used += space_info->bytes_may_use - global_rsv_size; 967 968 return (used >= thresh && !btrfs_fs_closing(fs_info) && 969 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); 970 } 971 972 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, 973 struct btrfs_space_info *space_info, 974 struct reserve_ticket *ticket) 975 { 976 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 977 u64 min_bytes; 978 979 if (!ticket->steal) 980 return false; 981 982 if (global_rsv->space_info != space_info) 983 return false; 984 985 spin_lock(&global_rsv->lock); 986 min_bytes = mult_perc(global_rsv->size, 10); 987 if (global_rsv->reserved < min_bytes + ticket->bytes) { 988 spin_unlock(&global_rsv->lock); 989 return false; 990 } 991 global_rsv->reserved -= ticket->bytes; 992 remove_ticket(space_info, ticket); 993 ticket->bytes = 0; 994 wake_up(&ticket->wait); 995 space_info->tickets_id++; 996 if (global_rsv->reserved < global_rsv->size) 997 global_rsv->full = 0; 998 spin_unlock(&global_rsv->lock); 999 1000 return true; 1001 } 1002 1003 /* 1004 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets 1005 * @fs_info - fs_info for this fs 1006 * @space_info - the space info we were flushing 1007 * 1008 * We call this when we've exhausted our flushing ability and haven't made 1009 * progress in satisfying tickets. The reservation code handles tickets in 1010 * order, so if there is a large ticket first and then smaller ones we could 1011 * very well satisfy the smaller tickets. This will attempt to wake up any 1012 * tickets in the list to catch this case. 1013 * 1014 * This function returns true if it was able to make progress by clearing out 1015 * other tickets, or if it stumbles across a ticket that was smaller than the 1016 * first ticket. 1017 */ 1018 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, 1019 struct btrfs_space_info *space_info) 1020 { 1021 struct reserve_ticket *ticket; 1022 u64 tickets_id = space_info->tickets_id; 1023 const bool aborted = BTRFS_FS_ERROR(fs_info); 1024 1025 trace_btrfs_fail_all_tickets(fs_info, space_info); 1026 1027 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 1028 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info"); 1029 __btrfs_dump_space_info(fs_info, space_info); 1030 } 1031 1032 while (!list_empty(&space_info->tickets) && 1033 tickets_id == space_info->tickets_id) { 1034 ticket = list_first_entry(&space_info->tickets, 1035 struct reserve_ticket, list); 1036 1037 if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket)) 1038 return true; 1039 1040 if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1041 btrfs_info(fs_info, "failing ticket with %llu bytes", 1042 ticket->bytes); 1043 1044 remove_ticket(space_info, ticket); 1045 if (aborted) 1046 ticket->error = -EIO; 1047 else 1048 ticket->error = -ENOSPC; 1049 wake_up(&ticket->wait); 1050 1051 /* 1052 * We're just throwing tickets away, so more flushing may not 1053 * trip over btrfs_try_granting_tickets, so we need to call it 1054 * here to see if we can make progress with the next ticket in 1055 * the list. 1056 */ 1057 if (!aborted) 1058 btrfs_try_granting_tickets(fs_info, space_info); 1059 } 1060 return (tickets_id != space_info->tickets_id); 1061 } 1062 1063 /* 1064 * This is for normal flushers, we can wait all goddamned day if we want to. We 1065 * will loop and continuously try to flush as long as we are making progress. 1066 * We count progress as clearing off tickets each time we have to loop. 1067 */ 1068 static void btrfs_async_reclaim_metadata_space(struct work_struct *work) 1069 { 1070 struct btrfs_fs_info *fs_info; 1071 struct btrfs_space_info *space_info; 1072 u64 to_reclaim; 1073 enum btrfs_flush_state flush_state; 1074 int commit_cycles = 0; 1075 u64 last_tickets_id; 1076 1077 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); 1078 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 1079 1080 spin_lock(&space_info->lock); 1081 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 1082 if (!to_reclaim) { 1083 space_info->flush = 0; 1084 spin_unlock(&space_info->lock); 1085 return; 1086 } 1087 last_tickets_id = space_info->tickets_id; 1088 spin_unlock(&space_info->lock); 1089 1090 flush_state = FLUSH_DELAYED_ITEMS_NR; 1091 do { 1092 flush_space(fs_info, space_info, to_reclaim, flush_state, false); 1093 spin_lock(&space_info->lock); 1094 if (list_empty(&space_info->tickets)) { 1095 space_info->flush = 0; 1096 spin_unlock(&space_info->lock); 1097 return; 1098 } 1099 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, 1100 space_info); 1101 if (last_tickets_id == space_info->tickets_id) { 1102 flush_state++; 1103 } else { 1104 last_tickets_id = space_info->tickets_id; 1105 flush_state = FLUSH_DELAYED_ITEMS_NR; 1106 if (commit_cycles) 1107 commit_cycles--; 1108 } 1109 1110 /* 1111 * We do not want to empty the system of delalloc unless we're 1112 * under heavy pressure, so allow one trip through the flushing 1113 * logic before we start doing a FLUSH_DELALLOC_FULL. 1114 */ 1115 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles) 1116 flush_state++; 1117 1118 /* 1119 * We don't want to force a chunk allocation until we've tried 1120 * pretty hard to reclaim space. Think of the case where we 1121 * freed up a bunch of space and so have a lot of pinned space 1122 * to reclaim. We would rather use that than possibly create a 1123 * underutilized metadata chunk. So if this is our first run 1124 * through the flushing state machine skip ALLOC_CHUNK_FORCE and 1125 * commit the transaction. If nothing has changed the next go 1126 * around then we can force a chunk allocation. 1127 */ 1128 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles) 1129 flush_state++; 1130 1131 if (flush_state > COMMIT_TRANS) { 1132 commit_cycles++; 1133 if (commit_cycles > 2) { 1134 if (maybe_fail_all_tickets(fs_info, space_info)) { 1135 flush_state = FLUSH_DELAYED_ITEMS_NR; 1136 commit_cycles--; 1137 } else { 1138 space_info->flush = 0; 1139 } 1140 } else { 1141 flush_state = FLUSH_DELAYED_ITEMS_NR; 1142 } 1143 } 1144 spin_unlock(&space_info->lock); 1145 } while (flush_state <= COMMIT_TRANS); 1146 } 1147 1148 /* 1149 * This handles pre-flushing of metadata space before we get to the point that 1150 * we need to start blocking threads on tickets. The logic here is different 1151 * from the other flush paths because it doesn't rely on tickets to tell us how 1152 * much we need to flush, instead it attempts to keep us below the 80% full 1153 * watermark of space by flushing whichever reservation pool is currently the 1154 * largest. 1155 */ 1156 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work) 1157 { 1158 struct btrfs_fs_info *fs_info; 1159 struct btrfs_space_info *space_info; 1160 struct btrfs_block_rsv *delayed_block_rsv; 1161 struct btrfs_block_rsv *delayed_refs_rsv; 1162 struct btrfs_block_rsv *global_rsv; 1163 struct btrfs_block_rsv *trans_rsv; 1164 int loops = 0; 1165 1166 fs_info = container_of(work, struct btrfs_fs_info, 1167 preempt_reclaim_work); 1168 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 1169 delayed_block_rsv = &fs_info->delayed_block_rsv; 1170 delayed_refs_rsv = &fs_info->delayed_refs_rsv; 1171 global_rsv = &fs_info->global_block_rsv; 1172 trans_rsv = &fs_info->trans_block_rsv; 1173 1174 spin_lock(&space_info->lock); 1175 while (need_preemptive_reclaim(fs_info, space_info)) { 1176 enum btrfs_flush_state flush; 1177 u64 delalloc_size = 0; 1178 u64 to_reclaim, block_rsv_size; 1179 u64 global_rsv_size = global_rsv->reserved; 1180 1181 loops++; 1182 1183 /* 1184 * We don't have a precise counter for the metadata being 1185 * reserved for delalloc, so we'll approximate it by subtracting 1186 * out the block rsv's space from the bytes_may_use. If that 1187 * amount is higher than the individual reserves, then we can 1188 * assume it's tied up in delalloc reservations. 1189 */ 1190 block_rsv_size = global_rsv_size + 1191 delayed_block_rsv->reserved + 1192 delayed_refs_rsv->reserved + 1193 trans_rsv->reserved; 1194 if (block_rsv_size < space_info->bytes_may_use) 1195 delalloc_size = space_info->bytes_may_use - block_rsv_size; 1196 1197 /* 1198 * We don't want to include the global_rsv in our calculation, 1199 * because that's space we can't touch. Subtract it from the 1200 * block_rsv_size for the next checks. 1201 */ 1202 block_rsv_size -= global_rsv_size; 1203 1204 /* 1205 * We really want to avoid flushing delalloc too much, as it 1206 * could result in poor allocation patterns, so only flush it if 1207 * it's larger than the rest of the pools combined. 1208 */ 1209 if (delalloc_size > block_rsv_size) { 1210 to_reclaim = delalloc_size; 1211 flush = FLUSH_DELALLOC; 1212 } else if (space_info->bytes_pinned > 1213 (delayed_block_rsv->reserved + 1214 delayed_refs_rsv->reserved)) { 1215 to_reclaim = space_info->bytes_pinned; 1216 flush = COMMIT_TRANS; 1217 } else if (delayed_block_rsv->reserved > 1218 delayed_refs_rsv->reserved) { 1219 to_reclaim = delayed_block_rsv->reserved; 1220 flush = FLUSH_DELAYED_ITEMS_NR; 1221 } else { 1222 to_reclaim = delayed_refs_rsv->reserved; 1223 flush = FLUSH_DELAYED_REFS_NR; 1224 } 1225 1226 spin_unlock(&space_info->lock); 1227 1228 /* 1229 * We don't want to reclaim everything, just a portion, so scale 1230 * down the to_reclaim by 1/4. If it takes us down to 0, 1231 * reclaim 1 items worth. 1232 */ 1233 to_reclaim >>= 2; 1234 if (!to_reclaim) 1235 to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1); 1236 flush_space(fs_info, space_info, to_reclaim, flush, true); 1237 cond_resched(); 1238 spin_lock(&space_info->lock); 1239 } 1240 1241 /* We only went through once, back off our clamping. */ 1242 if (loops == 1 && !space_info->reclaim_size) 1243 space_info->clamp = max(1, space_info->clamp - 1); 1244 trace_btrfs_done_preemptive_reclaim(fs_info, space_info); 1245 spin_unlock(&space_info->lock); 1246 } 1247 1248 /* 1249 * FLUSH_DELALLOC_WAIT: 1250 * Space is freed from flushing delalloc in one of two ways. 1251 * 1252 * 1) compression is on and we allocate less space than we reserved 1253 * 2) we are overwriting existing space 1254 * 1255 * For #1 that extra space is reclaimed as soon as the delalloc pages are 1256 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent 1257 * length to ->bytes_reserved, and subtracts the reserved space from 1258 * ->bytes_may_use. 1259 * 1260 * For #2 this is trickier. Once the ordered extent runs we will drop the 1261 * extent in the range we are overwriting, which creates a delayed ref for 1262 * that freed extent. This however is not reclaimed until the transaction 1263 * commits, thus the next stages. 1264 * 1265 * RUN_DELAYED_IPUTS 1266 * If we are freeing inodes, we want to make sure all delayed iputs have 1267 * completed, because they could have been on an inode with i_nlink == 0, and 1268 * thus have been truncated and freed up space. But again this space is not 1269 * immediately re-usable, it comes in the form of a delayed ref, which must be 1270 * run and then the transaction must be committed. 1271 * 1272 * COMMIT_TRANS 1273 * This is where we reclaim all of the pinned space generated by running the 1274 * iputs 1275 * 1276 * ALLOC_CHUNK_FORCE 1277 * For data we start with alloc chunk force, however we could have been full 1278 * before, and then the transaction commit could have freed new block groups, 1279 * so if we now have space to allocate do the force chunk allocation. 1280 */ 1281 static const enum btrfs_flush_state data_flush_states[] = { 1282 FLUSH_DELALLOC_FULL, 1283 RUN_DELAYED_IPUTS, 1284 COMMIT_TRANS, 1285 ALLOC_CHUNK_FORCE, 1286 }; 1287 1288 static void btrfs_async_reclaim_data_space(struct work_struct *work) 1289 { 1290 struct btrfs_fs_info *fs_info; 1291 struct btrfs_space_info *space_info; 1292 u64 last_tickets_id; 1293 enum btrfs_flush_state flush_state = 0; 1294 1295 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); 1296 space_info = fs_info->data_sinfo; 1297 1298 spin_lock(&space_info->lock); 1299 if (list_empty(&space_info->tickets)) { 1300 space_info->flush = 0; 1301 spin_unlock(&space_info->lock); 1302 return; 1303 } 1304 last_tickets_id = space_info->tickets_id; 1305 spin_unlock(&space_info->lock); 1306 1307 while (!space_info->full) { 1308 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); 1309 spin_lock(&space_info->lock); 1310 if (list_empty(&space_info->tickets)) { 1311 space_info->flush = 0; 1312 spin_unlock(&space_info->lock); 1313 return; 1314 } 1315 1316 /* Something happened, fail everything and bail. */ 1317 if (BTRFS_FS_ERROR(fs_info)) 1318 goto aborted_fs; 1319 last_tickets_id = space_info->tickets_id; 1320 spin_unlock(&space_info->lock); 1321 } 1322 1323 while (flush_state < ARRAY_SIZE(data_flush_states)) { 1324 flush_space(fs_info, space_info, U64_MAX, 1325 data_flush_states[flush_state], false); 1326 spin_lock(&space_info->lock); 1327 if (list_empty(&space_info->tickets)) { 1328 space_info->flush = 0; 1329 spin_unlock(&space_info->lock); 1330 return; 1331 } 1332 1333 if (last_tickets_id == space_info->tickets_id) { 1334 flush_state++; 1335 } else { 1336 last_tickets_id = space_info->tickets_id; 1337 flush_state = 0; 1338 } 1339 1340 if (flush_state >= ARRAY_SIZE(data_flush_states)) { 1341 if (space_info->full) { 1342 if (maybe_fail_all_tickets(fs_info, space_info)) 1343 flush_state = 0; 1344 else 1345 space_info->flush = 0; 1346 } else { 1347 flush_state = 0; 1348 } 1349 1350 /* Something happened, fail everything and bail. */ 1351 if (BTRFS_FS_ERROR(fs_info)) 1352 goto aborted_fs; 1353 1354 } 1355 spin_unlock(&space_info->lock); 1356 } 1357 return; 1358 1359 aborted_fs: 1360 maybe_fail_all_tickets(fs_info, space_info); 1361 space_info->flush = 0; 1362 spin_unlock(&space_info->lock); 1363 } 1364 1365 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info) 1366 { 1367 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); 1368 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); 1369 INIT_WORK(&fs_info->preempt_reclaim_work, 1370 btrfs_preempt_reclaim_metadata_space); 1371 } 1372 1373 static const enum btrfs_flush_state priority_flush_states[] = { 1374 FLUSH_DELAYED_ITEMS_NR, 1375 FLUSH_DELAYED_ITEMS, 1376 ALLOC_CHUNK, 1377 }; 1378 1379 static const enum btrfs_flush_state evict_flush_states[] = { 1380 FLUSH_DELAYED_ITEMS_NR, 1381 FLUSH_DELAYED_ITEMS, 1382 FLUSH_DELAYED_REFS_NR, 1383 FLUSH_DELAYED_REFS, 1384 FLUSH_DELALLOC, 1385 FLUSH_DELALLOC_WAIT, 1386 FLUSH_DELALLOC_FULL, 1387 ALLOC_CHUNK, 1388 COMMIT_TRANS, 1389 }; 1390 1391 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, 1392 struct btrfs_space_info *space_info, 1393 struct reserve_ticket *ticket, 1394 const enum btrfs_flush_state *states, 1395 int states_nr) 1396 { 1397 u64 to_reclaim; 1398 int flush_state = 0; 1399 1400 spin_lock(&space_info->lock); 1401 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 1402 /* 1403 * This is the priority reclaim path, so to_reclaim could be >0 still 1404 * because we may have only satisfied the priority tickets and still 1405 * left non priority tickets on the list. We would then have 1406 * to_reclaim but ->bytes == 0. 1407 */ 1408 if (ticket->bytes == 0) { 1409 spin_unlock(&space_info->lock); 1410 return; 1411 } 1412 1413 while (flush_state < states_nr) { 1414 spin_unlock(&space_info->lock); 1415 flush_space(fs_info, space_info, to_reclaim, states[flush_state], 1416 false); 1417 flush_state++; 1418 spin_lock(&space_info->lock); 1419 if (ticket->bytes == 0) { 1420 spin_unlock(&space_info->lock); 1421 return; 1422 } 1423 } 1424 1425 /* 1426 * Attempt to steal from the global rsv if we can, except if the fs was 1427 * turned into error mode due to a transaction abort when flushing space 1428 * above, in that case fail with the abort error instead of returning 1429 * success to the caller if we can steal from the global rsv - this is 1430 * just to have caller fail immeditelly instead of later when trying to 1431 * modify the fs, making it easier to debug -ENOSPC problems. 1432 */ 1433 if (BTRFS_FS_ERROR(fs_info)) { 1434 ticket->error = BTRFS_FS_ERROR(fs_info); 1435 remove_ticket(space_info, ticket); 1436 } else if (!steal_from_global_rsv(fs_info, space_info, ticket)) { 1437 ticket->error = -ENOSPC; 1438 remove_ticket(space_info, ticket); 1439 } 1440 1441 /* 1442 * We must run try_granting_tickets here because we could be a large 1443 * ticket in front of a smaller ticket that can now be satisfied with 1444 * the available space. 1445 */ 1446 btrfs_try_granting_tickets(fs_info, space_info); 1447 spin_unlock(&space_info->lock); 1448 } 1449 1450 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info, 1451 struct btrfs_space_info *space_info, 1452 struct reserve_ticket *ticket) 1453 { 1454 spin_lock(&space_info->lock); 1455 1456 /* We could have been granted before we got here. */ 1457 if (ticket->bytes == 0) { 1458 spin_unlock(&space_info->lock); 1459 return; 1460 } 1461 1462 while (!space_info->full) { 1463 spin_unlock(&space_info->lock); 1464 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); 1465 spin_lock(&space_info->lock); 1466 if (ticket->bytes == 0) { 1467 spin_unlock(&space_info->lock); 1468 return; 1469 } 1470 } 1471 1472 ticket->error = -ENOSPC; 1473 remove_ticket(space_info, ticket); 1474 btrfs_try_granting_tickets(fs_info, space_info); 1475 spin_unlock(&space_info->lock); 1476 } 1477 1478 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, 1479 struct btrfs_space_info *space_info, 1480 struct reserve_ticket *ticket) 1481 1482 { 1483 DEFINE_WAIT(wait); 1484 int ret = 0; 1485 1486 spin_lock(&space_info->lock); 1487 while (ticket->bytes > 0 && ticket->error == 0) { 1488 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); 1489 if (ret) { 1490 /* 1491 * Delete us from the list. After we unlock the space 1492 * info, we don't want the async reclaim job to reserve 1493 * space for this ticket. If that would happen, then the 1494 * ticket's task would not known that space was reserved 1495 * despite getting an error, resulting in a space leak 1496 * (bytes_may_use counter of our space_info). 1497 */ 1498 remove_ticket(space_info, ticket); 1499 ticket->error = -EINTR; 1500 break; 1501 } 1502 spin_unlock(&space_info->lock); 1503 1504 schedule(); 1505 1506 finish_wait(&ticket->wait, &wait); 1507 spin_lock(&space_info->lock); 1508 } 1509 spin_unlock(&space_info->lock); 1510 } 1511 1512 /* 1513 * Do the appropriate flushing and waiting for a ticket. 1514 * 1515 * @fs_info: the filesystem 1516 * @space_info: space info for the reservation 1517 * @ticket: ticket for the reservation 1518 * @start_ns: timestamp when the reservation started 1519 * @orig_bytes: amount of bytes originally reserved 1520 * @flush: how much we can flush 1521 * 1522 * This does the work of figuring out how to flush for the ticket, waiting for 1523 * the reservation, and returning the appropriate error if there is one. 1524 */ 1525 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, 1526 struct btrfs_space_info *space_info, 1527 struct reserve_ticket *ticket, 1528 u64 start_ns, u64 orig_bytes, 1529 enum btrfs_reserve_flush_enum flush) 1530 { 1531 int ret; 1532 1533 switch (flush) { 1534 case BTRFS_RESERVE_FLUSH_DATA: 1535 case BTRFS_RESERVE_FLUSH_ALL: 1536 case BTRFS_RESERVE_FLUSH_ALL_STEAL: 1537 wait_reserve_ticket(fs_info, space_info, ticket); 1538 break; 1539 case BTRFS_RESERVE_FLUSH_LIMIT: 1540 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1541 priority_flush_states, 1542 ARRAY_SIZE(priority_flush_states)); 1543 break; 1544 case BTRFS_RESERVE_FLUSH_EVICT: 1545 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1546 evict_flush_states, 1547 ARRAY_SIZE(evict_flush_states)); 1548 break; 1549 case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE: 1550 priority_reclaim_data_space(fs_info, space_info, ticket); 1551 break; 1552 default: 1553 ASSERT(0); 1554 break; 1555 } 1556 1557 ret = ticket->error; 1558 ASSERT(list_empty(&ticket->list)); 1559 /* 1560 * Check that we can't have an error set if the reservation succeeded, 1561 * as that would confuse tasks and lead them to error out without 1562 * releasing reserved space (if an error happens the expectation is that 1563 * space wasn't reserved at all). 1564 */ 1565 ASSERT(!(ticket->bytes == 0 && ticket->error)); 1566 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, 1567 start_ns, flush, ticket->error); 1568 return ret; 1569 } 1570 1571 /* 1572 * This returns true if this flush state will go through the ordinary flushing 1573 * code. 1574 */ 1575 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) 1576 { 1577 return (flush == BTRFS_RESERVE_FLUSH_ALL) || 1578 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1579 } 1580 1581 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info, 1582 struct btrfs_space_info *space_info) 1583 { 1584 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); 1585 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); 1586 1587 /* 1588 * If we're heavy on ordered operations then clamping won't help us. We 1589 * need to clamp specifically to keep up with dirty'ing buffered 1590 * writers, because there's not a 1:1 correlation of writing delalloc 1591 * and freeing space, like there is with flushing delayed refs or 1592 * delayed nodes. If we're already more ordered than delalloc then 1593 * we're keeping up, otherwise we aren't and should probably clamp. 1594 */ 1595 if (ordered < delalloc) 1596 space_info->clamp = min(space_info->clamp + 1, 8); 1597 } 1598 1599 static inline bool can_steal(enum btrfs_reserve_flush_enum flush) 1600 { 1601 return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || 1602 flush == BTRFS_RESERVE_FLUSH_EVICT); 1603 } 1604 1605 /* 1606 * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want to 1607 * fail as quickly as possible. 1608 */ 1609 static inline bool can_ticket(enum btrfs_reserve_flush_enum flush) 1610 { 1611 return (flush != BTRFS_RESERVE_NO_FLUSH && 1612 flush != BTRFS_RESERVE_FLUSH_EMERGENCY); 1613 } 1614 1615 /* 1616 * Try to reserve bytes from the block_rsv's space. 1617 * 1618 * @fs_info: the filesystem 1619 * @space_info: space info we want to allocate from 1620 * @orig_bytes: number of bytes we want 1621 * @flush: whether or not we can flush to make our reservation 1622 * 1623 * This will reserve orig_bytes number of bytes from the space info associated 1624 * with the block_rsv. If there is not enough space it will make an attempt to 1625 * flush out space to make room. It will do this by flushing delalloc if 1626 * possible or committing the transaction. If flush is 0 then no attempts to 1627 * regain reservations will be made and this will fail if there is not enough 1628 * space already. 1629 */ 1630 static int __reserve_bytes(struct btrfs_fs_info *fs_info, 1631 struct btrfs_space_info *space_info, u64 orig_bytes, 1632 enum btrfs_reserve_flush_enum flush) 1633 { 1634 struct work_struct *async_work; 1635 struct reserve_ticket ticket; 1636 u64 start_ns = 0; 1637 u64 used; 1638 int ret = -ENOSPC; 1639 bool pending_tickets; 1640 1641 ASSERT(orig_bytes); 1642 /* 1643 * If have a transaction handle (current->journal_info != NULL), then 1644 * the flush method can not be neither BTRFS_RESERVE_FLUSH_ALL* nor 1645 * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those 1646 * flushing methods can trigger transaction commits. 1647 */ 1648 if (current->journal_info) { 1649 /* One assert per line for easier debugging. */ 1650 ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL); 1651 ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL); 1652 ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT); 1653 } 1654 1655 if (flush == BTRFS_RESERVE_FLUSH_DATA) 1656 async_work = &fs_info->async_data_reclaim_work; 1657 else 1658 async_work = &fs_info->async_reclaim_work; 1659 1660 spin_lock(&space_info->lock); 1661 used = btrfs_space_info_used(space_info, true); 1662 1663 /* 1664 * We don't want NO_FLUSH allocations to jump everybody, they can 1665 * generally handle ENOSPC in a different way, so treat them the same as 1666 * normal flushers when it comes to skipping pending tickets. 1667 */ 1668 if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH)) 1669 pending_tickets = !list_empty(&space_info->tickets) || 1670 !list_empty(&space_info->priority_tickets); 1671 else 1672 pending_tickets = !list_empty(&space_info->priority_tickets); 1673 1674 /* 1675 * Carry on if we have enough space (short-circuit) OR call 1676 * can_overcommit() to ensure we can overcommit to continue. 1677 */ 1678 if (!pending_tickets && 1679 ((used + orig_bytes <= space_info->total_bytes) || 1680 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) { 1681 btrfs_space_info_update_bytes_may_use(fs_info, space_info, 1682 orig_bytes); 1683 ret = 0; 1684 } 1685 1686 /* 1687 * Things are dire, we need to make a reservation so we don't abort. We 1688 * will let this reservation go through as long as we have actual space 1689 * left to allocate for the block. 1690 */ 1691 if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) { 1692 used = btrfs_space_info_used(space_info, false); 1693 if (used + orig_bytes <= space_info->total_bytes) { 1694 btrfs_space_info_update_bytes_may_use(fs_info, space_info, 1695 orig_bytes); 1696 ret = 0; 1697 } 1698 } 1699 1700 /* 1701 * If we couldn't make a reservation then setup our reservation ticket 1702 * and kick the async worker if it's not already running. 1703 * 1704 * If we are a priority flusher then we just need to add our ticket to 1705 * the list and we will do our own flushing further down. 1706 */ 1707 if (ret && can_ticket(flush)) { 1708 ticket.bytes = orig_bytes; 1709 ticket.error = 0; 1710 space_info->reclaim_size += ticket.bytes; 1711 init_waitqueue_head(&ticket.wait); 1712 ticket.steal = can_steal(flush); 1713 if (trace_btrfs_reserve_ticket_enabled()) 1714 start_ns = ktime_get_ns(); 1715 1716 if (flush == BTRFS_RESERVE_FLUSH_ALL || 1717 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || 1718 flush == BTRFS_RESERVE_FLUSH_DATA) { 1719 list_add_tail(&ticket.list, &space_info->tickets); 1720 if (!space_info->flush) { 1721 /* 1722 * We were forced to add a reserve ticket, so 1723 * our preemptive flushing is unable to keep 1724 * up. Clamp down on the threshold for the 1725 * preemptive flushing in order to keep up with 1726 * the workload. 1727 */ 1728 maybe_clamp_preempt(fs_info, space_info); 1729 1730 space_info->flush = 1; 1731 trace_btrfs_trigger_flush(fs_info, 1732 space_info->flags, 1733 orig_bytes, flush, 1734 "enospc"); 1735 queue_work(system_unbound_wq, async_work); 1736 } 1737 } else { 1738 list_add_tail(&ticket.list, 1739 &space_info->priority_tickets); 1740 } 1741 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 1742 /* 1743 * We will do the space reservation dance during log replay, 1744 * which means we won't have fs_info->fs_root set, so don't do 1745 * the async reclaim as we will panic. 1746 */ 1747 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && 1748 !work_busy(&fs_info->preempt_reclaim_work) && 1749 need_preemptive_reclaim(fs_info, space_info)) { 1750 trace_btrfs_trigger_flush(fs_info, space_info->flags, 1751 orig_bytes, flush, "preempt"); 1752 queue_work(system_unbound_wq, 1753 &fs_info->preempt_reclaim_work); 1754 } 1755 } 1756 spin_unlock(&space_info->lock); 1757 if (!ret || !can_ticket(flush)) 1758 return ret; 1759 1760 return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns, 1761 orig_bytes, flush); 1762 } 1763 1764 /* 1765 * Try to reserve metadata bytes from the block_rsv's space. 1766 * 1767 * @fs_info: the filesystem 1768 * @block_rsv: block_rsv we're allocating for 1769 * @orig_bytes: number of bytes we want 1770 * @flush: whether or not we can flush to make our reservation 1771 * 1772 * This will reserve orig_bytes number of bytes from the space info associated 1773 * with the block_rsv. If there is not enough space it will make an attempt to 1774 * flush out space to make room. It will do this by flushing delalloc if 1775 * possible or committing the transaction. If flush is 0 then no attempts to 1776 * regain reservations will be made and this will fail if there is not enough 1777 * space already. 1778 */ 1779 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, 1780 struct btrfs_block_rsv *block_rsv, 1781 u64 orig_bytes, 1782 enum btrfs_reserve_flush_enum flush) 1783 { 1784 int ret; 1785 1786 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush); 1787 if (ret == -ENOSPC) { 1788 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1789 block_rsv->space_info->flags, 1790 orig_bytes, 1); 1791 1792 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1793 btrfs_dump_space_info(fs_info, block_rsv->space_info, 1794 orig_bytes, 0); 1795 } 1796 return ret; 1797 } 1798 1799 /* 1800 * Try to reserve data bytes for an allocation. 1801 * 1802 * @fs_info: the filesystem 1803 * @bytes: number of bytes we need 1804 * @flush: how we are allowed to flush 1805 * 1806 * This will reserve bytes from the data space info. If there is not enough 1807 * space then we will attempt to flush space as specified by flush. 1808 */ 1809 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, 1810 enum btrfs_reserve_flush_enum flush) 1811 { 1812 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; 1813 int ret; 1814 1815 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA || 1816 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE || 1817 flush == BTRFS_RESERVE_NO_FLUSH); 1818 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); 1819 1820 ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush); 1821 if (ret == -ENOSPC) { 1822 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1823 data_sinfo->flags, bytes, 1); 1824 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1825 btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0); 1826 } 1827 return ret; 1828 } 1829 1830 /* Dump all the space infos when we abort a transaction due to ENOSPC. */ 1831 __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info) 1832 { 1833 struct btrfs_space_info *space_info; 1834 1835 btrfs_info(fs_info, "dumping space info:"); 1836 list_for_each_entry(space_info, &fs_info->space_info, list) { 1837 spin_lock(&space_info->lock); 1838 __btrfs_dump_space_info(fs_info, space_info); 1839 spin_unlock(&space_info->lock); 1840 } 1841 dump_global_block_rsv(fs_info); 1842 } 1843 1844 /* 1845 * Account the unused space of all the readonly block group in the space_info. 1846 * takes mirrors into account. 1847 */ 1848 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) 1849 { 1850 struct btrfs_block_group *block_group; 1851 u64 free_bytes = 0; 1852 int factor; 1853 1854 /* It's df, we don't care if it's racy */ 1855 if (list_empty(&sinfo->ro_bgs)) 1856 return 0; 1857 1858 spin_lock(&sinfo->lock); 1859 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) { 1860 spin_lock(&block_group->lock); 1861 1862 if (!block_group->ro) { 1863 spin_unlock(&block_group->lock); 1864 continue; 1865 } 1866 1867 factor = btrfs_bg_type_to_factor(block_group->flags); 1868 free_bytes += (block_group->length - 1869 block_group->used) * factor; 1870 1871 spin_unlock(&block_group->lock); 1872 } 1873 spin_unlock(&sinfo->lock); 1874 1875 return free_bytes; 1876 } 1877