1 // SPDX-License-Identifier: GPL-2.0 2 3 #include "misc.h" 4 #include "ctree.h" 5 #include "space-info.h" 6 #include "sysfs.h" 7 #include "volumes.h" 8 #include "free-space-cache.h" 9 #include "ordered-data.h" 10 #include "transaction.h" 11 #include "block-group.h" 12 13 /* 14 * HOW DOES SPACE RESERVATION WORK 15 * 16 * If you want to know about delalloc specifically, there is a separate comment 17 * for that with the delalloc code. This comment is about how the whole system 18 * works generally. 19 * 20 * BASIC CONCEPTS 21 * 22 * 1) space_info. This is the ultimate arbiter of how much space we can use. 23 * There's a description of the bytes_ fields with the struct declaration, 24 * refer to that for specifics on each field. Suffice it to say that for 25 * reservations we care about total_bytes - SUM(space_info->bytes_) when 26 * determining if there is space to make an allocation. There is a space_info 27 * for METADATA, SYSTEM, and DATA areas. 28 * 29 * 2) block_rsv's. These are basically buckets for every different type of 30 * metadata reservation we have. You can see the comment in the block_rsv 31 * code on the rules for each type, but generally block_rsv->reserved is how 32 * much space is accounted for in space_info->bytes_may_use. 33 * 34 * 3) btrfs_calc*_size. These are the worst case calculations we used based 35 * on the number of items we will want to modify. We have one for changing 36 * items, and one for inserting new items. Generally we use these helpers to 37 * determine the size of the block reserves, and then use the actual bytes 38 * values to adjust the space_info counters. 39 * 40 * MAKING RESERVATIONS, THE NORMAL CASE 41 * 42 * We call into either btrfs_reserve_data_bytes() or 43 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with 44 * num_bytes we want to reserve. 45 * 46 * ->reserve 47 * space_info->bytes_may_reserve += num_bytes 48 * 49 * ->extent allocation 50 * Call btrfs_add_reserved_bytes() which does 51 * space_info->bytes_may_reserve -= num_bytes 52 * space_info->bytes_reserved += extent_bytes 53 * 54 * ->insert reference 55 * Call btrfs_update_block_group() which does 56 * space_info->bytes_reserved -= extent_bytes 57 * space_info->bytes_used += extent_bytes 58 * 59 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority) 60 * 61 * Assume we are unable to simply make the reservation because we do not have 62 * enough space 63 * 64 * -> __reserve_bytes 65 * create a reserve_ticket with ->bytes set to our reservation, add it to 66 * the tail of space_info->tickets, kick async flush thread 67 * 68 * ->handle_reserve_ticket 69 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set 70 * on the ticket. 71 * 72 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space 73 * Flushes various things attempting to free up space. 74 * 75 * -> btrfs_try_granting_tickets() 76 * This is called by anything that either subtracts space from 77 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the 78 * space_info->total_bytes. This loops through the ->priority_tickets and 79 * then the ->tickets list checking to see if the reservation can be 80 * completed. If it can the space is added to space_info->bytes_may_use and 81 * the ticket is woken up. 82 * 83 * -> ticket wakeup 84 * Check if ->bytes == 0, if it does we got our reservation and we can carry 85 * on, if not return the appropriate error (ENOSPC, but can be EINTR if we 86 * were interrupted.) 87 * 88 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY 89 * 90 * Same as the above, except we add ourselves to the 91 * space_info->priority_tickets, and we do not use ticket->wait, we simply 92 * call flush_space() ourselves for the states that are safe for us to call 93 * without deadlocking and hope for the best. 94 * 95 * THE FLUSHING STATES 96 * 97 * Generally speaking we will have two cases for each state, a "nice" state 98 * and a "ALL THE THINGS" state. In btrfs we delay a lot of work in order to 99 * reduce the locking over head on the various trees, and even to keep from 100 * doing any work at all in the case of delayed refs. Each of these delayed 101 * things however hold reservations, and so letting them run allows us to 102 * reclaim space so we can make new reservations. 103 * 104 * FLUSH_DELAYED_ITEMS 105 * Every inode has a delayed item to update the inode. Take a simple write 106 * for example, we would update the inode item at write time to update the 107 * mtime, and then again at finish_ordered_io() time in order to update the 108 * isize or bytes. We keep these delayed items to coalesce these operations 109 * into a single operation done on demand. These are an easy way to reclaim 110 * metadata space. 111 * 112 * FLUSH_DELALLOC 113 * Look at the delalloc comment to get an idea of how much space is reserved 114 * for delayed allocation. We can reclaim some of this space simply by 115 * running delalloc, but usually we need to wait for ordered extents to 116 * reclaim the bulk of this space. 117 * 118 * FLUSH_DELAYED_REFS 119 * We have a block reserve for the outstanding delayed refs space, and every 120 * delayed ref operation holds a reservation. Running these is a quick way 121 * to reclaim space, but we want to hold this until the end because COW can 122 * churn a lot and we can avoid making some extent tree modifications if we 123 * are able to delay for as long as possible. 124 * 125 * ALLOC_CHUNK 126 * We will skip this the first time through space reservation, because of 127 * overcommit and we don't want to have a lot of useless metadata space when 128 * our worst case reservations will likely never come true. 129 * 130 * RUN_DELAYED_IPUTS 131 * If we're freeing inodes we're likely freeing checksums, file extent 132 * items, and extent tree items. Loads of space could be freed up by these 133 * operations, however they won't be usable until the transaction commits. 134 * 135 * COMMIT_TRANS 136 * may_commit_transaction() is the ultimate arbiter on whether we commit the 137 * transaction or not. In order to avoid constantly churning we do all the 138 * above flushing first and then commit the transaction as the last resort. 139 * However we need to take into account things like pinned space that would 140 * be freed, plus any delayed work we may not have gotten rid of in the case 141 * of metadata. 142 * 143 * OVERCOMMIT 144 * 145 * Because we hold so many reservations for metadata we will allow you to 146 * reserve more space than is currently free in the currently allocate 147 * metadata space. This only happens with metadata, data does not allow 148 * overcommitting. 149 * 150 * You can see the current logic for when we allow overcommit in 151 * btrfs_can_overcommit(), but it only applies to unallocated space. If there 152 * is no unallocated space to be had, all reservations are kept within the 153 * free space in the allocated metadata chunks. 154 * 155 * Because of overcommitting, you generally want to use the 156 * btrfs_can_overcommit() logic for metadata allocations, as it does the right 157 * thing with or without extra unallocated space. 158 */ 159 160 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, 161 bool may_use_included) 162 { 163 ASSERT(s_info); 164 return s_info->bytes_used + s_info->bytes_reserved + 165 s_info->bytes_pinned + s_info->bytes_readonly + 166 (may_use_included ? s_info->bytes_may_use : 0); 167 } 168 169 /* 170 * after adding space to the filesystem, we need to clear the full flags 171 * on all the space infos. 172 */ 173 void btrfs_clear_space_info_full(struct btrfs_fs_info *info) 174 { 175 struct list_head *head = &info->space_info; 176 struct btrfs_space_info *found; 177 178 rcu_read_lock(); 179 list_for_each_entry_rcu(found, head, list) 180 found->full = 0; 181 rcu_read_unlock(); 182 } 183 184 static int create_space_info(struct btrfs_fs_info *info, u64 flags) 185 { 186 187 struct btrfs_space_info *space_info; 188 int i; 189 int ret; 190 191 space_info = kzalloc(sizeof(*space_info), GFP_NOFS); 192 if (!space_info) 193 return -ENOMEM; 194 195 ret = percpu_counter_init(&space_info->total_bytes_pinned, 0, 196 GFP_KERNEL); 197 if (ret) { 198 kfree(space_info); 199 return ret; 200 } 201 202 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 203 INIT_LIST_HEAD(&space_info->block_groups[i]); 204 init_rwsem(&space_info->groups_sem); 205 spin_lock_init(&space_info->lock); 206 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; 207 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 208 INIT_LIST_HEAD(&space_info->ro_bgs); 209 INIT_LIST_HEAD(&space_info->tickets); 210 INIT_LIST_HEAD(&space_info->priority_tickets); 211 212 ret = btrfs_sysfs_add_space_info_type(info, space_info); 213 if (ret) 214 return ret; 215 216 list_add_rcu(&space_info->list, &info->space_info); 217 if (flags & BTRFS_BLOCK_GROUP_DATA) 218 info->data_sinfo = space_info; 219 220 return ret; 221 } 222 223 int btrfs_init_space_info(struct btrfs_fs_info *fs_info) 224 { 225 struct btrfs_super_block *disk_super; 226 u64 features; 227 u64 flags; 228 int mixed = 0; 229 int ret; 230 231 disk_super = fs_info->super_copy; 232 if (!btrfs_super_root(disk_super)) 233 return -EINVAL; 234 235 features = btrfs_super_incompat_flags(disk_super); 236 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) 237 mixed = 1; 238 239 flags = BTRFS_BLOCK_GROUP_SYSTEM; 240 ret = create_space_info(fs_info, flags); 241 if (ret) 242 goto out; 243 244 if (mixed) { 245 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; 246 ret = create_space_info(fs_info, flags); 247 } else { 248 flags = BTRFS_BLOCK_GROUP_METADATA; 249 ret = create_space_info(fs_info, flags); 250 if (ret) 251 goto out; 252 253 flags = BTRFS_BLOCK_GROUP_DATA; 254 ret = create_space_info(fs_info, flags); 255 } 256 out: 257 return ret; 258 } 259 260 void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags, 261 u64 total_bytes, u64 bytes_used, 262 u64 bytes_readonly, 263 struct btrfs_space_info **space_info) 264 { 265 struct btrfs_space_info *found; 266 int factor; 267 268 factor = btrfs_bg_type_to_factor(flags); 269 270 found = btrfs_find_space_info(info, flags); 271 ASSERT(found); 272 spin_lock(&found->lock); 273 found->total_bytes += total_bytes; 274 found->disk_total += total_bytes * factor; 275 found->bytes_used += bytes_used; 276 found->disk_used += bytes_used * factor; 277 found->bytes_readonly += bytes_readonly; 278 if (total_bytes > 0) 279 found->full = 0; 280 btrfs_try_granting_tickets(info, found); 281 spin_unlock(&found->lock); 282 *space_info = found; 283 } 284 285 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, 286 u64 flags) 287 { 288 struct list_head *head = &info->space_info; 289 struct btrfs_space_info *found; 290 291 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK; 292 293 rcu_read_lock(); 294 list_for_each_entry_rcu(found, head, list) { 295 if (found->flags & flags) { 296 rcu_read_unlock(); 297 return found; 298 } 299 } 300 rcu_read_unlock(); 301 return NULL; 302 } 303 304 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global) 305 { 306 return (global->size << 1); 307 } 308 309 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, 310 struct btrfs_space_info *space_info, 311 enum btrfs_reserve_flush_enum flush) 312 { 313 u64 profile; 314 u64 avail; 315 int factor; 316 317 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) 318 profile = btrfs_system_alloc_profile(fs_info); 319 else 320 profile = btrfs_metadata_alloc_profile(fs_info); 321 322 avail = atomic64_read(&fs_info->free_chunk_space); 323 324 /* 325 * If we have dup, raid1 or raid10 then only half of the free 326 * space is actually usable. For raid56, the space info used 327 * doesn't include the parity drive, so we don't have to 328 * change the math 329 */ 330 factor = btrfs_bg_type_to_factor(profile); 331 avail = div_u64(avail, factor); 332 333 /* 334 * If we aren't flushing all things, let us overcommit up to 335 * 1/2th of the space. If we can flush, don't let us overcommit 336 * too much, let it overcommit up to 1/8 of the space. 337 */ 338 if (flush == BTRFS_RESERVE_FLUSH_ALL) 339 avail >>= 3; 340 else 341 avail >>= 1; 342 return avail; 343 } 344 345 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, 346 struct btrfs_space_info *space_info, u64 bytes, 347 enum btrfs_reserve_flush_enum flush) 348 { 349 u64 avail; 350 u64 used; 351 352 /* Don't overcommit when in mixed mode */ 353 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) 354 return 0; 355 356 used = btrfs_space_info_used(space_info, true); 357 avail = calc_available_free_space(fs_info, space_info, flush); 358 359 if (used + bytes < space_info->total_bytes + avail) 360 return 1; 361 return 0; 362 } 363 364 static void remove_ticket(struct btrfs_space_info *space_info, 365 struct reserve_ticket *ticket) 366 { 367 if (!list_empty(&ticket->list)) { 368 list_del_init(&ticket->list); 369 ASSERT(space_info->reclaim_size >= ticket->bytes); 370 space_info->reclaim_size -= ticket->bytes; 371 } 372 } 373 374 /* 375 * This is for space we already have accounted in space_info->bytes_may_use, so 376 * basically when we're returning space from block_rsv's. 377 */ 378 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, 379 struct btrfs_space_info *space_info) 380 { 381 struct list_head *head; 382 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; 383 384 lockdep_assert_held(&space_info->lock); 385 386 head = &space_info->priority_tickets; 387 again: 388 while (!list_empty(head)) { 389 struct reserve_ticket *ticket; 390 u64 used = btrfs_space_info_used(space_info, true); 391 392 ticket = list_first_entry(head, struct reserve_ticket, list); 393 394 /* Check and see if our ticket can be satisified now. */ 395 if ((used + ticket->bytes <= space_info->total_bytes) || 396 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, 397 flush)) { 398 btrfs_space_info_update_bytes_may_use(fs_info, 399 space_info, 400 ticket->bytes); 401 remove_ticket(space_info, ticket); 402 ticket->bytes = 0; 403 space_info->tickets_id++; 404 wake_up(&ticket->wait); 405 } else { 406 break; 407 } 408 } 409 410 if (head == &space_info->priority_tickets) { 411 head = &space_info->tickets; 412 flush = BTRFS_RESERVE_FLUSH_ALL; 413 goto again; 414 } 415 } 416 417 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \ 418 do { \ 419 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \ 420 spin_lock(&__rsv->lock); \ 421 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \ 422 __rsv->size, __rsv->reserved); \ 423 spin_unlock(&__rsv->lock); \ 424 } while (0) 425 426 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 427 struct btrfs_space_info *info) 428 { 429 lockdep_assert_held(&info->lock); 430 431 btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull", 432 info->flags, 433 info->total_bytes - btrfs_space_info_used(info, true), 434 info->full ? "" : "not "); 435 btrfs_info(fs_info, 436 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu", 437 info->total_bytes, info->bytes_used, info->bytes_pinned, 438 info->bytes_reserved, info->bytes_may_use, 439 info->bytes_readonly); 440 441 DUMP_BLOCK_RSV(fs_info, global_block_rsv); 442 DUMP_BLOCK_RSV(fs_info, trans_block_rsv); 443 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv); 444 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv); 445 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); 446 447 } 448 449 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 450 struct btrfs_space_info *info, u64 bytes, 451 int dump_block_groups) 452 { 453 struct btrfs_block_group *cache; 454 int index = 0; 455 456 spin_lock(&info->lock); 457 __btrfs_dump_space_info(fs_info, info); 458 spin_unlock(&info->lock); 459 460 if (!dump_block_groups) 461 return; 462 463 down_read(&info->groups_sem); 464 again: 465 list_for_each_entry(cache, &info->block_groups[index], list) { 466 spin_lock(&cache->lock); 467 btrfs_info(fs_info, 468 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s", 469 cache->start, cache->length, cache->used, cache->pinned, 470 cache->reserved, cache->ro ? "[readonly]" : ""); 471 spin_unlock(&cache->lock); 472 btrfs_dump_free_space(cache, bytes); 473 } 474 if (++index < BTRFS_NR_RAID_TYPES) 475 goto again; 476 up_read(&info->groups_sem); 477 } 478 479 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, 480 u64 to_reclaim) 481 { 482 u64 bytes; 483 u64 nr; 484 485 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 486 nr = div64_u64(to_reclaim, bytes); 487 if (!nr) 488 nr = 1; 489 return nr; 490 } 491 492 #define EXTENT_SIZE_PER_ITEM SZ_256K 493 494 /* 495 * shrink metadata reservation for delalloc 496 */ 497 static void shrink_delalloc(struct btrfs_fs_info *fs_info, 498 struct btrfs_space_info *space_info, 499 u64 to_reclaim, bool wait_ordered) 500 { 501 struct btrfs_trans_handle *trans; 502 u64 delalloc_bytes; 503 u64 dio_bytes; 504 u64 items; 505 long time_left; 506 int loops; 507 508 /* Calc the number of the pages we need flush for space reservation */ 509 if (to_reclaim == U64_MAX) { 510 items = U64_MAX; 511 } else { 512 /* 513 * to_reclaim is set to however much metadata we need to 514 * reclaim, but reclaiming that much data doesn't really track 515 * exactly, so increase the amount to reclaim by 2x in order to 516 * make sure we're flushing enough delalloc to hopefully reclaim 517 * some metadata reservations. 518 */ 519 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; 520 to_reclaim = items * EXTENT_SIZE_PER_ITEM; 521 } 522 523 trans = (struct btrfs_trans_handle *)current->journal_info; 524 525 delalloc_bytes = percpu_counter_sum_positive( 526 &fs_info->delalloc_bytes); 527 dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes); 528 if (delalloc_bytes == 0 && dio_bytes == 0) { 529 if (trans) 530 return; 531 if (wait_ordered) 532 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 533 return; 534 } 535 536 /* 537 * If we are doing more ordered than delalloc we need to just wait on 538 * ordered extents, otherwise we'll waste time trying to flush delalloc 539 * that likely won't give us the space back we need. 540 */ 541 if (dio_bytes > delalloc_bytes) 542 wait_ordered = true; 543 544 loops = 0; 545 while ((delalloc_bytes || dio_bytes) && loops < 3) { 546 btrfs_start_delalloc_roots(fs_info, items); 547 548 spin_lock(&space_info->lock); 549 if (list_empty(&space_info->tickets) && 550 list_empty(&space_info->priority_tickets)) { 551 spin_unlock(&space_info->lock); 552 break; 553 } 554 spin_unlock(&space_info->lock); 555 556 loops++; 557 if (wait_ordered && !trans) { 558 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); 559 } else { 560 time_left = schedule_timeout_killable(1); 561 if (time_left) 562 break; 563 } 564 delalloc_bytes = percpu_counter_sum_positive( 565 &fs_info->delalloc_bytes); 566 dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes); 567 } 568 } 569 570 /** 571 * maybe_commit_transaction - possibly commit the transaction if its ok to 572 * @root - the root we're allocating for 573 * @bytes - the number of bytes we want to reserve 574 * @force - force the commit 575 * 576 * This will check to make sure that committing the transaction will actually 577 * get us somewhere and then commit the transaction if it does. Otherwise it 578 * will return -ENOSPC. 579 */ 580 static int may_commit_transaction(struct btrfs_fs_info *fs_info, 581 struct btrfs_space_info *space_info) 582 { 583 struct reserve_ticket *ticket = NULL; 584 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; 585 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; 586 struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv; 587 struct btrfs_trans_handle *trans; 588 u64 bytes_needed; 589 u64 reclaim_bytes = 0; 590 u64 cur_free_bytes = 0; 591 592 trans = (struct btrfs_trans_handle *)current->journal_info; 593 if (trans) 594 return -EAGAIN; 595 596 spin_lock(&space_info->lock); 597 cur_free_bytes = btrfs_space_info_used(space_info, true); 598 if (cur_free_bytes < space_info->total_bytes) 599 cur_free_bytes = space_info->total_bytes - cur_free_bytes; 600 else 601 cur_free_bytes = 0; 602 603 if (!list_empty(&space_info->priority_tickets)) 604 ticket = list_first_entry(&space_info->priority_tickets, 605 struct reserve_ticket, list); 606 else if (!list_empty(&space_info->tickets)) 607 ticket = list_first_entry(&space_info->tickets, 608 struct reserve_ticket, list); 609 bytes_needed = (ticket) ? ticket->bytes : 0; 610 611 if (bytes_needed > cur_free_bytes) 612 bytes_needed -= cur_free_bytes; 613 else 614 bytes_needed = 0; 615 spin_unlock(&space_info->lock); 616 617 if (!bytes_needed) 618 return 0; 619 620 trans = btrfs_join_transaction(fs_info->extent_root); 621 if (IS_ERR(trans)) 622 return PTR_ERR(trans); 623 624 /* 625 * See if there is enough pinned space to make this reservation, or if 626 * we have block groups that are going to be freed, allowing us to 627 * possibly do a chunk allocation the next loop through. 628 */ 629 if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) || 630 __percpu_counter_compare(&space_info->total_bytes_pinned, 631 bytes_needed, 632 BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0) 633 goto commit; 634 635 /* 636 * See if there is some space in the delayed insertion reservation for 637 * this reservation. 638 */ 639 if (space_info != delayed_rsv->space_info) 640 goto enospc; 641 642 spin_lock(&delayed_rsv->lock); 643 reclaim_bytes += delayed_rsv->reserved; 644 spin_unlock(&delayed_rsv->lock); 645 646 spin_lock(&delayed_refs_rsv->lock); 647 reclaim_bytes += delayed_refs_rsv->reserved; 648 spin_unlock(&delayed_refs_rsv->lock); 649 650 spin_lock(&trans_rsv->lock); 651 reclaim_bytes += trans_rsv->reserved; 652 spin_unlock(&trans_rsv->lock); 653 654 if (reclaim_bytes >= bytes_needed) 655 goto commit; 656 bytes_needed -= reclaim_bytes; 657 658 if (__percpu_counter_compare(&space_info->total_bytes_pinned, 659 bytes_needed, 660 BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0) 661 goto enospc; 662 663 commit: 664 return btrfs_commit_transaction(trans); 665 enospc: 666 btrfs_end_transaction(trans); 667 return -ENOSPC; 668 } 669 670 /* 671 * Try to flush some data based on policy set by @state. This is only advisory 672 * and may fail for various reasons. The caller is supposed to examine the 673 * state of @space_info to detect the outcome. 674 */ 675 static void flush_space(struct btrfs_fs_info *fs_info, 676 struct btrfs_space_info *space_info, u64 num_bytes, 677 int state) 678 { 679 struct btrfs_root *root = fs_info->extent_root; 680 struct btrfs_trans_handle *trans; 681 int nr; 682 int ret = 0; 683 684 switch (state) { 685 case FLUSH_DELAYED_ITEMS_NR: 686 case FLUSH_DELAYED_ITEMS: 687 if (state == FLUSH_DELAYED_ITEMS_NR) 688 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2; 689 else 690 nr = -1; 691 692 trans = btrfs_join_transaction(root); 693 if (IS_ERR(trans)) { 694 ret = PTR_ERR(trans); 695 break; 696 } 697 ret = btrfs_run_delayed_items_nr(trans, nr); 698 btrfs_end_transaction(trans); 699 break; 700 case FLUSH_DELALLOC: 701 case FLUSH_DELALLOC_WAIT: 702 shrink_delalloc(fs_info, space_info, num_bytes, 703 state == FLUSH_DELALLOC_WAIT); 704 break; 705 case FLUSH_DELAYED_REFS_NR: 706 case FLUSH_DELAYED_REFS: 707 trans = btrfs_join_transaction(root); 708 if (IS_ERR(trans)) { 709 ret = PTR_ERR(trans); 710 break; 711 } 712 if (state == FLUSH_DELAYED_REFS_NR) 713 nr = calc_reclaim_items_nr(fs_info, num_bytes); 714 else 715 nr = 0; 716 btrfs_run_delayed_refs(trans, nr); 717 btrfs_end_transaction(trans); 718 break; 719 case ALLOC_CHUNK: 720 case ALLOC_CHUNK_FORCE: 721 trans = btrfs_join_transaction(root); 722 if (IS_ERR(trans)) { 723 ret = PTR_ERR(trans); 724 break; 725 } 726 ret = btrfs_chunk_alloc(trans, 727 btrfs_get_alloc_profile(fs_info, space_info->flags), 728 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE : 729 CHUNK_ALLOC_FORCE); 730 btrfs_end_transaction(trans); 731 if (ret > 0 || ret == -ENOSPC) 732 ret = 0; 733 break; 734 case RUN_DELAYED_IPUTS: 735 /* 736 * If we have pending delayed iputs then we could free up a 737 * bunch of pinned space, so make sure we run the iputs before 738 * we do our pinned bytes check below. 739 */ 740 btrfs_run_delayed_iputs(fs_info); 741 btrfs_wait_on_delayed_iputs(fs_info); 742 break; 743 case COMMIT_TRANS: 744 ret = may_commit_transaction(fs_info, space_info); 745 break; 746 default: 747 ret = -ENOSPC; 748 break; 749 } 750 751 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, 752 ret); 753 return; 754 } 755 756 static inline u64 757 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, 758 struct btrfs_space_info *space_info) 759 { 760 u64 used; 761 u64 avail; 762 u64 expected; 763 u64 to_reclaim = space_info->reclaim_size; 764 765 lockdep_assert_held(&space_info->lock); 766 767 avail = calc_available_free_space(fs_info, space_info, 768 BTRFS_RESERVE_FLUSH_ALL); 769 used = btrfs_space_info_used(space_info, true); 770 771 /* 772 * We may be flushing because suddenly we have less space than we had 773 * before, and now we're well over-committed based on our current free 774 * space. If that's the case add in our overage so we make sure to put 775 * appropriate pressure on the flushing state machine. 776 */ 777 if (space_info->total_bytes + avail < used) 778 to_reclaim += used - (space_info->total_bytes + avail); 779 780 if (to_reclaim) 781 return to_reclaim; 782 783 to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M); 784 if (btrfs_can_overcommit(fs_info, space_info, to_reclaim, 785 BTRFS_RESERVE_FLUSH_ALL)) 786 return 0; 787 788 used = btrfs_space_info_used(space_info, true); 789 790 if (btrfs_can_overcommit(fs_info, space_info, SZ_1M, 791 BTRFS_RESERVE_FLUSH_ALL)) 792 expected = div_factor_fine(space_info->total_bytes, 95); 793 else 794 expected = div_factor_fine(space_info->total_bytes, 90); 795 796 if (used > expected) 797 to_reclaim = used - expected; 798 else 799 to_reclaim = 0; 800 to_reclaim = min(to_reclaim, space_info->bytes_may_use + 801 space_info->bytes_reserved); 802 return to_reclaim; 803 } 804 805 static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, 806 struct btrfs_space_info *space_info, 807 u64 used) 808 { 809 u64 thresh = div_factor_fine(space_info->total_bytes, 98); 810 811 /* If we're just plain full then async reclaim just slows us down. */ 812 if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh) 813 return 0; 814 815 if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info)) 816 return 0; 817 818 return (used >= thresh && !btrfs_fs_closing(fs_info) && 819 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); 820 } 821 822 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, 823 struct btrfs_space_info *space_info, 824 struct reserve_ticket *ticket) 825 { 826 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 827 u64 min_bytes; 828 829 if (global_rsv->space_info != space_info) 830 return false; 831 832 spin_lock(&global_rsv->lock); 833 min_bytes = div_factor(global_rsv->size, 1); 834 if (global_rsv->reserved < min_bytes + ticket->bytes) { 835 spin_unlock(&global_rsv->lock); 836 return false; 837 } 838 global_rsv->reserved -= ticket->bytes; 839 remove_ticket(space_info, ticket); 840 ticket->bytes = 0; 841 wake_up(&ticket->wait); 842 space_info->tickets_id++; 843 if (global_rsv->reserved < global_rsv->size) 844 global_rsv->full = 0; 845 spin_unlock(&global_rsv->lock); 846 847 return true; 848 } 849 850 /* 851 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets 852 * @fs_info - fs_info for this fs 853 * @space_info - the space info we were flushing 854 * 855 * We call this when we've exhausted our flushing ability and haven't made 856 * progress in satisfying tickets. The reservation code handles tickets in 857 * order, so if there is a large ticket first and then smaller ones we could 858 * very well satisfy the smaller tickets. This will attempt to wake up any 859 * tickets in the list to catch this case. 860 * 861 * This function returns true if it was able to make progress by clearing out 862 * other tickets, or if it stumbles across a ticket that was smaller than the 863 * first ticket. 864 */ 865 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, 866 struct btrfs_space_info *space_info) 867 { 868 struct reserve_ticket *ticket; 869 u64 tickets_id = space_info->tickets_id; 870 u64 first_ticket_bytes = 0; 871 872 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 873 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info"); 874 __btrfs_dump_space_info(fs_info, space_info); 875 } 876 877 while (!list_empty(&space_info->tickets) && 878 tickets_id == space_info->tickets_id) { 879 ticket = list_first_entry(&space_info->tickets, 880 struct reserve_ticket, list); 881 882 if (ticket->steal && 883 steal_from_global_rsv(fs_info, space_info, ticket)) 884 return true; 885 886 /* 887 * may_commit_transaction will avoid committing the transaction 888 * if it doesn't feel like the space reclaimed by the commit 889 * would result in the ticket succeeding. However if we have a 890 * smaller ticket in the queue it may be small enough to be 891 * satisified by committing the transaction, so if any 892 * subsequent ticket is smaller than the first ticket go ahead 893 * and send us back for another loop through the enospc flushing 894 * code. 895 */ 896 if (first_ticket_bytes == 0) 897 first_ticket_bytes = ticket->bytes; 898 else if (first_ticket_bytes > ticket->bytes) 899 return true; 900 901 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 902 btrfs_info(fs_info, "failing ticket with %llu bytes", 903 ticket->bytes); 904 905 remove_ticket(space_info, ticket); 906 ticket->error = -ENOSPC; 907 wake_up(&ticket->wait); 908 909 /* 910 * We're just throwing tickets away, so more flushing may not 911 * trip over btrfs_try_granting_tickets, so we need to call it 912 * here to see if we can make progress with the next ticket in 913 * the list. 914 */ 915 btrfs_try_granting_tickets(fs_info, space_info); 916 } 917 return (tickets_id != space_info->tickets_id); 918 } 919 920 /* 921 * This is for normal flushers, we can wait all goddamned day if we want to. We 922 * will loop and continuously try to flush as long as we are making progress. 923 * We count progress as clearing off tickets each time we have to loop. 924 */ 925 static void btrfs_async_reclaim_metadata_space(struct work_struct *work) 926 { 927 struct btrfs_fs_info *fs_info; 928 struct btrfs_space_info *space_info; 929 u64 to_reclaim; 930 int flush_state; 931 int commit_cycles = 0; 932 u64 last_tickets_id; 933 934 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); 935 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 936 937 spin_lock(&space_info->lock); 938 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 939 if (!to_reclaim) { 940 space_info->flush = 0; 941 spin_unlock(&space_info->lock); 942 return; 943 } 944 last_tickets_id = space_info->tickets_id; 945 spin_unlock(&space_info->lock); 946 947 flush_state = FLUSH_DELAYED_ITEMS_NR; 948 do { 949 flush_space(fs_info, space_info, to_reclaim, flush_state); 950 spin_lock(&space_info->lock); 951 if (list_empty(&space_info->tickets)) { 952 space_info->flush = 0; 953 spin_unlock(&space_info->lock); 954 return; 955 } 956 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, 957 space_info); 958 if (last_tickets_id == space_info->tickets_id) { 959 flush_state++; 960 } else { 961 last_tickets_id = space_info->tickets_id; 962 flush_state = FLUSH_DELAYED_ITEMS_NR; 963 if (commit_cycles) 964 commit_cycles--; 965 } 966 967 /* 968 * We don't want to force a chunk allocation until we've tried 969 * pretty hard to reclaim space. Think of the case where we 970 * freed up a bunch of space and so have a lot of pinned space 971 * to reclaim. We would rather use that than possibly create a 972 * underutilized metadata chunk. So if this is our first run 973 * through the flushing state machine skip ALLOC_CHUNK_FORCE and 974 * commit the transaction. If nothing has changed the next go 975 * around then we can force a chunk allocation. 976 */ 977 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles) 978 flush_state++; 979 980 if (flush_state > COMMIT_TRANS) { 981 commit_cycles++; 982 if (commit_cycles > 2) { 983 if (maybe_fail_all_tickets(fs_info, space_info)) { 984 flush_state = FLUSH_DELAYED_ITEMS_NR; 985 commit_cycles--; 986 } else { 987 space_info->flush = 0; 988 } 989 } else { 990 flush_state = FLUSH_DELAYED_ITEMS_NR; 991 } 992 } 993 spin_unlock(&space_info->lock); 994 } while (flush_state <= COMMIT_TRANS); 995 } 996 997 void btrfs_init_async_reclaim_work(struct work_struct *work) 998 { 999 INIT_WORK(work, btrfs_async_reclaim_metadata_space); 1000 } 1001 1002 static const enum btrfs_flush_state priority_flush_states[] = { 1003 FLUSH_DELAYED_ITEMS_NR, 1004 FLUSH_DELAYED_ITEMS, 1005 ALLOC_CHUNK, 1006 }; 1007 1008 static const enum btrfs_flush_state evict_flush_states[] = { 1009 FLUSH_DELAYED_ITEMS_NR, 1010 FLUSH_DELAYED_ITEMS, 1011 FLUSH_DELAYED_REFS_NR, 1012 FLUSH_DELAYED_REFS, 1013 FLUSH_DELALLOC, 1014 FLUSH_DELALLOC_WAIT, 1015 ALLOC_CHUNK, 1016 COMMIT_TRANS, 1017 }; 1018 1019 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, 1020 struct btrfs_space_info *space_info, 1021 struct reserve_ticket *ticket, 1022 const enum btrfs_flush_state *states, 1023 int states_nr) 1024 { 1025 u64 to_reclaim; 1026 int flush_state; 1027 1028 spin_lock(&space_info->lock); 1029 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); 1030 if (!to_reclaim) { 1031 spin_unlock(&space_info->lock); 1032 return; 1033 } 1034 spin_unlock(&space_info->lock); 1035 1036 flush_state = 0; 1037 do { 1038 flush_space(fs_info, space_info, to_reclaim, states[flush_state]); 1039 flush_state++; 1040 spin_lock(&space_info->lock); 1041 if (ticket->bytes == 0) { 1042 spin_unlock(&space_info->lock); 1043 return; 1044 } 1045 spin_unlock(&space_info->lock); 1046 } while (flush_state < states_nr); 1047 } 1048 1049 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, 1050 struct btrfs_space_info *space_info, 1051 struct reserve_ticket *ticket) 1052 1053 { 1054 DEFINE_WAIT(wait); 1055 int ret = 0; 1056 1057 spin_lock(&space_info->lock); 1058 while (ticket->bytes > 0 && ticket->error == 0) { 1059 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); 1060 if (ret) { 1061 /* 1062 * Delete us from the list. After we unlock the space 1063 * info, we don't want the async reclaim job to reserve 1064 * space for this ticket. If that would happen, then the 1065 * ticket's task would not known that space was reserved 1066 * despite getting an error, resulting in a space leak 1067 * (bytes_may_use counter of our space_info). 1068 */ 1069 remove_ticket(space_info, ticket); 1070 ticket->error = -EINTR; 1071 break; 1072 } 1073 spin_unlock(&space_info->lock); 1074 1075 schedule(); 1076 1077 finish_wait(&ticket->wait, &wait); 1078 spin_lock(&space_info->lock); 1079 } 1080 spin_unlock(&space_info->lock); 1081 } 1082 1083 /** 1084 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket 1085 * @fs_info - the fs 1086 * @space_info - the space_info for the reservation 1087 * @ticket - the ticket for the reservation 1088 * @flush - how much we can flush 1089 * 1090 * This does the work of figuring out how to flush for the ticket, waiting for 1091 * the reservation, and returning the appropriate error if there is one. 1092 */ 1093 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, 1094 struct btrfs_space_info *space_info, 1095 struct reserve_ticket *ticket, 1096 enum btrfs_reserve_flush_enum flush) 1097 { 1098 int ret; 1099 1100 switch (flush) { 1101 case BTRFS_RESERVE_FLUSH_ALL: 1102 case BTRFS_RESERVE_FLUSH_ALL_STEAL: 1103 wait_reserve_ticket(fs_info, space_info, ticket); 1104 break; 1105 case BTRFS_RESERVE_FLUSH_LIMIT: 1106 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1107 priority_flush_states, 1108 ARRAY_SIZE(priority_flush_states)); 1109 break; 1110 case BTRFS_RESERVE_FLUSH_EVICT: 1111 priority_reclaim_metadata_space(fs_info, space_info, ticket, 1112 evict_flush_states, 1113 ARRAY_SIZE(evict_flush_states)); 1114 break; 1115 default: 1116 ASSERT(0); 1117 break; 1118 } 1119 1120 spin_lock(&space_info->lock); 1121 ret = ticket->error; 1122 if (ticket->bytes || ticket->error) { 1123 /* 1124 * We were a priority ticket, so we need to delete ourselves 1125 * from the list. Because we could have other priority tickets 1126 * behind us that require less space, run 1127 * btrfs_try_granting_tickets() to see if their reservations can 1128 * now be made. 1129 */ 1130 if (!list_empty(&ticket->list)) { 1131 remove_ticket(space_info, ticket); 1132 btrfs_try_granting_tickets(fs_info, space_info); 1133 } 1134 1135 if (!ret) 1136 ret = -ENOSPC; 1137 } 1138 spin_unlock(&space_info->lock); 1139 ASSERT(list_empty(&ticket->list)); 1140 /* 1141 * Check that we can't have an error set if the reservation succeeded, 1142 * as that would confuse tasks and lead them to error out without 1143 * releasing reserved space (if an error happens the expectation is that 1144 * space wasn't reserved at all). 1145 */ 1146 ASSERT(!(ticket->bytes == 0 && ticket->error)); 1147 return ret; 1148 } 1149 1150 /* 1151 * This returns true if this flush state will go through the ordinary flushing 1152 * code. 1153 */ 1154 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) 1155 { 1156 return (flush == BTRFS_RESERVE_FLUSH_ALL) || 1157 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1158 } 1159 1160 /** 1161 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space 1162 * @root - the root we're allocating for 1163 * @space_info - the space info we want to allocate from 1164 * @orig_bytes - the number of bytes we want 1165 * @flush - whether or not we can flush to make our reservation 1166 * 1167 * This will reserve orig_bytes number of bytes from the space info associated 1168 * with the block_rsv. If there is not enough space it will make an attempt to 1169 * flush out space to make room. It will do this by flushing delalloc if 1170 * possible or committing the transaction. If flush is 0 then no attempts to 1171 * regain reservations will be made and this will fail if there is not enough 1172 * space already. 1173 */ 1174 static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, 1175 struct btrfs_space_info *space_info, 1176 u64 orig_bytes, 1177 enum btrfs_reserve_flush_enum flush) 1178 { 1179 struct reserve_ticket ticket; 1180 u64 used; 1181 int ret = 0; 1182 bool pending_tickets; 1183 1184 ASSERT(orig_bytes); 1185 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); 1186 1187 spin_lock(&space_info->lock); 1188 ret = -ENOSPC; 1189 used = btrfs_space_info_used(space_info, true); 1190 1191 /* 1192 * We don't want NO_FLUSH allocations to jump everybody, they can 1193 * generally handle ENOSPC in a different way, so treat them the same as 1194 * normal flushers when it comes to skipping pending tickets. 1195 */ 1196 if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH)) 1197 pending_tickets = !list_empty(&space_info->tickets) || 1198 !list_empty(&space_info->priority_tickets); 1199 else 1200 pending_tickets = !list_empty(&space_info->priority_tickets); 1201 1202 /* 1203 * Carry on if we have enough space (short-circuit) OR call 1204 * can_overcommit() to ensure we can overcommit to continue. 1205 */ 1206 if (!pending_tickets && 1207 ((used + orig_bytes <= space_info->total_bytes) || 1208 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) { 1209 btrfs_space_info_update_bytes_may_use(fs_info, space_info, 1210 orig_bytes); 1211 ret = 0; 1212 } 1213 1214 /* 1215 * If we couldn't make a reservation then setup our reservation ticket 1216 * and kick the async worker if it's not already running. 1217 * 1218 * If we are a priority flusher then we just need to add our ticket to 1219 * the list and we will do our own flushing further down. 1220 */ 1221 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { 1222 ticket.bytes = orig_bytes; 1223 ticket.error = 0; 1224 space_info->reclaim_size += ticket.bytes; 1225 init_waitqueue_head(&ticket.wait); 1226 ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1227 if (flush == BTRFS_RESERVE_FLUSH_ALL || 1228 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { 1229 list_add_tail(&ticket.list, &space_info->tickets); 1230 if (!space_info->flush) { 1231 space_info->flush = 1; 1232 trace_btrfs_trigger_flush(fs_info, 1233 space_info->flags, 1234 orig_bytes, flush, 1235 "enospc"); 1236 queue_work(system_unbound_wq, 1237 &fs_info->async_reclaim_work); 1238 } 1239 } else { 1240 list_add_tail(&ticket.list, 1241 &space_info->priority_tickets); 1242 } 1243 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 1244 used += orig_bytes; 1245 /* 1246 * We will do the space reservation dance during log replay, 1247 * which means we won't have fs_info->fs_root set, so don't do 1248 * the async reclaim as we will panic. 1249 */ 1250 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && 1251 need_do_async_reclaim(fs_info, space_info, used) && 1252 !work_busy(&fs_info->async_reclaim_work)) { 1253 trace_btrfs_trigger_flush(fs_info, space_info->flags, 1254 orig_bytes, flush, "preempt"); 1255 queue_work(system_unbound_wq, 1256 &fs_info->async_reclaim_work); 1257 } 1258 } 1259 spin_unlock(&space_info->lock); 1260 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH) 1261 return ret; 1262 1263 return handle_reserve_ticket(fs_info, space_info, &ticket, flush); 1264 } 1265 1266 /** 1267 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space 1268 * @root - the root we're allocating for 1269 * @block_rsv - the block_rsv we're allocating for 1270 * @orig_bytes - the number of bytes we want 1271 * @flush - whether or not we can flush to make our reservation 1272 * 1273 * This will reserve orig_bytes number of bytes from the space info associated 1274 * with the block_rsv. If there is not enough space it will make an attempt to 1275 * flush out space to make room. It will do this by flushing delalloc if 1276 * possible or committing the transaction. If flush is 0 then no attempts to 1277 * regain reservations will be made and this will fail if there is not enough 1278 * space already. 1279 */ 1280 int btrfs_reserve_metadata_bytes(struct btrfs_root *root, 1281 struct btrfs_block_rsv *block_rsv, 1282 u64 orig_bytes, 1283 enum btrfs_reserve_flush_enum flush) 1284 { 1285 struct btrfs_fs_info *fs_info = root->fs_info; 1286 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 1287 int ret; 1288 1289 ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info, 1290 orig_bytes, flush); 1291 if (ret == -ENOSPC && 1292 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) { 1293 if (block_rsv != global_rsv && 1294 !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes)) 1295 ret = 0; 1296 } 1297 if (ret == -ENOSPC) { 1298 trace_btrfs_space_reservation(fs_info, "space_info:enospc", 1299 block_rsv->space_info->flags, 1300 orig_bytes, 1); 1301 1302 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) 1303 btrfs_dump_space_info(fs_info, block_rsv->space_info, 1304 orig_bytes, 0); 1305 } 1306 return ret; 1307 } 1308