xref: /openbmc/linux/fs/btrfs/space-info.c (revision 7f9fe614)
// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   the num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space.
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0; if so we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order
 *   to reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however holds reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation because of
 *     overcommit: we don't want to allocate a lot of useless metadata space
 *     when our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     may_commit_transaction() is the ultimate arbiter on whether we commit the
 *     transaction or not.  In order to avoid constantly churning we do all the
 *     above flushing first and then commit the transaction as the last resort.
 *     However we need to take into account things like pinned space that would
 *     be freed, plus any delayed work we may not have gotten rid of in the case
 *     of metadata.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata; data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */

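/*
 * As a minimal sketch of the basic check described above (locking, tickets
 * and overcommit stripped away), a reservation fits when the sum of the
 * bytes_ counters plus the new bytes stays within total_bytes.  The helper
 * name below is illustrative only and does not exist elsewhere in the tree.
 */
static inline bool space_info_has_room(struct btrfs_space_info *si,
				       u64 num_bytes)
{
	/* total_bytes - SUM(bytes_) is what is free for reservations. */
	return btrfs_space_info_used(si, true) + num_bytes <= si->total_bytes;
}
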
u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
			  bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				 GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info,
			  enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

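/*
 * A worked example of the above: with a RAID1 metadata profile (factor 2)
 * and 8GiB of unallocated chunk space, avail comes out to 8GiB / 2 = 4GiB,
 * of which we will overcommit up to 4GiB >> 1 = 2GiB for a reservation that
 * cannot flush everything, but only 4GiB >> 3 = 512MiB for a
 * BTRFS_RESERVE_FLUSH_ALL reservation.
 */
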
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

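/*
 * A sketch of the calling convention for the function above: whoever gives
 * space back must do so under the space_info lock and then offer it to the
 * queued tickets.  This mirrors the pattern used by the block_rsv release
 * paths; the helper name itself is illustrative and not part of the tree.
 */
static inline void example_return_bytes(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *si,
					u64 num_bytes)
{
	spin_lock(&si->lock);
	/* Drop our reservation ... */
	btrfs_space_info_update_bytes_may_use(fs_info, si, -(s64)num_bytes);
	/* ... and let waiting tickets consume it before anybody else. */
	btrfs_try_granting_tickets(fs_info, si);
	spin_unlock(&si->lock);
}
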
#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem should guarantee that the delalloc inodes
		 * list is empty once the filesystem is read-only (all dirty
		 * pages have been written to disk).
		 */
		btrfs_start_delalloc_roots(fs_info, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
	}
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

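/*
 * For example, with the default 16KiB nodesize the worst case cost of
 * inserting one item, as computed by btrfs_calc_insert_metadata_size() in
 * ctree.h at this revision, is nodesize * 2 * BTRFS_MAX_LEVEL =
 * 16KiB * 2 * 8 = 256KiB, so a request to reclaim 1MiB comes back as
 * nr = 4 items.
 */
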
#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
			    u64 orig, bool wait_ordered)
{
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 dio_bytes;
	u64 async_pages;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops;

	/* Calculate the number of pages we need to flush for this reservation */
	items = calc_reclaim_items_nr(fs_info, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	if (delalloc_bytes == 0 && dio_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (dio_bytes > delalloc_bytes)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || dio_bytes) && loops < 3) {
		nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;

		/*
		 * Triggers inode writeback for up to nr_pages.  This will
		 * invoke the ->writepages callback and trigger delalloc
		 * filling (btrfs_run_delalloc_range()).
		 */
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);

		/*
		 * We need to wait for the compressed pages to start before
		 * we continue.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * Calculate how many compressed pages we want to be written
		 * before we continue.  I.e. if there are more async pages than
		 * we require, wait_event will wait until nr_pages of them are
		 * written.
		 */
		if (async_pages <= nr_pages)
			async_pages = 0;
		else
			async_pages -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)async_pages);
skip_async:
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	}
}

/**
 * may_commit_transaction - possibly commit the transaction if it's OK to
 * @fs_info - the fs_info for our filesystem
 * @space_info - the space_info we are flushing for
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *trans;
	u64 bytes_needed;
	u64 reclaim_bytes = 0;
	u64 cur_free_bytes = 0;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	cur_free_bytes = btrfs_space_info_used(space_info, true);
	if (cur_free_bytes < space_info->total_bytes)
		cur_free_bytes = space_info->total_bytes - cur_free_bytes;
	else
		cur_free_bytes = 0;

	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	bytes_needed = (ticket) ? ticket->bytes : 0;

	if (bytes_needed > cur_free_bytes)
		bytes_needed -= cur_free_bytes;
	else
		bytes_needed = 0;
	spin_unlock(&space_info->lock);

	if (!bytes_needed)
		return 0;

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * See if there is enough pinned space to make this reservation, or if
	 * we have block groups that are going to be freed, allowing us to
	 * possibly do a chunk allocation the next loop through.
	 */
	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
	    __percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		goto enospc;

	spin_lock(&delayed_rsv->lock);
	reclaim_bytes += delayed_rsv->reserved;
	spin_unlock(&delayed_rsv->lock);

	spin_lock(&delayed_refs_rsv->lock);
	reclaim_bytes += delayed_refs_rsv->reserved;
	spin_unlock(&delayed_refs_rsv->lock);
	if (reclaim_bytes >= bytes_needed)
		goto commit;
	bytes_needed -= reclaim_bytes;

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes_needed,
				   BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
		goto enospc;

commit:
	return btrfs_commit_transaction(trans);
enospc:
	btrfs_end_transaction(trans);
	return -ENOSPC;
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
		       struct btrfs_space_info *space_info, u64 num_bytes,
		       int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_metadata_alloc_profile(fs_info),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 expected;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space.  If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (btrfs_can_overcommit(fs_info, space_info, to_reclaim,
				 BTRFS_RESERVE_FLUSH_ALL))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (btrfs_can_overcommit(fs_info, space_info, SZ_1M,
				 BTRFS_RESERVE_FLUSH_ALL))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}

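/*
 * A worked example of the overage case above: with total_bytes = 10GiB,
 * avail = 1GiB of overcommit room and used = 12GiB, we are 1GiB past what
 * we can cover, so a pending reclaim_size of 256MiB is bumped to
 * to_reclaim = 256MiB + (12GiB - 11GiB) = 1.25GiB before we return.
 */
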
static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 5);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	ticket->bytes = 0;
	list_del_init(&ticket->list);
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	u64 first_ticket_bytes = 0;

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (ticket->steal &&
		    steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		/*
		 * may_commit_transaction will avoid committing the transaction
		 * if it doesn't feel like the space reclaimed by the commit
		 * would result in the ticket succeeding.  However if we have a
		 * smaller ticket in the queue it may be small enough to be
		 * satisfied by committing the transaction, so if any
		 * subsequent ticket is smaller than the first ticket go ahead
		 * and send us back for another loop through the enospc flushing
		 * code.
		 */
		if (first_ticket_bytes == 0)
			first_ticket_bytes = ticket->bytes;
		else if (first_ticket_bytes > ticket->bytes)
			return true;

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create an
		 * underutilized metadata chunk.  So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction.  If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

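/*
 * The worker above walks the btrfs_flush_state enum in ascending order; at
 * this revision that progression (defined in ctree.h) is expected to be:
 *
 *   FLUSH_DELAYED_ITEMS_NR -> FLUSH_DELAYED_ITEMS -> FLUSH_DELALLOC ->
 *   FLUSH_DELALLOC_WAIT -> FLUSH_DELAYED_REFS_NR -> FLUSH_DELAYED_REFS ->
 *   ALLOC_CHUNK -> ALLOC_CHUNK_FORCE -> RUN_DELAYED_IPUTS -> COMMIT_TRANS
 */
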
void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
 * @fs_info - the fs
 * @space_info - the space_info for the reservation
 * @ticket - the ticket for the reservation
 * @flush - how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		/*
		 * Need to delete here for priority tickets. For regular tickets
		 * either the async reclaim job deletes the ticket from the list
		 * or we delete it ourselves at wait_reserve_ticket().
		 */
		remove_ticket(space_info, ticket);
		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	return ret;
}

/**
 * __reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @fs_info - the filesystem we're allocating for
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);
	pending_tickets = !list_empty(&space_info->tickets) ||
		!list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * btrfs_can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ASSERT(space_info->reclaim_size >= 0);
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info, used) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
}

/**
 * btrfs_reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}
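
/*
 * A sketch of typical usage, mirroring the pattern in block-rsv.c (error
 * handling trimmed; the function name here is illustrative only):
 */
static inline int example_refill_rsv(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     u64 num_bytes)
{
	int ret;

	/* Reserve, flushing as hard as this context allows. */
	ret = btrfs_reserve_metadata_bytes(root, rsv, num_bytes,
					   BTRFS_RESERVE_FLUSH_ALL);
	if (ret)
		return ret;

	/* On success the bytes are ours; account them to the rsv. */
	btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
	return 0;
}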