1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/slab.h>
8 #include <linux/sched.h>
9 #include <linux/writeback.h>
10 #include <linux/pagemap.h>
11 #include <linux/blkdev.h>
12 #include <linux/uuid.h>
13 #include "misc.h"
14 #include "ctree.h"
15 #include "disk-io.h"
16 #include "transaction.h"
17 #include "locking.h"
18 #include "tree-log.h"
19 #include "volumes.h"
20 #include "dev-replace.h"
21 #include "qgroup.h"
22 #include "block-group.h"
23 #include "space-info.h"
24 
25 #define BTRFS_ROOT_TRANS_TAG 0
26 
27 /*
28  * Transaction states and transitions
29  *
30  * No running transaction (fs tree blocks are not modified)
31  * |
32  * | To next stage:
33  * |  Call one of the start_transaction() variants, except btrfs_join_transaction_nostart().
34  * V
35  * Transaction N [[TRANS_STATE_RUNNING]]
36  * |
37  * | New trans handles can be attached to transaction N by calling any of
38  * | the start_transaction() variants.
39  * |
40  * | To next stage:
41  * |  Call btrfs_commit_transaction() on any trans handle attached to
42  * |  transaction N
43  * V
44  * Transaction N [[TRANS_STATE_COMMIT_START]]
45  * |
46  * | Will wait for previous running transaction to completely finish if there
47  * | is one
48  * |
49  * | Then one of the following happens:
50  * | - Wait for all other trans handle holders to release.
51  * |   The btrfs_commit_transaction() caller will do the commit work.
52  * | - Wait for current transaction to be committed by others.
53  * |   Another btrfs_commit_transaction() caller will do the commit work.
54  * |
55  * | At this stage, only btrfs_join_transaction*() variants can attach
56  * | to this running transaction.
57  * | All other variants will wait for current one to finish and attach to
58  * | transaction N+1.
59  * |
60  * | To next stage:
61  * |  Caller is chosen to commit transaction N, and all other trans
62  * |  handles have been released.
63  * V
64  * Transaction N [[TRANS_STATE_COMMIT_DOING]]
65  * |
66  * | The heavy lifting transaction work is started.
67  * | From running delayed refs (modifying extent tree) to creating pending
68  * | snapshots, running qgroups.
69  * | In short, modify supporting trees to reflect modifications of subvolume
70  * | trees.
71  * |
72  * | At this stage, all start_transaction() calls will wait for this
73  * | transaction to finish and attach to transaction N+1.
74  * |
75  * | To next stage:
76  * |  Until all supporting trees are updated.
77  * V
78  * Transaction N [[TRANS_STATE_UNBLOCKED]]
79  * |						    Transaction N+1
80  * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
81  * | need to write them back to disk and update	    |
82  * | super blocks.				    |
83  * |						    |
84  * | At this stage, new transaction is allowed to   |
85  * | start.					    |
86  * | All new start_transaction() calls will be	    |
87  * | attached to transid N+1.			    |
88  * |						    |
89  * | To next stage:				    |
90  * |  Until all tree blocks and super blocks are
91  * |  written to block devices			    |
92  * V						    |
93  * Transaction N [[TRANS_STATE_COMPLETED]]	    V
94  *   All tree blocks and super blocks are written.  Transaction N+1
95  *   This transaction is finished and all its	    [[TRANS_STATE_COMMIT_START]]
96  *   data structures will be cleaned up.	    | Life goes on
97  */
98 static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
99 	[TRANS_STATE_RUNNING]		= 0U,
100 	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
101 	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
102 					   __TRANS_ATTACH |
103 					   __TRANS_JOIN |
104 					   __TRANS_JOIN_NOSTART),
105 	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
106 					   __TRANS_ATTACH |
107 					   __TRANS_JOIN |
108 					   __TRANS_JOIN_NOLOCK |
109 					   __TRANS_JOIN_NOSTART),
110 	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
111 					   __TRANS_ATTACH |
112 					   __TRANS_JOIN |
113 					   __TRANS_JOIN_NOLOCK |
114 					   __TRANS_JOIN_NOSTART),
115 };
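/*
 * Illustrative note (not part of the original file): join_transaction()
 * below consults this table with a simple mask test:
 *
 *	if (btrfs_blocked_trans_types[cur_trans->state] & type)
 *		return -EBUSY;
 *
 * So once a transaction reaches TRANS_STATE_COMMIT_DOING, a TRANS_START or
 * TRANS_JOIN attempt is refused with -EBUSY and ends up waiting for
 * transaction N+1, while TRANS_JOIN_NOLOCK is still allowed in.
 */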
116 
117 void btrfs_put_transaction(struct btrfs_transaction *transaction)
118 {
119 	WARN_ON(refcount_read(&transaction->use_count) == 0);
120 	if (refcount_dec_and_test(&transaction->use_count)) {
121 		BUG_ON(!list_empty(&transaction->list));
122 		WARN_ON(!RB_EMPTY_ROOT(
123 				&transaction->delayed_refs.href_root.rb_root));
124 		WARN_ON(!RB_EMPTY_ROOT(
125 				&transaction->delayed_refs.dirty_extent_root));
126 		if (transaction->delayed_refs.pending_csums)
127 			btrfs_err(transaction->fs_info,
128 				  "pending csums is %llu",
129 				  transaction->delayed_refs.pending_csums);
130 		/*
131 		 * If any block groups are found in ->deleted_bgs then it's
132 		 * because the transaction was aborted and a commit did not
133 		 * happen (things failed before writing the new superblock
134 		 * and calling btrfs_finish_extent_commit()), so we can not
135 		 * discard the physical locations of the block groups.
136 		 */
137 		while (!list_empty(&transaction->deleted_bgs)) {
138 			struct btrfs_block_group *cache;
139 
140 			cache = list_first_entry(&transaction->deleted_bgs,
141 						 struct btrfs_block_group,
142 						 bg_list);
143 			list_del_init(&cache->bg_list);
144 			btrfs_unfreeze_block_group(cache);
145 			btrfs_put_block_group(cache);
146 		}
147 		WARN_ON(!list_empty(&transaction->dev_update_list));
148 		kfree(transaction);
149 	}
150 }
151 
152 static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
153 {
154 	struct btrfs_transaction *cur_trans = trans->transaction;
155 	struct btrfs_fs_info *fs_info = trans->fs_info;
156 	struct btrfs_root *root, *tmp;
157 	struct btrfs_caching_control *caching_ctl, *next;
158 
159 	down_write(&fs_info->commit_root_sem);
160 	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
161 				 dirty_list) {
162 		list_del_init(&root->dirty_list);
163 		free_extent_buffer(root->commit_root);
164 		root->commit_root = btrfs_root_node(root);
165 		extent_io_tree_release(&root->dirty_log_pages);
166 		btrfs_qgroup_clean_swapped_blocks(root);
167 	}
168 
169 	/* We can free old roots now. */
170 	spin_lock(&cur_trans->dropped_roots_lock);
171 	while (!list_empty(&cur_trans->dropped_roots)) {
172 		root = list_first_entry(&cur_trans->dropped_roots,
173 					struct btrfs_root, root_list);
174 		list_del_init(&root->root_list);
175 		spin_unlock(&cur_trans->dropped_roots_lock);
176 		btrfs_free_log(trans, root);
177 		btrfs_drop_and_free_fs_root(fs_info, root);
178 		spin_lock(&cur_trans->dropped_roots_lock);
179 	}
180 	spin_unlock(&cur_trans->dropped_roots_lock);
181 
182 	/*
183 	 * We have to update the last_byte_to_unpin under the commit_root_sem,
184 	 * at the same time we swap out the commit roots.
185 	 *
186 	 * This is because we must have a real view of the last spot the caching
187 	 * kthreads were while caching.  Consider the following views of the
188 	 * extent tree for a block group
189 	 *
190 	 * commit root
191 	 * +----+----+----+----+----+----+----+
192 	 * |\\\\|    |\\\\|\\\\|    |\\\\|\\\\|
193 	 * +----+----+----+----+----+----+----+
194 	 * 0    1    2    3    4    5    6    7
195 	 *
196 	 * new commit root
197 	 * +----+----+----+----+----+----+----+
198 	 * |    |    |    |\\\\|    |    |\\\\|
199 	 * +----+----+----+----+----+----+----+
200 	 * 0    1    2    3    4    5    6    7
201 	 *
202 	 * If the cache_ctl->progress was at 3, then we are only allowed to
203 	 * unpin [0,1) and [2,3), because the caching thread has already
204 	 * processed those extents.  We are not allowed to unpin [5,6), because
205 	 * the caching thread will re-start its search from 3, and thus find
206 	 * the hole from [4,6) to add to the free space cache.
207 	 */
208 	spin_lock(&fs_info->block_group_cache_lock);
209 	list_for_each_entry_safe(caching_ctl, next,
210 				 &fs_info->caching_block_groups, list) {
211 		struct btrfs_block_group *cache = caching_ctl->block_group;
212 
213 		if (btrfs_block_group_done(cache)) {
214 			cache->last_byte_to_unpin = (u64)-1;
215 			list_del_init(&caching_ctl->list);
216 			btrfs_put_caching_control(caching_ctl);
217 		} else {
218 			cache->last_byte_to_unpin = caching_ctl->progress;
219 		}
220 	}
221 	spin_unlock(&fs_info->block_group_cache_lock);
222 	up_write(&fs_info->commit_root_sem);
223 }
224 
225 static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
226 					 unsigned int type)
227 {
228 	if (type & TRANS_EXTWRITERS)
229 		atomic_inc(&trans->num_extwriters);
230 }
231 
232 static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
233 					 unsigned int type)
234 {
235 	if (type & TRANS_EXTWRITERS)
236 		atomic_dec(&trans->num_extwriters);
237 }
238 
239 static inline void extwriter_counter_init(struct btrfs_transaction *trans,
240 					  unsigned int type)
241 {
242 	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
243 }
244 
245 static inline int extwriter_counter_read(struct btrfs_transaction *trans)
246 {
247 	return atomic_read(&trans->num_extwriters);
248 }
249 
250 /*
251  * To be called after all the new block groups attached to the transaction
252  * handle have been created (btrfs_create_pending_block_groups()).
253  */
254 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
255 {
256 	struct btrfs_fs_info *fs_info = trans->fs_info;
257 
258 	if (!trans->chunk_bytes_reserved)
259 		return;
260 
261 	WARN_ON_ONCE(!list_empty(&trans->new_bgs));
262 
263 	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
264 				trans->chunk_bytes_reserved, NULL);
265 	trans->chunk_bytes_reserved = 0;
266 }
267 
268 /*
269  * either allocate a new transaction or hop into the existing one
270  */
271 static noinline int join_transaction(struct btrfs_fs_info *fs_info,
272 				     unsigned int type)
273 {
274 	struct btrfs_transaction *cur_trans;
275 
276 	spin_lock(&fs_info->trans_lock);
277 loop:
278 	/* The file system has been taken offline. No new transactions. */
279 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
280 		spin_unlock(&fs_info->trans_lock);
281 		return -EROFS;
282 	}
283 
284 	cur_trans = fs_info->running_transaction;
285 	if (cur_trans) {
286 		if (TRANS_ABORTED(cur_trans)) {
287 			spin_unlock(&fs_info->trans_lock);
288 			return cur_trans->aborted;
289 		}
290 		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
291 			spin_unlock(&fs_info->trans_lock);
292 			return -EBUSY;
293 		}
294 		refcount_inc(&cur_trans->use_count);
295 		atomic_inc(&cur_trans->num_writers);
296 		extwriter_counter_inc(cur_trans, type);
297 		spin_unlock(&fs_info->trans_lock);
298 		return 0;
299 	}
300 	spin_unlock(&fs_info->trans_lock);
301 
302 	/*
303 	 * If we are ATTACH, we just want to catch the current transaction,
304 	 * and commit it. If there is no transaction, just return ENOENT.
305 	 */
306 	if (type == TRANS_ATTACH)
307 		return -ENOENT;
308 
309 	/*
310 	 * JOIN_NOLOCK only happens during the transaction commit, so
311 	 * it is impossible that ->running_transaction is NULL
312 	 */
313 	BUG_ON(type == TRANS_JOIN_NOLOCK);
314 
315 	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
316 	if (!cur_trans)
317 		return -ENOMEM;
318 
319 	spin_lock(&fs_info->trans_lock);
320 	if (fs_info->running_transaction) {
321 		/*
322 		 * someone started a transaction after we unlocked.  Make sure
323 		 * to redo the checks above
324 		 */
325 		kfree(cur_trans);
326 		goto loop;
327 	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
328 		spin_unlock(&fs_info->trans_lock);
329 		kfree(cur_trans);
330 		return -EROFS;
331 	}
332 
333 	cur_trans->fs_info = fs_info;
334 	atomic_set(&cur_trans->pending_ordered, 0);
335 	init_waitqueue_head(&cur_trans->pending_wait);
336 	atomic_set(&cur_trans->num_writers, 1);
337 	extwriter_counter_init(cur_trans, type);
338 	init_waitqueue_head(&cur_trans->writer_wait);
339 	init_waitqueue_head(&cur_trans->commit_wait);
340 	cur_trans->state = TRANS_STATE_RUNNING;
341 	/*
342 	 * One for this trans handle, one so it will live on until we
343 	 * commit the transaction.
344 	 */
345 	refcount_set(&cur_trans->use_count, 2);
346 	cur_trans->flags = 0;
347 	cur_trans->start_time = ktime_get_seconds();
348 
349 	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
350 
351 	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
352 	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
353 	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
354 
355 	/*
356 	 * although the tree mod log is per file system and not per transaction,
357 	 * the log must never go across transaction boundaries.
358 	 */
359 	smp_mb();
360 	if (!list_empty(&fs_info->tree_mod_seq_list))
361 		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
362 	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
363 		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
364 	atomic64_set(&fs_info->tree_mod_seq, 0);
365 
366 	spin_lock_init(&cur_trans->delayed_refs.lock);
367 
368 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
369 	INIT_LIST_HEAD(&cur_trans->dev_update_list);
370 	INIT_LIST_HEAD(&cur_trans->switch_commits);
371 	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
372 	INIT_LIST_HEAD(&cur_trans->io_bgs);
373 	INIT_LIST_HEAD(&cur_trans->dropped_roots);
374 	mutex_init(&cur_trans->cache_write_mutex);
375 	spin_lock_init(&cur_trans->dirty_bgs_lock);
376 	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
377 	spin_lock_init(&cur_trans->dropped_roots_lock);
378 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
379 	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
380 			IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
381 	extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
382 			IO_TREE_FS_PINNED_EXTENTS, NULL);
383 	fs_info->generation++;
384 	cur_trans->transid = fs_info->generation;
385 	fs_info->running_transaction = cur_trans;
386 	cur_trans->aborted = 0;
387 	spin_unlock(&fs_info->trans_lock);
388 
389 	return 0;
390 }
391 
392 /*
393  * This does all the record keeping required to make sure that a shareable root
394  * is properly recorded in a given transaction.  This is required to make sure
395  * the old root from before we joined the transaction is deleted when the
396  * transaction commits.
397  */
398 static int record_root_in_trans(struct btrfs_trans_handle *trans,
399 			       struct btrfs_root *root,
400 			       int force)
401 {
402 	struct btrfs_fs_info *fs_info = root->fs_info;
403 
404 	if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
405 	    root->last_trans < trans->transid) || force) {
406 		WARN_ON(root == fs_info->extent_root);
407 		WARN_ON(!force && root->commit_root != root->node);
408 
409 		/*
410 		 * see below for IN_TRANS_SETUP usage rules
411 		 * we have the reloc mutex held now, so there
412 		 * is only one writer in this function
413 		 */
414 		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
415 
416 		/* make sure readers find IN_TRANS_SETUP before
417 		 * they find our root->last_trans update
418 		 */
419 		smp_wmb();
420 
421 		spin_lock(&fs_info->fs_roots_radix_lock);
422 		if (root->last_trans == trans->transid && !force) {
423 			spin_unlock(&fs_info->fs_roots_radix_lock);
424 			return 0;
425 		}
426 		radix_tree_tag_set(&fs_info->fs_roots_radix,
427 				   (unsigned long)root->root_key.objectid,
428 				   BTRFS_ROOT_TRANS_TAG);
429 		spin_unlock(&fs_info->fs_roots_radix_lock);
430 		root->last_trans = trans->transid;
431 
432 		/* this is pretty tricky.  We don't want to
433 		 * take the relocation lock in btrfs_record_root_in_trans
434 		 * unless we're really doing the first setup for this root in
435 		 * this transaction.
436 		 *
437 		 * Normally we'd use root->last_trans as a flag to decide
438 		 * if we want to take the expensive mutex.
439 		 *
440 		 * But, we have to set root->last_trans before we
441 		 * init the relocation root, otherwise, we trip over warnings
442 		 * in ctree.c.  The solution used here is to flag ourselves
443 		 * with root IN_TRANS_SETUP.  When this is set, we're still
444 		 * fixing up the reloc trees and everyone must wait.
445 		 *
446 		 * When this is clear, they can trust root->last_trans and fly
447 		 * through btrfs_record_root_in_trans without having to take the
448 		 * lock.  smp_wmb() makes sure that all the writes above are
449 		 * done before we pop in the zero below
450 		 */
451 		btrfs_init_reloc_root(trans, root);
452 		smp_mb__before_atomic();
453 		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
454 	}
455 	return 0;
456 }
457 
458 
459 void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
460 			    struct btrfs_root *root)
461 {
462 	struct btrfs_fs_info *fs_info = root->fs_info;
463 	struct btrfs_transaction *cur_trans = trans->transaction;
464 
465 	/* Add ourselves to the transaction dropped list */
466 	spin_lock(&cur_trans->dropped_roots_lock);
467 	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
468 	spin_unlock(&cur_trans->dropped_roots_lock);
469 
470 	/* Make sure we don't try to update the root at commit time */
471 	spin_lock(&fs_info->fs_roots_radix_lock);
472 	radix_tree_tag_clear(&fs_info->fs_roots_radix,
473 			     (unsigned long)root->root_key.objectid,
474 			     BTRFS_ROOT_TRANS_TAG);
475 	spin_unlock(&fs_info->fs_roots_radix_lock);
476 }
477 
478 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
479 			       struct btrfs_root *root)
480 {
481 	struct btrfs_fs_info *fs_info = root->fs_info;
482 
483 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
484 		return 0;
485 
486 	/*
487 	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
488 	 * and barriers
489 	 */
490 	smp_rmb();
491 	if (root->last_trans == trans->transid &&
492 	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
493 		return 0;
494 
495 	mutex_lock(&fs_info->reloc_mutex);
496 	record_root_in_trans(trans, root, 0);
497 	mutex_unlock(&fs_info->reloc_mutex);
498 
499 	return 0;
500 }
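/*
 * Explanatory note (added, not in the original file): the smp_rmb() above
 * pairs with the smp_wmb() in record_root_in_trans().  The writer sets
 * BTRFS_ROOT_IN_TRANS_SETUP before publishing the new root->last_trans, so
 * a reader that observes the updated last_trans either still sees
 * IN_TRANS_SETUP (and falls back to the reloc_mutex path below) or sees a
 * fully initialized reloc root and may safely take the lockless fast path.
 */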
501 
502 static inline int is_transaction_blocked(struct btrfs_transaction *trans)
503 {
504 	return (trans->state >= TRANS_STATE_COMMIT_START &&
505 		trans->state < TRANS_STATE_UNBLOCKED &&
506 		!TRANS_ABORTED(trans));
507 }
508 
509 /* wait for commit against the current transaction to become unblocked
510  * when this is done, it is safe to start a new transaction, but the current
511  * transaction might not be fully on disk.
512  */
513 static void wait_current_trans(struct btrfs_fs_info *fs_info)
514 {
515 	struct btrfs_transaction *cur_trans;
516 
517 	spin_lock(&fs_info->trans_lock);
518 	cur_trans = fs_info->running_transaction;
519 	if (cur_trans && is_transaction_blocked(cur_trans)) {
520 		refcount_inc(&cur_trans->use_count);
521 		spin_unlock(&fs_info->trans_lock);
522 
523 		wait_event(fs_info->transaction_wait,
524 			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
525 			   TRANS_ABORTED(cur_trans));
526 		btrfs_put_transaction(cur_trans);
527 	} else {
528 		spin_unlock(&fs_info->trans_lock);
529 	}
530 }
531 
532 static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
533 {
534 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
535 		return 0;
536 
537 	if (type == TRANS_START)
538 		return 1;
539 
540 	return 0;
541 }
542 
543 static inline bool need_reserve_reloc_root(struct btrfs_root *root)
544 {
545 	struct btrfs_fs_info *fs_info = root->fs_info;
546 
547 	if (!fs_info->reloc_ctl ||
548 	    !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
549 	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
550 	    root->reloc_root)
551 		return false;
552 
553 	return true;
554 }
555 
556 static struct btrfs_trans_handle *
557 start_transaction(struct btrfs_root *root, unsigned int num_items,
558 		  unsigned int type, enum btrfs_reserve_flush_enum flush,
559 		  bool enforce_qgroups)
560 {
561 	struct btrfs_fs_info *fs_info = root->fs_info;
562 	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
563 	struct btrfs_trans_handle *h;
564 	struct btrfs_transaction *cur_trans;
565 	u64 num_bytes = 0;
566 	u64 qgroup_reserved = 0;
567 	bool reloc_reserved = false;
568 	bool do_chunk_alloc = false;
569 	int ret;
570 
571 	/* Send isn't supposed to start transactions. */
572 	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);
573 
574 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
575 		return ERR_PTR(-EROFS);
576 
577 	if (current->journal_info) {
578 		WARN_ON(type & TRANS_EXTWRITERS);
579 		h = current->journal_info;
580 		refcount_inc(&h->use_count);
581 		WARN_ON(refcount_read(&h->use_count) > 2);
582 		h->orig_rsv = h->block_rsv;
583 		h->block_rsv = NULL;
584 		goto got_it;
585 	}
586 
587 	/*
588 	 * Do the reservation before we join the transaction so we can do all
589 	 * the appropriate flushing if need be.
590 	 */
591 	if (num_items && root != fs_info->chunk_root) {
592 		struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
593 		u64 delayed_refs_bytes = 0;
594 
595 		qgroup_reserved = num_items * fs_info->nodesize;
596 		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
597 				enforce_qgroups);
598 		if (ret)
599 			return ERR_PTR(ret);
600 
601 		/*
602 		 * We want to reserve all the bytes we may need all at once, so
603 		 * we only do 1 enospc flushing cycle per transaction start.  We
604 		 * accomplish this by simply assuming we'll do 2 x num_items
605 		 * worth of delayed refs updates in this trans handle, and
606 		 * refill that amount for whatever is missing in the reserve.
607 		 */
608 		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
609 		if (flush == BTRFS_RESERVE_FLUSH_ALL &&
610 		    delayed_refs_rsv->full == 0) {
611 			delayed_refs_bytes = num_bytes;
612 			num_bytes <<= 1;
613 		}
614 
615 		/*
616 		 * Do the reservation for the relocation root creation
617 		 */
618 		if (need_reserve_reloc_root(root)) {
619 			num_bytes += fs_info->nodesize;
620 			reloc_reserved = true;
621 		}
622 
623 		ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
624 		if (ret)
625 			goto reserve_fail;
626 		if (delayed_refs_bytes) {
627 			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
628 							  delayed_refs_bytes);
629 			num_bytes -= delayed_refs_bytes;
630 		}
631 
632 		if (rsv->space_info->force_alloc)
633 			do_chunk_alloc = true;
634 	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
635 		   !delayed_refs_rsv->full) {
636 		/*
637 		 * Some people call with btrfs_start_transaction(root, 0)
638 		 * because they can be throttled, but have some other mechanism
639 		 * for reserving space.  We still want these guys to refill the
640 		 * delayed block_rsv so just add 1 items worth of reservation
641 	 * delayed block_rsv, so just add one item's worth of reservation
642 		 */
643 		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
644 		if (ret)
645 			goto reserve_fail;
646 	}
647 again:
648 	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
649 	if (!h) {
650 		ret = -ENOMEM;
651 		goto alloc_fail;
652 	}
653 
654 	/*
655 	 * If we are JOIN_NOLOCK we're already committing a transaction and
656 	 * waiting on this guy, so we don't need to do the sb_start_intwrite
657 	 * because we're already holding a ref.  We need this because we could
658 	 * have raced in and done an fsync() on a file which can kick a commit
659 	 * and then we deadlock with somebody doing a freeze.
660 	 *
661 	 * If we are ATTACH, it means we just want to catch the current
662 	 * transaction and commit it, so we needn't do sb_start_intwrite().
663 	 */
664 	if (type & __TRANS_FREEZABLE)
665 		sb_start_intwrite(fs_info->sb);
666 
667 	if (may_wait_transaction(fs_info, type))
668 		wait_current_trans(fs_info);
669 
670 	do {
671 		ret = join_transaction(fs_info, type);
672 		if (ret == -EBUSY) {
673 			wait_current_trans(fs_info);
674 			if (unlikely(type == TRANS_ATTACH ||
675 				     type == TRANS_JOIN_NOSTART))
676 				ret = -ENOENT;
677 		}
678 	} while (ret == -EBUSY);
679 
680 	if (ret < 0)
681 		goto join_fail;
682 
683 	cur_trans = fs_info->running_transaction;
684 
685 	h->transid = cur_trans->transid;
686 	h->transaction = cur_trans;
687 	h->root = root;
688 	refcount_set(&h->use_count, 1);
689 	h->fs_info = root->fs_info;
690 
691 	h->type = type;
692 	h->can_flush_pending_bgs = true;
693 	INIT_LIST_HEAD(&h->new_bgs);
694 
695 	smp_mb();
696 	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
697 	    may_wait_transaction(fs_info, type)) {
698 		current->journal_info = h;
699 		btrfs_commit_transaction(h);
700 		goto again;
701 	}
702 
703 	if (num_bytes) {
704 		trace_btrfs_space_reservation(fs_info, "transaction",
705 					      h->transid, num_bytes, 1);
706 		h->block_rsv = &fs_info->trans_block_rsv;
707 		h->bytes_reserved = num_bytes;
708 		h->reloc_reserved = reloc_reserved;
709 	}
710 
711 got_it:
712 	if (!current->journal_info)
713 		current->journal_info = h;
714 
715 	/*
716 	 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
717 	 * ALLOC_FORCE the first run through, and then we won't allocate for
718 	 * anybody else who races in later.  We don't care about the return
719 	 * value here.
720 	 */
721 	if (do_chunk_alloc && num_bytes) {
722 		u64 flags = h->block_rsv->space_info->flags;
723 
724 		btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
725 				  CHUNK_ALLOC_NO_FORCE);
726 	}
727 
728 	/*
729 	 * btrfs_record_root_in_trans() needs to alloc new extents, and may
730 	 * call btrfs_join_transaction() while we're also starting a
731 	 * transaction.
732 	 *
733 	 * Thus it needs to be called after current->journal_info is initialized,
734 	 * or we can deadlock.
735 	 */
736 	btrfs_record_root_in_trans(h, root);
737 
738 	return h;
739 
740 join_fail:
741 	if (type & __TRANS_FREEZABLE)
742 		sb_end_intwrite(fs_info->sb);
743 	kmem_cache_free(btrfs_trans_handle_cachep, h);
744 alloc_fail:
745 	if (num_bytes)
746 		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
747 					num_bytes, NULL);
748 reserve_fail:
749 	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
750 	return ERR_PTR(ret);
751 }
752 
753 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
754 						   unsigned int num_items)
755 {
756 	return start_transaction(root, num_items, TRANS_START,
757 				 BTRFS_RESERVE_FLUSH_ALL, true);
758 }
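/*
 * Typical caller pattern (a minimal sketch, not part of the original file;
 * the item count and the helper are illustrative):
 *
 *	struct btrfs_trans_handle *trans;
 *	int ret;
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = modify_some_tree(trans);          (hypothetical helper)
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		btrfs_end_transaction(trans);
 *		return ret;
 *	}
 *	return btrfs_end_transaction(trans);
 */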
759 
760 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
761 					struct btrfs_root *root,
762 					unsigned int num_items)
763 {
764 	return start_transaction(root, num_items, TRANS_START,
765 				 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
766 }
767 
768 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
769 {
770 	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
771 				 true);
772 }
773 
774 struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
775 {
776 	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
777 				 BTRFS_RESERVE_NO_FLUSH, true);
778 }
779 
780 /*
781  * Similar to regular join but it never starts a transaction when none is
782  * running or after waiting for the current one to finish.
783  */
784 struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
785 {
786 	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
787 				 BTRFS_RESERVE_NO_FLUSH, true);
788 }
789 
790 /*
791  * btrfs_attach_transaction() - catch the running transaction
792  *
793  * It is used when we want to commit the current transaction, but
794  * don't want to start a new one.
795  *
796  * Note: If this function returns -ENOENT, it just means there is no
797  * running transaction. But it is possible that the inactive transaction
798  * is still in memory, not fully on disk. If you want to be sure there is no
799  * inactive transaction in the fs when -ENOENT is returned, you should
800  * invoke
801  *     btrfs_attach_transaction_barrier()
802  */
803 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
804 {
805 	return start_transaction(root, 0, TRANS_ATTACH,
806 				 BTRFS_RESERVE_NO_FLUSH, true);
807 }
808 
809 /*
810  * btrfs_attach_transaction_barrier() - catch the running transaction
811  *
812  * It is similar to the above function, the difference is this one
813  * will wait for all the inactive transactions until they fully
814  * complete.
815  */
816 struct btrfs_trans_handle *
817 btrfs_attach_transaction_barrier(struct btrfs_root *root)
818 {
819 	struct btrfs_trans_handle *trans;
820 
821 	trans = start_transaction(root, 0, TRANS_ATTACH,
822 				  BTRFS_RESERVE_NO_FLUSH, true);
823 	if (trans == ERR_PTR(-ENOENT))
824 		btrfs_wait_for_commit(root->fs_info, 0);
825 
826 	return trans;
827 }
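/*
 * Usage sketch (illustrative, not from the original file): flush paths that
 * must not create a new transaction can attach to a running one and commit
 * it, treating -ENOENT as "nothing to commit":
 *
 *	trans = btrfs_attach_transaction_barrier(root);
 *	if (IS_ERR(trans)) {
 *		if (PTR_ERR(trans) == -ENOENT)
 *			return 0;
 *		return PTR_ERR(trans);
 *	}
 *	return btrfs_commit_transaction(trans);
 */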
828 
829 /* wait for a transaction commit to be fully complete */
830 static noinline void wait_for_commit(struct btrfs_transaction *commit)
831 {
832 	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
833 }
834 
835 int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
836 {
837 	struct btrfs_transaction *cur_trans = NULL, *t;
838 	int ret = 0;
839 
840 	if (transid) {
841 		if (transid <= fs_info->last_trans_committed)
842 			goto out;
843 
844 		/* find specified transaction */
845 		spin_lock(&fs_info->trans_lock);
846 		list_for_each_entry(t, &fs_info->trans_list, list) {
847 			if (t->transid == transid) {
848 				cur_trans = t;
849 				refcount_inc(&cur_trans->use_count);
850 				ret = 0;
851 				break;
852 			}
853 			if (t->transid > transid) {
854 				ret = 0;
855 				break;
856 			}
857 		}
858 		spin_unlock(&fs_info->trans_lock);
859 
860 		/*
861 		 * The specified transaction doesn't exist, or we
862 		 * raced with btrfs_commit_transaction
863 		 */
864 		if (!cur_trans) {
865 			if (transid > fs_info->last_trans_committed)
866 				ret = -EINVAL;
867 			goto out;
868 		}
869 	} else {
870 		/* find newest transaction that is committing | committed */
871 		spin_lock(&fs_info->trans_lock);
872 		list_for_each_entry_reverse(t, &fs_info->trans_list,
873 					    list) {
874 			if (t->state >= TRANS_STATE_COMMIT_START) {
875 				if (t->state == TRANS_STATE_COMPLETED)
876 					break;
877 				cur_trans = t;
878 				refcount_inc(&cur_trans->use_count);
879 				break;
880 			}
881 		}
882 		spin_unlock(&fs_info->trans_lock);
883 		if (!cur_trans)
884 			goto out;  /* nothing committing|committed */
885 	}
886 
887 	wait_for_commit(cur_trans);
888 	btrfs_put_transaction(cur_trans);
889 out:
890 	return ret;
891 }
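/*
 * Example (a sketch, not code from this file): a caller that only kicked off
 * a commit can save the generation and wait for it to become fully durable
 * later:
 *
 *	u64 transid = trans->transid;   (saved before ending the handle)
 *	...
 *	ret = btrfs_wait_for_commit(fs_info, transid);
 *
 * Passing transid == 0 waits for the newest committing transaction, if any,
 * which is what btrfs_attach_transaction_barrier() does above.
 */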
892 
893 void btrfs_throttle(struct btrfs_fs_info *fs_info)
894 {
895 	wait_current_trans(fs_info);
896 }
897 
898 static bool should_end_transaction(struct btrfs_trans_handle *trans)
899 {
900 	struct btrfs_fs_info *fs_info = trans->fs_info;
901 
902 	if (btrfs_check_space_for_delayed_refs(fs_info))
903 		return true;
904 
905 	return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
906 }
907 
908 bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
909 {
910 	struct btrfs_transaction *cur_trans = trans->transaction;
911 
912 	smp_mb();
913 	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
914 	    cur_trans->delayed_refs.flushing)
915 		return true;
916 
917 	return should_end_transaction(trans);
918 }
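/*
 * Illustrative loop (not part of the original file): long-running operations
 * typically poll this helper and cycle their handle so that a pending commit
 * is not held up indefinitely:
 *
 *	while (more_work) {                     (hypothetical condition)
 *		do_one_unit_of_work(trans);     (hypothetical helper)
 *		if (btrfs_should_end_transaction(trans)) {
 *			btrfs_end_transaction(trans);
 *			trans = btrfs_start_transaction(root, 1);
 *			if (IS_ERR(trans))
 *				return PTR_ERR(trans);
 *		}
 *	}
 */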
919 
920 static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
922 {
923 	struct btrfs_fs_info *fs_info = trans->fs_info;
924 
925 	if (!trans->block_rsv) {
926 		ASSERT(!trans->bytes_reserved);
927 		return;
928 	}
929 
930 	if (!trans->bytes_reserved)
931 		return;
932 
933 	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
934 	trace_btrfs_space_reservation(fs_info, "transaction",
935 				      trans->transid, trans->bytes_reserved, 0);
936 	btrfs_block_rsv_release(fs_info, trans->block_rsv,
937 				trans->bytes_reserved, NULL);
938 	trans->bytes_reserved = 0;
939 }
940 
941 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
942 				   int throttle)
943 {
944 	struct btrfs_fs_info *info = trans->fs_info;
945 	struct btrfs_transaction *cur_trans = trans->transaction;
946 	int err = 0;
947 
948 	if (refcount_read(&trans->use_count) > 1) {
949 		refcount_dec(&trans->use_count);
950 		trans->block_rsv = trans->orig_rsv;
951 		return 0;
952 	}
953 
954 	btrfs_trans_release_metadata(trans);
955 	trans->block_rsv = NULL;
956 
957 	btrfs_create_pending_block_groups(trans);
958 
959 	btrfs_trans_release_chunk_metadata(trans);
960 
961 	if (trans->type & __TRANS_FREEZABLE)
962 		sb_end_intwrite(info->sb);
963 
964 	WARN_ON(cur_trans != info->running_transaction);
965 	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
966 	atomic_dec(&cur_trans->num_writers);
967 	extwriter_counter_dec(cur_trans, trans->type);
968 
969 	cond_wake_up(&cur_trans->writer_wait);
970 	btrfs_put_transaction(cur_trans);
971 
972 	if (current->journal_info == trans)
973 		current->journal_info = NULL;
974 
975 	if (throttle)
976 		btrfs_run_delayed_iputs(info);
977 
978 	if (TRANS_ABORTED(trans) ||
979 	    test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
980 		wake_up_process(info->transaction_kthread);
981 		if (TRANS_ABORTED(trans))
982 			err = trans->aborted;
983 		else
984 			err = -EROFS;
985 	}
986 
987 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
988 	return err;
989 }
990 
991 int btrfs_end_transaction(struct btrfs_trans_handle *trans)
992 {
993 	return __btrfs_end_transaction(trans, 0);
994 }
995 
996 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
997 {
998 	return __btrfs_end_transaction(trans, 1);
999 }
1000 
1001 /*
1002  * when btree blocks are allocated, they have some corresponding bits set for
1003  * them in one of two extent_io trees.  This is used to make sure all of
1004  * those extents are sent to disk but does not wait on them
1005  */
1006 int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
1007 			       struct extent_io_tree *dirty_pages, int mark)
1008 {
1009 	int err = 0;
1010 	int werr = 0;
1011 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
1012 	struct extent_state *cached_state = NULL;
1013 	u64 start = 0;
1014 	u64 end;
1015 
1016 	atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
1017 	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
1018 				      mark, &cached_state)) {
1019 		bool wait_writeback = false;
1020 
1021 		err = convert_extent_bit(dirty_pages, start, end,
1022 					 EXTENT_NEED_WAIT,
1023 					 mark, &cached_state);
1024 		/*
1025 		 * convert_extent_bit can return -ENOMEM, which is most of the
1026 		 * time a temporary error. So when it happens, ignore the error
1027 		 * and wait for writeback of this range to finish - because we
1028 		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
1029 		 * to __btrfs_wait_marked_extents() would not know that
1030 		 * writeback for this range started and therefore wouldn't
1031 		 * wait for it to finish - we don't want to commit a
1032 		 * superblock that points to btree nodes/leaves for which
1033 		 * writeback hasn't finished yet (and without errors).
1034 		 * We cleanup any entries left in the io tree when committing
1035 		 * the transaction (through extent_io_tree_release()).
1036 		 */
1037 		if (err == -ENOMEM) {
1038 			err = 0;
1039 			wait_writeback = true;
1040 		}
1041 		if (!err)
1042 			err = filemap_fdatawrite_range(mapping, start, end);
1043 		if (err)
1044 			werr = err;
1045 		else if (wait_writeback)
1046 			werr = filemap_fdatawait_range(mapping, start, end);
1047 		free_extent_state(cached_state);
1048 		cached_state = NULL;
1049 		cond_resched();
1050 		start = end + 1;
1051 	}
1052 	atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
1053 	return werr;
1054 }
1055 
1056 /*
1057  * when btree blocks are allocated, they have some corresponding bits set for
1058  * them in one of two extent_io trees.  This is used to make sure all of
1059  * those extents are on disk for transaction or log commit.  We wait
1060  * on all the pages and clear them from the dirty pages state tree
1061  */
1062 static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
1063 				       struct extent_io_tree *dirty_pages)
1064 {
1065 	int err = 0;
1066 	int werr = 0;
1067 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
1068 	struct extent_state *cached_state = NULL;
1069 	u64 start = 0;
1070 	u64 end;
1071 
1072 	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
1073 				      EXTENT_NEED_WAIT, &cached_state)) {
1074 		/*
1075 		 * Ignore -ENOMEM errors returned by clear_extent_bit().
1076 		 * When committing the transaction, we'll remove any entries
1077 		 * left in the io tree. For a log commit, we don't remove them
1078 		 * after committing the log because the tree can be accessed
1079 		 * concurrently - we do it only at transaction commit time when
1080 		 * it's safe to do it (through extent_io_tree_release()).
1081 		 */
1082 		err = clear_extent_bit(dirty_pages, start, end,
1083 				       EXTENT_NEED_WAIT, 0, 0, &cached_state);
1084 		if (err == -ENOMEM)
1085 			err = 0;
1086 		if (!err)
1087 			err = filemap_fdatawait_range(mapping, start, end);
1088 		if (err)
1089 			werr = err;
1090 		free_extent_state(cached_state);
1091 		cached_state = NULL;
1092 		cond_resched();
1093 		start = end + 1;
1094 	}
1095 	if (err)
1096 		werr = err;
1097 	return werr;
1098 }
1099 
1100 static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
1101 		       struct extent_io_tree *dirty_pages)
1102 {
1103 	bool errors = false;
1104 	int err;
1105 
1106 	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
1107 	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
1108 		errors = true;
1109 
1110 	if (errors && !err)
1111 		err = -EIO;
1112 	return err;
1113 }
1114 
1115 int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
1116 {
1117 	struct btrfs_fs_info *fs_info = log_root->fs_info;
1118 	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
1119 	bool errors = false;
1120 	int err;
1121 
1122 	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
1123 
1124 	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
1125 	if ((mark & EXTENT_DIRTY) &&
1126 	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
1127 		errors = true;
1128 
1129 	if ((mark & EXTENT_NEW) &&
1130 	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
1131 		errors = true;
1132 
1133 	if (errors && !err)
1134 		err = -EIO;
1135 	return err;
1136 }
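/*
 * Illustrative pairing (a sketch, assuming the tree-log commit path): the
 * write side and the wait side of the marked-extent machinery are used
 * together, roughly as:
 *
 *	ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
 *	...
 *	ret = btrfs_wait_tree_log_extents(log, mark);
 */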
1137 
1138 /*
1139  * When btree blocks are allocated the corresponding extents are marked dirty.
1140  * This function ensures such extents are persisted on disk for transaction or
1141  * log commit.
1142  *
1143  * @trans: transaction whose dirty pages we'd like to write
1144  */
1145 static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
1146 {
1147 	int ret;
1148 	int ret2;
1149 	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
1150 	struct btrfs_fs_info *fs_info = trans->fs_info;
1151 	struct blk_plug plug;
1152 
1153 	blk_start_plug(&plug);
1154 	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
1155 	blk_finish_plug(&plug);
1156 	ret2 = btrfs_wait_extents(fs_info, dirty_pages);
1157 
1158 	extent_io_tree_release(&trans->transaction->dirty_pages);
1159 
1160 	if (ret)
1161 		return ret;
1162 	else if (ret2)
1163 		return ret2;
1164 	else
1165 		return 0;
1166 }
1167 
1168 /*
1169  * this is used to update the root pointer in the tree of tree roots.
1170  *
1171  * But, in the case of the extent allocation tree, updating the root
1172  * pointer may allocate blocks which may change the root of the extent
1173  * allocation tree.
1174  *
1175  * So, this loops and repeats and makes sure the cowonly root didn't
1176  * change while the root pointer was being updated in the metadata.
1177  */
1178 static int update_cowonly_root(struct btrfs_trans_handle *trans,
1179 			       struct btrfs_root *root)
1180 {
1181 	int ret;
1182 	u64 old_root_bytenr;
1183 	u64 old_root_used;
1184 	struct btrfs_fs_info *fs_info = root->fs_info;
1185 	struct btrfs_root *tree_root = fs_info->tree_root;
1186 
1187 	old_root_used = btrfs_root_used(&root->root_item);
1188 
1189 	while (1) {
1190 		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
1191 		if (old_root_bytenr == root->node->start &&
1192 		    old_root_used == btrfs_root_used(&root->root_item))
1193 			break;
1194 
1195 		btrfs_set_root_node(&root->root_item, root->node);
1196 		ret = btrfs_update_root(trans, tree_root,
1197 					&root->root_key,
1198 					&root->root_item);
1199 		if (ret)
1200 			return ret;
1201 
1202 		old_root_used = btrfs_root_used(&root->root_item);
1203 	}
1204 
1205 	return 0;
1206 }
1207 
1208 /*
1209  * update all the cowonly tree roots on disk
1210  *
1211  * The error handling in this function may not be obvious. Any of the
1212  * failures will cause the file system to go offline. We still need
1213  * to clean up the delayed refs.
1214  */
1215 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
1216 {
1217 	struct btrfs_fs_info *fs_info = trans->fs_info;
1218 	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
1219 	struct list_head *io_bgs = &trans->transaction->io_bgs;
1220 	struct list_head *next;
1221 	struct extent_buffer *eb;
1222 	int ret;
1223 
1224 	eb = btrfs_lock_root_node(fs_info->tree_root);
1225 	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
1226 			      0, &eb, BTRFS_NESTING_COW);
1227 	btrfs_tree_unlock(eb);
1228 	free_extent_buffer(eb);
1229 
1230 	if (ret)
1231 		return ret;
1232 
1233 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1234 	if (ret)
1235 		return ret;
1236 
1237 	ret = btrfs_run_dev_stats(trans);
1238 	if (ret)
1239 		return ret;
1240 	ret = btrfs_run_dev_replace(trans);
1241 	if (ret)
1242 		return ret;
1243 	ret = btrfs_run_qgroups(trans);
1244 	if (ret)
1245 		return ret;
1246 
1247 	ret = btrfs_setup_space_cache(trans);
1248 	if (ret)
1249 		return ret;
1250 
1251 	/* run_qgroups might have added some more refs */
1252 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1253 	if (ret)
1254 		return ret;
1255 again:
1256 	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
1257 		struct btrfs_root *root;
1258 		next = fs_info->dirty_cowonly_roots.next;
1259 		list_del_init(next);
1260 		root = list_entry(next, struct btrfs_root, dirty_list);
1261 		clear_bit(BTRFS_ROOT_DIRTY, &root->state);
1262 
1263 		if (root != fs_info->extent_root)
1264 			list_add_tail(&root->dirty_list,
1265 				      &trans->transaction->switch_commits);
1266 		ret = update_cowonly_root(trans, root);
1267 		if (ret)
1268 			return ret;
1269 		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1270 		if (ret)
1271 			return ret;
1272 	}
1273 
1274 	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
1275 		ret = btrfs_write_dirty_block_groups(trans);
1276 		if (ret)
1277 			return ret;
1278 		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1279 		if (ret)
1280 			return ret;
1281 	}
1282 
1283 	if (!list_empty(&fs_info->dirty_cowonly_roots))
1284 		goto again;
1285 
1286 	list_add_tail(&fs_info->extent_root->dirty_list,
1287 		      &trans->transaction->switch_commits);
1288 
1289 	/* Update dev-replace pointer once everything is committed */
1290 	fs_info->dev_replace.committed_cursor_left =
1291 		fs_info->dev_replace.cursor_left_last_write_of_item;
1292 
1293 	return 0;
1294 }
1295 
1296 /*
1297  * dead roots are old snapshots that need to be deleted.  This grabs a
1298  * reference on the root and adds it into the list of dead roots that need
1299  * to be deleted
1300  */
1301 void btrfs_add_dead_root(struct btrfs_root *root)
1302 {
1303 	struct btrfs_fs_info *fs_info = root->fs_info;
1304 
1305 	spin_lock(&fs_info->trans_lock);
1306 	if (list_empty(&root->root_list)) {
1307 		btrfs_grab_root(root);
1308 		list_add_tail(&root->root_list, &fs_info->dead_roots);
1309 	}
1310 	spin_unlock(&fs_info->trans_lock);
1311 }
1312 
1313 /*
1314  * Write all dirty fs roots (subvolume trees) into the tree of tree roots.
1315  */
1316 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
1317 {
1318 	struct btrfs_fs_info *fs_info = trans->fs_info;
1319 	struct btrfs_root *gang[8];
1320 	int i;
1321 	int ret;
1322 	int err = 0;
1323 
1324 	spin_lock(&fs_info->fs_roots_radix_lock);
1325 	while (1) {
1326 		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
1327 						 (void **)gang, 0,
1328 						 ARRAY_SIZE(gang),
1329 						 BTRFS_ROOT_TRANS_TAG);
1330 		if (ret == 0)
1331 			break;
1332 		for (i = 0; i < ret; i++) {
1333 			struct btrfs_root *root = gang[i];
1334 			radix_tree_tag_clear(&fs_info->fs_roots_radix,
1335 					(unsigned long)root->root_key.objectid,
1336 					BTRFS_ROOT_TRANS_TAG);
1337 			spin_unlock(&fs_info->fs_roots_radix_lock);
1338 
1339 			btrfs_free_log(trans, root);
1340 			btrfs_update_reloc_root(trans, root);
1341 
1342 			/* see comments in should_cow_block() */
1343 			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1344 			smp_mb__after_atomic();
1345 
1346 			if (root->commit_root != root->node) {
1347 				list_add_tail(&root->dirty_list,
1348 					&trans->transaction->switch_commits);
1349 				btrfs_set_root_node(&root->root_item,
1350 						    root->node);
1351 			}
1352 
1353 			err = btrfs_update_root(trans, fs_info->tree_root,
1354 						&root->root_key,
1355 						&root->root_item);
1356 			spin_lock(&fs_info->fs_roots_radix_lock);
1357 			if (err)
1358 				break;
1359 			btrfs_qgroup_free_meta_all_pertrans(root);
1360 		}
1361 	}
1362 	spin_unlock(&fs_info->fs_roots_radix_lock);
1363 	return err;
1364 }
1365 
1366 /*
1367  * defrag a given btree.
1368  * Every leaf in the btree is read and defragged.
1369  */
1370 int btrfs_defrag_root(struct btrfs_root *root)
1371 {
1372 	struct btrfs_fs_info *info = root->fs_info;
1373 	struct btrfs_trans_handle *trans;
1374 	int ret;
1375 
1376 	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
1377 		return 0;
1378 
1379 	while (1) {
1380 		trans = btrfs_start_transaction(root, 0);
1381 		if (IS_ERR(trans))
1382 			return PTR_ERR(trans);
1383 
1384 		ret = btrfs_defrag_leaves(trans, root);
1385 
1386 		btrfs_end_transaction(trans);
1387 		btrfs_btree_balance_dirty(info);
1388 		cond_resched();
1389 
1390 		if (btrfs_fs_closing(info) || ret != -EAGAIN)
1391 			break;
1392 
1393 		if (btrfs_defrag_cancelled(info)) {
1394 			btrfs_debug(info, "defrag_root cancelled");
1395 			ret = -EAGAIN;
1396 			break;
1397 		}
1398 	}
1399 	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
1400 	return ret;
1401 }
1402 
1403 /*
1404  * Do all special snapshot related qgroup dirty hack.
1405  *
1406  * Will do all the needed qgroup inherit and dirty hacks, like switching
1407  * commit roots inside one transaction and writing all btrees to disk, to
1408  * make qgroups work.
1409  */
1410 static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1411 				   struct btrfs_root *src,
1412 				   struct btrfs_root *parent,
1413 				   struct btrfs_qgroup_inherit *inherit,
1414 				   u64 dst_objectid)
1415 {
1416 	struct btrfs_fs_info *fs_info = src->fs_info;
1417 	int ret;
1418 
1419 	/*
1420 	 * Save some performance in the case that qgroups are not
1421 	 * enabled. If this check races with the ioctl, rescan will
1422 	 * kick in anyway.
1423 	 */
1424 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1425 		return 0;
1426 
1427 	/*
1428 	 * Ensure dirty @src will be committed.  Otherwise, after the coming
1429 	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
1430 	 * recorded root will never be updated again, causing an outdated root
1431 	 * item.
1432 	 */
1433 	record_root_in_trans(trans, src, 1);
1434 
1435 	/*
1436 	 * We are going to commit the transaction, see the comment in
1437 	 * btrfs_commit_transaction() for the reason we lock tree_log_mutex.
1438 	 */
1439 	mutex_lock(&fs_info->tree_log_mutex);
1440 
1441 	ret = commit_fs_roots(trans);
1442 	if (ret)
1443 		goto out;
1444 	ret = btrfs_qgroup_account_extents(trans);
1445 	if (ret < 0)
1446 		goto out;
1447 
1448 	/* Now qgroups are all updated, we can inherit them to the new qgroups */
1449 	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
1450 				   inherit);
1451 	if (ret < 0)
1452 		goto out;
1453 
1454 	/*
1455 	 * Now we do a simplified commit transaction, which will:
1456 	 * 1) commit all subvolume and extent trees
1457 	 *    To ensure all subvolume and extent trees have a valid
1458 	 *    commit_root for accounting by the later insert_dir_item()
1459 	 * 2) write all btree blocks onto disk
1460 	 *    This is to make sure later btree modifications will be COWed,
1461 	 *    or commit_root could be populated and cause wrong qgroup numbers
1462 	 * In this simplified commit, we don't really care about other trees
1463 	 * like chunk and root tree, as they won't affect qgroup.
1464 	 * And we don't write super to avoid half committed status.
1465 	 */
1466 	ret = commit_cowonly_roots(trans);
1467 	if (ret)
1468 		goto out;
1469 	switch_commit_roots(trans);
1470 	ret = btrfs_write_and_wait_transaction(trans);
1471 	if (ret)
1472 		btrfs_handle_fs_error(fs_info, ret,
1473 			"Error while writing out transaction for qgroup");
1474 
1475 out:
1476 	mutex_unlock(&fs_info->tree_log_mutex);
1477 
1478 	/*
1479 	 * Force parent root to be updated, as we recorded it before so its
1480 	 * last_trans == cur_transid.
1481 	 * Otherwise it won't be committed to disk again after the later
1482 	 * insert_dir_item()
1483 	 */
1484 	if (!ret)
1485 		record_root_in_trans(trans, parent, 1);
1486 	return ret;
1487 }
1488 
1489 /*
1490  * new snapshots need to be created at a very specific time in the
1491  * transaction commit.  This does the actual creation.
1492  *
1493  * Note:
1494  * If an error that may affect the commit of the current transaction
1495  * happens, we should return the error number. If the error only affects
1496  * the creation of the pending snapshots, just return 0.
1497  */
1498 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1499 				   struct btrfs_pending_snapshot *pending)
1500 {
1502 	struct btrfs_fs_info *fs_info = trans->fs_info;
1503 	struct btrfs_key key;
1504 	struct btrfs_root_item *new_root_item;
1505 	struct btrfs_root *tree_root = fs_info->tree_root;
1506 	struct btrfs_root *root = pending->root;
1507 	struct btrfs_root *parent_root;
1508 	struct btrfs_block_rsv *rsv;
1509 	struct inode *parent_inode;
1510 	struct btrfs_path *path;
1511 	struct btrfs_dir_item *dir_item;
1512 	struct dentry *dentry;
1513 	struct extent_buffer *tmp;
1514 	struct extent_buffer *old;
1515 	struct timespec64 cur_time;
1516 	int ret = 0;
1517 	u64 to_reserve = 0;
1518 	u64 index = 0;
1519 	u64 objectid;
1520 	u64 root_flags;
1521 
1522 	ASSERT(pending->path);
1523 	path = pending->path;
1524 
1525 	ASSERT(pending->root_item);
1526 	new_root_item = pending->root_item;
1527 
1528 	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
1529 	if (pending->error)
1530 		goto no_free_objectid;
1531 
1532 	/*
1533 	 * Make qgroups skip the current new snapshot's qgroupid, as it is
1534 	 * accounted for by the later btrfs_qgroup_inherit().
1535 	 */
1536 	btrfs_set_skip_qgroup(trans, objectid);
1537 
1538 	btrfs_reloc_pre_snapshot(pending, &to_reserve);
1539 
1540 	if (to_reserve > 0) {
1541 		pending->error = btrfs_block_rsv_add(root,
1542 						     &pending->block_rsv,
1543 						     to_reserve,
1544 						     BTRFS_RESERVE_NO_FLUSH);
1545 		if (pending->error)
1546 			goto clear_skip_qgroup;
1547 	}
1548 
1549 	key.objectid = objectid;
1550 	key.offset = (u64)-1;
1551 	key.type = BTRFS_ROOT_ITEM_KEY;
1552 
1553 	rsv = trans->block_rsv;
1554 	trans->block_rsv = &pending->block_rsv;
1555 	trans->bytes_reserved = trans->block_rsv->reserved;
1556 	trace_btrfs_space_reservation(fs_info, "transaction",
1557 				      trans->transid,
1558 				      trans->bytes_reserved, 1);
1559 	dentry = pending->dentry;
1560 	parent_inode = pending->dir;
1561 	parent_root = BTRFS_I(parent_inode)->root;
1562 	record_root_in_trans(trans, parent_root, 0);
1563 
1564 	cur_time = current_time(parent_inode);
1565 
1566 	/*
1567 	 * insert the directory item
1568 	 */
1569 	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
1570 	BUG_ON(ret); /* -ENOMEM */
1571 
1572 	/* check if there is a file/dir which has the same name. */
1573 	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1574 					 btrfs_ino(BTRFS_I(parent_inode)),
1575 					 dentry->d_name.name,
1576 					 dentry->d_name.len, 0);
1577 	if (dir_item != NULL && !IS_ERR(dir_item)) {
1578 		pending->error = -EEXIST;
1579 		goto dir_item_existed;
1580 	} else if (IS_ERR(dir_item)) {
1581 		ret = PTR_ERR(dir_item);
1582 		btrfs_abort_transaction(trans, ret);
1583 		goto fail;
1584 	}
1585 	btrfs_release_path(path);
1586 
1587 	/*
1588 	 * pull in the delayed directory update
1589 	 * and the delayed inode item
1590 	 * otherwise we corrupt the FS during
1591 	 * snapshot
1592 	 */
1593 	ret = btrfs_run_delayed_items(trans);
1594 	if (ret) {	/* Transaction aborted */
1595 		btrfs_abort_transaction(trans, ret);
1596 		goto fail;
1597 	}
1598 
1599 	record_root_in_trans(trans, root, 0);
1600 	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1601 	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1602 	btrfs_check_and_init_root_item(new_root_item);
1603 
1604 	root_flags = btrfs_root_flags(new_root_item);
1605 	if (pending->readonly)
1606 		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1607 	else
1608 		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1609 	btrfs_set_root_flags(new_root_item, root_flags);
1610 
1611 	btrfs_set_root_generation_v2(new_root_item,
1612 			trans->transid);
1613 	generate_random_guid(new_root_item->uuid);
1614 	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1615 			BTRFS_UUID_SIZE);
1616 	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1617 		memset(new_root_item->received_uuid, 0,
1618 		       sizeof(new_root_item->received_uuid));
1619 		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1620 		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1621 		btrfs_set_root_stransid(new_root_item, 0);
1622 		btrfs_set_root_rtransid(new_root_item, 0);
1623 	}
1624 	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1625 	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1626 	btrfs_set_root_otransid(new_root_item, trans->transid);
1627 
1628 	old = btrfs_lock_root_node(root);
1629 	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
1630 			      BTRFS_NESTING_COW);
1631 	if (ret) {
1632 		btrfs_tree_unlock(old);
1633 		free_extent_buffer(old);
1634 		btrfs_abort_transaction(trans, ret);
1635 		goto fail;
1636 	}
1637 
1638 	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1639 	/* clean up in any case */
1640 	btrfs_tree_unlock(old);
1641 	free_extent_buffer(old);
1642 	if (ret) {
1643 		btrfs_abort_transaction(trans, ret);
1644 		goto fail;
1645 	}
1646 	/* see comments in should_cow_block() */
1647 	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1648 	smp_wmb();
1649 
1650 	btrfs_set_root_node(new_root_item, tmp);
1651 	/* record when the snapshot was created in key.offset */
1652 	key.offset = trans->transid;
1653 	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1654 	btrfs_tree_unlock(tmp);
1655 	free_extent_buffer(tmp);
1656 	if (ret) {
1657 		btrfs_abort_transaction(trans, ret);
1658 		goto fail;
1659 	}
1660 
1661 	/*
1662 	 * insert root back/forward references
1663 	 */
1664 	ret = btrfs_add_root_ref(trans, objectid,
1665 				 parent_root->root_key.objectid,
1666 				 btrfs_ino(BTRFS_I(parent_inode)), index,
1667 				 dentry->d_name.name, dentry->d_name.len);
1668 	if (ret) {
1669 		btrfs_abort_transaction(trans, ret);
1670 		goto fail;
1671 	}
1672 
1673 	key.offset = (u64)-1;
1674 	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
1675 	if (IS_ERR(pending->snap)) {
1676 		ret = PTR_ERR(pending->snap);
1677 		pending->snap = NULL;
1678 		btrfs_abort_transaction(trans, ret);
1679 		goto fail;
1680 	}
1681 
1682 	ret = btrfs_reloc_post_snapshot(trans, pending);
1683 	if (ret) {
1684 		btrfs_abort_transaction(trans, ret);
1685 		goto fail;
1686 	}
1687 
1688 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1689 	if (ret) {
1690 		btrfs_abort_transaction(trans, ret);
1691 		goto fail;
1692 	}
1693 
1694 	/*
1695 	 * Do special qgroup accounting for the snapshot, as we have applied a
1696 	 * qgroup hack to make snapshot creation fast.
1697 	 * To co-operate with that hack, we apply the hack again here; otherwise
1698 	 * the snapshot would be greatly slowed down by a subtree qgroup rescan.
1699 	 */
1700 	ret = qgroup_account_snapshot(trans, root, parent_root,
1701 				      pending->inherit, objectid);
1702 	if (ret < 0)
1703 		goto fail;
1704 
1705 	ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
1706 				    dentry->d_name.len, BTRFS_I(parent_inode),
1707 				    &key, BTRFS_FT_DIR, index);
1708 	/* We checked the name at the beginning, so these errors are impossible. */
1709 	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1710 	if (ret) {
1711 		btrfs_abort_transaction(trans, ret);
1712 		goto fail;
1713 	}
1714 
1715 	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
1716 					 dentry->d_name.len * 2);
1717 	parent_inode->i_mtime = parent_inode->i_ctime =
1718 		current_time(parent_inode);
1719 	ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
1720 	if (ret) {
1721 		btrfs_abort_transaction(trans, ret);
1722 		goto fail;
1723 	}
1724 	ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
1725 				  BTRFS_UUID_KEY_SUBVOL,
1726 				  objectid);
1727 	if (ret) {
1728 		btrfs_abort_transaction(trans, ret);
1729 		goto fail;
1730 	}
1731 	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1732 		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
1733 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1734 					  objectid);
1735 		if (ret && ret != -EEXIST) {
1736 			btrfs_abort_transaction(trans, ret);
1737 			goto fail;
1738 		}
1739 	}
1740 
1741 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1742 	if (ret) {
1743 		btrfs_abort_transaction(trans, ret);
1744 		goto fail;
1745 	}
1746 
1747 fail:
1748 	pending->error = ret;
1749 dir_item_existed:
1750 	trans->block_rsv = rsv;
1751 	trans->bytes_reserved = 0;
1752 clear_skip_qgroup:
1753 	btrfs_clear_skip_qgroup(trans);
1754 no_free_objectid:
1755 	kfree(new_root_item);
1756 	pending->root_item = NULL;
1757 	btrfs_free_path(path);
1758 	pending->path = NULL;
1759 
1760 	return ret;
1761 }
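
/*
 * create_pending_snapshot() above uses the pervasive goto-fail error
 * pattern: every step aborts the transaction on its first failure and
 * jumps to a single cleanup label, so teardown happens exactly once.
 * A condensed, illustrative sketch of that shape (not an actual helper
 * in this file):
 */
static int example_goto_fail_shape(struct btrfs_trans_handle *trans)
{
	void *scratch;
	int ret;

	scratch = kmalloc(64, GFP_NOFS);
	if (!scratch) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_items(trans);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
fail:
	/* Success and failure paths converge; free in reverse order. */
	kfree(scratch);
	return ret;
}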
1762 
1763 /*
1764  * create all the snapshots we've scheduled for creation
1765  */
1766 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1767 {
1768 	struct btrfs_pending_snapshot *pending, *next;
1769 	struct list_head *head = &trans->transaction->pending_snapshots;
1770 	int ret = 0;
1771 
1772 	list_for_each_entry_safe(pending, next, head, list) {
1773 		list_del(&pending->list);
1774 		ret = create_pending_snapshot(trans, pending);
1775 		if (ret)
1776 			break;
1777 	}
1778 	return ret;
1779 }
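
/*
 * list_for_each_entry_safe() is required above because each entry is
 * unlinked while the list is walked.  The same drain pattern on a
 * hypothetical item type looks like this:
 */
struct example_item {
	struct list_head list;
};

static void example_drain_list(struct list_head *head)
{
	struct example_item *item, *next;

	list_for_each_entry_safe(item, next, head, list) {
		/* 'next' was cached, so unlinking 'item' is safe */
		list_del(&item->list);
		kfree(item);
	}
}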
1780 
1781 static void update_super_roots(struct btrfs_fs_info *fs_info)
1782 {
1783 	struct btrfs_root_item *root_item;
1784 	struct btrfs_super_block *super;
1785 
1786 	super = fs_info->super_copy;
1787 
1788 	root_item = &fs_info->chunk_root->root_item;
1789 	super->chunk_root = root_item->bytenr;
1790 	super->chunk_root_generation = root_item->generation;
1791 	super->chunk_root_level = root_item->level;
1792 
1793 	root_item = &fs_info->tree_root->root_item;
1794 	super->root = root_item->bytenr;
1795 	super->generation = root_item->generation;
1796 	super->root_level = root_item->level;
1797 	if (btrfs_test_opt(fs_info, SPACE_CACHE))
1798 		super->cache_generation = root_item->generation;
1799 	else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
1800 		super->cache_generation = 0;
1801 	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1802 		super->uuid_tree_generation = root_item->generation;
1803 }
1804 
1805 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1806 {
1807 	struct btrfs_transaction *trans;
1808 	int ret = 0;
1809 
1810 	spin_lock(&info->trans_lock);
1811 	trans = info->running_transaction;
1812 	if (trans)
1813 		ret = (trans->state >= TRANS_STATE_COMMIT_START);
1814 	spin_unlock(&info->trans_lock);
1815 	return ret;
1816 }
1817 
1818 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1819 {
1820 	struct btrfs_transaction *trans;
1821 	int ret = 0;
1822 
1823 	spin_lock(&info->trans_lock);
1824 	trans = info->running_transaction;
1825 	if (trans)
1826 		ret = is_transaction_blocked(trans);
1827 	spin_unlock(&info->trans_lock);
1828 	return ret;
1829 }
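
/*
 * Both helpers above only dereference fs_info->running_transaction while
 * holding trans_lock, because a commit can clear or replace that pointer
 * at any time.  A sketch of the same guarded-read idiom:
 */
static u64 example_read_running_transid(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	u64 transid = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		transid = trans->transid;
	spin_unlock(&info->trans_lock);

	return transid;
}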
1830 
1831 /*
1832  * wait for the current transaction commit to start and block subsequent
1833  * transaction joins
1834  */
1835 static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
1836 					    struct btrfs_transaction *trans)
1837 {
1838 	wait_event(fs_info->transaction_blocked_wait,
1839 		   trans->state >= TRANS_STATE_COMMIT_START ||
1840 		   TRANS_ABORTED(trans));
1841 }
1842 
1843 /*
1844  * wait for the current transaction to start and then become unblocked.
1845  * caller holds ref.
1846  */
1847 static void wait_current_trans_commit_start_and_unblock(
1848 					struct btrfs_fs_info *fs_info,
1849 					struct btrfs_transaction *trans)
1850 {
1851 	wait_event(fs_info->transaction_wait,
1852 		   trans->state >= TRANS_STATE_UNBLOCKED ||
1853 		   TRANS_ABORTED(trans));
1854 }
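
/*
 * Both wait helpers rely on the standard wait_event()/wake_up() pairing:
 * the committer updates cur_trans->state and then wakes the queue, and
 * each sleeper re-checks its condition after every wakeup.  A condensed
 * sketch of the waker side (the real code in btrfs_commit_transaction()
 * sets the state under trans_lock):
 */
static void example_publish_commit_start(struct btrfs_fs_info *fs_info,
					 struct btrfs_transaction *cur_trans)
{
	cur_trans->state = TRANS_STATE_COMMIT_START;
	/* Pairs with wait_current_trans_commit_start() above. */
	wake_up(&fs_info->transaction_blocked_wait);
}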
1855 
1856 /*
1857  * commit transactions asynchronously. once btrfs_commit_transaction_async
1858  * returns, any subsequent transaction will not be allowed to join.
1859  */
1860 struct btrfs_async_commit {
1861 	struct btrfs_trans_handle *newtrans;
1862 	struct work_struct work;
1863 };
1864 
1865 static void do_async_commit(struct work_struct *work)
1866 {
1867 	struct btrfs_async_commit *ac =
1868 		container_of(work, struct btrfs_async_commit, work);
1869 
1870 	/*
1871 	 * We've got freeze protection passed with the transaction.
1872 	 * Tell lockdep about it.
1873 	 */
1874 	if (ac->newtrans->type & __TRANS_FREEZABLE)
1875 		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);
1876 
1877 	current->journal_info = ac->newtrans;
1878 
1879 	btrfs_commit_transaction(ac->newtrans);
1880 	kfree(ac);
1881 }
1882 
1883 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1884 				   int wait_for_unblock)
1885 {
1886 	struct btrfs_fs_info *fs_info = trans->fs_info;
1887 	struct btrfs_async_commit *ac;
1888 	struct btrfs_transaction *cur_trans;
1889 
1890 	ac = kmalloc(sizeof(*ac), GFP_NOFS);
1891 	if (!ac)
1892 		return -ENOMEM;
1893 
1894 	INIT_WORK(&ac->work, do_async_commit);
1895 	ac->newtrans = btrfs_join_transaction(trans->root);
1896 	if (IS_ERR(ac->newtrans)) {
1897 		int err = PTR_ERR(ac->newtrans);
1898 		kfree(ac);
1899 		return err;
1900 	}
1901 
1902 	/* take transaction reference */
1903 	cur_trans = trans->transaction;
1904 	refcount_inc(&cur_trans->use_count);
1905 
1906 	btrfs_end_transaction(trans);
1907 
1908 	/*
1909 	 * Tell lockdep we've released the freeze rwsem, since the
1910 	 * async commit thread will be the one to unlock it.
1911 	 */
1912 	if (ac->newtrans->type & __TRANS_FREEZABLE)
1913 		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);
1914 
1915 	schedule_work(&ac->work);
1916 
1917 	/* wait for transaction to start and unblock */
1918 	if (wait_for_unblock)
1919 		wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
1920 	else
1921 		wait_current_trans_commit_start(fs_info, cur_trans);
1922 
1923 	if (current->journal_info == trans)
1924 		current->journal_info = NULL;
1925 
1926 	btrfs_put_transaction(cur_trans);
1927 	return 0;
1928 }
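
/*
 * A sketch of how a caller might use the async commit above: hand the
 * current handle off and choose whether to wait for the new transaction
 * to become unblocked (hypothetical caller, not an in-tree user):
 */
static int example_commit_async(struct btrfs_trans_handle *trans)
{
	/* wait_for_unblock == 1: return once the commit is UNBLOCKED */
	return btrfs_commit_transaction_async(trans, 1);
}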
1929 
1930 
1931 static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1932 {
1933 	struct btrfs_fs_info *fs_info = trans->fs_info;
1934 	struct btrfs_transaction *cur_trans = trans->transaction;
1935 
1936 	WARN_ON(refcount_read(&trans->use_count) > 1);
1937 
1938 	btrfs_abort_transaction(trans, err);
1939 
1940 	spin_lock(&fs_info->trans_lock);
1941 
1942 	/*
1943 	 * If the transaction is removed from the list, it means this
1944 	 * transaction has been committed successfully, so it is impossible
1945 	 * to call the cleanup function.
1946 	 */
1947 	BUG_ON(list_empty(&cur_trans->list));
1948 
1949 	list_del_init(&cur_trans->list);
1950 	if (cur_trans == fs_info->running_transaction) {
1951 		cur_trans->state = TRANS_STATE_COMMIT_DOING;
1952 		spin_unlock(&fs_info->trans_lock);
1953 		wait_event(cur_trans->writer_wait,
1954 			   atomic_read(&cur_trans->num_writers) == 1);
1955 
1956 		spin_lock(&fs_info->trans_lock);
1957 	}
1958 	spin_unlock(&fs_info->trans_lock);
1959 
1960 	btrfs_cleanup_one_transaction(trans->transaction, fs_info);
1961 
1962 	spin_lock(&fs_info->trans_lock);
1963 	if (cur_trans == fs_info->running_transaction)
1964 		fs_info->running_transaction = NULL;
1965 	spin_unlock(&fs_info->trans_lock);
1966 
1967 	if (trans->type & __TRANS_FREEZABLE)
1968 		sb_end_intwrite(fs_info->sb);
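	/*
	 * Two puts: one drops the reference that kept the transaction on
	 * fs_info->trans_list (it was unlinked above), the other drops this
	 * handle's reference.
	 */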
1969 	btrfs_put_transaction(cur_trans);
1970 	btrfs_put_transaction(cur_trans);
1971 
1972 	trace_btrfs_transaction_commit(trans->root);
1973 
1974 	if (current->journal_info == trans)
1975 		current->journal_info = NULL;
1976 	btrfs_scrub_cancel(fs_info);
1977 
1978 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
1979 }
1980 
1981 /*
1982  * Release reserved delayed ref space of all pending block groups of the
1983  * transaction and remove them from the list
1984  */
1985 static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
1986 {
1987 	struct btrfs_fs_info *fs_info = trans->fs_info;
1988 	struct btrfs_block_group *block_group, *tmp;
1989 
1990 	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
1991 		btrfs_delayed_refs_rsv_release(fs_info, 1);
1992 		list_del_init(&block_group->bg_list);
1993 	}
1994 }
1995 
1996 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
1997 {
1998 	/*
1999 	 * We use writeback_inodes_sb here because if we used
2000 	 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
2001 	 * We are currently holding the fs freeze lock; if we do an async flush
2002 	 * we'll do btrfs_join_transaction() and deadlock because we need to
2003 	 * wait for the fs freeze lock.  Using the direct flushing we benefit
2004 	 * from already being in a transaction and our join_transaction doesn't
2005 	 * have to re-take the fs freeze lock.
2006 	 */
2007 	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2008 		writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
2009 	return 0;
2010 }
2011 
2012 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
2013 {
2014 	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2015 		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
2016 }
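
/*
 * The two FLUSHONCOMMIT helpers bracket the early part of the commit:
 * writeback is started first, other pre-commit work proceeds, and only
 * then are the ordered extents waited on.  Condensed from
 * btrfs_commit_transaction() below:
 *
 *	ret = btrfs_start_delalloc_flush(fs_info);
 *	...
 *	ret = btrfs_run_delayed_items(trans);
 *	...
 *	btrfs_wait_delalloc_flush(fs_info);
 */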
2017 
2018 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2019 {
2020 	struct btrfs_fs_info *fs_info = trans->fs_info;
2021 	struct btrfs_transaction *cur_trans = trans->transaction;
2022 	struct btrfs_transaction *prev_trans = NULL;
2023 	int ret;
2024 
2025 	ASSERT(refcount_read(&trans->use_count) == 1);
2026 
2027 	/*
2028 	 * Some places just start a transaction to commit it.  We need to make
2029 	 * sure that if this commit fails, the abort code actually marks the
2030 	 * transaction as failed, so set trans->dirty to make the abort code do
2031 	 * the right thing.
2032 	 */
2033 	trans->dirty = true;
2034 
2035 	/* Stop the commit early if ->aborted is set */
2036 	if (TRANS_ABORTED(cur_trans)) {
2037 		ret = cur_trans->aborted;
2038 		btrfs_end_transaction(trans);
2039 		return ret;
2040 	}
2041 
2042 	btrfs_trans_release_metadata(trans);
2043 	trans->block_rsv = NULL;
2044 
2045 	/* make a pass through all the delayed refs we have so far;
2046 	 * any running procs may add more while we are here
2047 	 */
2048 	ret = btrfs_run_delayed_refs(trans, 0);
2049 	if (ret) {
2050 		btrfs_end_transaction(trans);
2051 		return ret;
2052 	}
2053 
2054 	cur_trans = trans->transaction;
2055 
2056 	/*
2057 	 * set the flushing flag so procs in this transaction have to
2058 	 * start sending their work down.
2059 	 */
2060 	cur_trans->delayed_refs.flushing = 1;
2061 	smp_wmb();
2062 
2063 	btrfs_create_pending_block_groups(trans);
2064 
2065 	ret = btrfs_run_delayed_refs(trans, 0);
2066 	if (ret) {
2067 		btrfs_end_transaction(trans);
2068 		return ret;
2069 	}
2070 
2071 	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
2072 		int run_it = 0;
2073 
2074 		/* this mutex is also taken before trying to set
2075 		 * block groups readonly.  We need to make sure
2076 		 * that nobody has set a block group readonly
2077 		 * after extents from that block group have been
2078 		 * allocated for cache files.  btrfs_set_block_group_ro
2079 		 * will wait for the transaction to commit if it
2080 		 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
2081 		 *
2082 		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
2083 		 * only one process starts all the block group IO.  It wouldn't
2084 		 * hurt to have more than one go through, but there's no
2085 		 * real advantage to it either.
2086 		 */
2087 		mutex_lock(&fs_info->ro_block_group_mutex);
2088 		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
2089 				      &cur_trans->flags))
2090 			run_it = 1;
2091 		mutex_unlock(&fs_info->ro_block_group_mutex);
2092 
2093 		if (run_it) {
2094 			ret = btrfs_start_dirty_block_groups(trans);
2095 			if (ret) {
2096 				btrfs_end_transaction(trans);
2097 				return ret;
2098 			}
2099 		}
2100 	}
2101 
2102 	spin_lock(&fs_info->trans_lock);
2103 	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
2104 		spin_unlock(&fs_info->trans_lock);
2105 		refcount_inc(&cur_trans->use_count);
2106 		ret = btrfs_end_transaction(trans);
2107 
2108 		wait_for_commit(cur_trans);
2109 
2110 		if (TRANS_ABORTED(cur_trans))
2111 			ret = cur_trans->aborted;
2112 
2113 		btrfs_put_transaction(cur_trans);
2114 
2115 		return ret;
2116 	}
2117 
2118 	cur_trans->state = TRANS_STATE_COMMIT_START;
2119 	wake_up(&fs_info->transaction_blocked_wait);
2120 
2121 	if (cur_trans->list.prev != &fs_info->trans_list) {
2122 		prev_trans = list_entry(cur_trans->list.prev,
2123 					struct btrfs_transaction, list);
2124 		if (prev_trans->state != TRANS_STATE_COMPLETED) {
2125 			refcount_inc(&prev_trans->use_count);
2126 			spin_unlock(&fs_info->trans_lock);
2127 
2128 			wait_for_commit(prev_trans);
2129 			ret = READ_ONCE(prev_trans->aborted);
2130 
2131 			btrfs_put_transaction(prev_trans);
2132 			if (ret)
2133 				goto cleanup_transaction;
2134 		} else {
2135 			spin_unlock(&fs_info->trans_lock);
2136 		}
2137 	} else {
2138 		spin_unlock(&fs_info->trans_lock);
2139 		/*
2140 		 * The previous transaction was aborted and was already removed
2141 		 * from the list of transactions at fs_info->trans_list. So we
2142 		 * abort to prevent writing a new superblock that reflects a
2143 		 * corrupt state (pointing to trees with unwritten nodes/leaves).
2144 		 */
2145 		if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
2146 			ret = -EROFS;
2147 			goto cleanup_transaction;
2148 		}
2149 	}
2150 
2151 	extwriter_counter_dec(cur_trans, trans->type);
2152 
2153 	ret = btrfs_start_delalloc_flush(fs_info);
2154 	if (ret)
2155 		goto cleanup_transaction;
2156 
2157 	ret = btrfs_run_delayed_items(trans);
2158 	if (ret)
2159 		goto cleanup_transaction;
2160 
2161 	wait_event(cur_trans->writer_wait,
2162 		   extwriter_counter_read(cur_trans) == 0);
2163 
2164 	/* some pending items might be added after the previous flush. */
2165 	ret = btrfs_run_delayed_items(trans);
2166 	if (ret)
2167 		goto cleanup_transaction;
2168 
2169 	btrfs_wait_delalloc_flush(fs_info);
2170 
2171 	/*
2172 	 * Wait for all ordered extents started by a fast fsync that joined this
2173 	 * transaction. Otherwise if this transaction commits before the ordered
2174 	 * extents complete we lose logged data after a power failure.
2175 	 */
2176 	wait_event(cur_trans->pending_wait,
2177 		   atomic_read(&cur_trans->pending_ordered) == 0);
2178 
2179 	btrfs_scrub_pause(fs_info);
2180 	/*
2181 	 * Ok now we need to make sure to block out any other joins while we
2182 	 * commit the transaction.  We could have started a join before setting
2183 	 * COMMIT_DOING so make sure to wait for num_writers to be 1 again.
2184 	 */
2185 	spin_lock(&fs_info->trans_lock);
2186 	cur_trans->state = TRANS_STATE_COMMIT_DOING;
2187 	spin_unlock(&fs_info->trans_lock);
2188 	wait_event(cur_trans->writer_wait,
2189 		   atomic_read(&cur_trans->num_writers) == 1);
2190 
2191 	if (TRANS_ABORTED(cur_trans)) {
2192 		ret = cur_trans->aborted;
2193 		goto scrub_continue;
2194 	}
2195 	/*
2196 	 * the reloc mutex makes sure that we stop
2197 	 * the balancing code from coming in and moving
2198 	 * extents around in the middle of the commit
2199 	 */
2200 	mutex_lock(&fs_info->reloc_mutex);
2201 
2202 	/*
2203 	 * We needn't worry about the delayed items because we will
2204 	 * deal with them in create_pending_snapshot(), which is the
2205 	 * core function of the snapshot creation.
2206 	 */
2207 	ret = create_pending_snapshots(trans);
2208 	if (ret)
2209 		goto unlock_reloc;
2210 
2211 	/*
2212 	 * We insert the dir indexes of the snapshots and update the inode
2213 	 * of the snapshots' parents after the snapshot creation, so there
2214 	 * are some delayed items which are not dealt with. Now deal with
2215 	 * them.
2216 	 *
2217 	 * We needn't worry that this operation will corrupt the snapshots,
2218 	 * because all the trees which are snapshotted will be forced to COW
2219 	 * the nodes and leaves.
2220 	 */
2221 	ret = btrfs_run_delayed_items(trans);
2222 	if (ret)
2223 		goto unlock_reloc;
2224 
2225 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2226 	if (ret)
2227 		goto unlock_reloc;
2228 
2229 	/*
2230 	 * make sure none of the code above managed to slip in a
2231 	 * delayed item
2232 	 */
2233 	btrfs_assert_delayed_root_empty(fs_info);
2234 
2235 	WARN_ON(cur_trans != trans->transaction);
2236 
2237 	/* commit_cowonly_roots is responsible for getting the
2238 	 * various roots consistent with each other.  Every pointer
2239 	 * in the tree of tree roots has to point to the most up to date
2240 	 * root for every subvolume and other tree.  So, we have to keep
2241 	 * the tree logging code from jumping in and changing any
2242 	 * of the trees.
2243 	 *
2244 	 * At this point in the commit, there can't be any tree-log
2245 	 * writers, but a little lower down we drop the trans mutex
2246 	 * and let new people in.  By holding the tree_log_mutex
2247 	 * from now until after the super is written, we avoid races
2248 	 * with the tree-log code.
2249 	 */
2250 	mutex_lock(&fs_info->tree_log_mutex);
2251 
2252 	ret = commit_fs_roots(trans);
2253 	if (ret)
2254 		goto unlock_tree_log;
2255 
2256 	/*
2257 	 * Since the transaction is done, we can apply the pending changes
2258 	 * before the next transaction.
2259 	 */
2260 	btrfs_apply_pending_changes(fs_info);
2261 
2262 	/* commit_fs_roots gets rid of all the tree log roots; it is now
2263 	 * safe to free the log root tree
2264 	 */
2265 	btrfs_free_log_root_tree(trans, fs_info);
2266 
2267 	/*
2268 	 * Since fs roots are all committed, we can get a quite accurate
2269 	 * new_roots. So let's do quota accounting.
2270 	 */
2271 	ret = btrfs_qgroup_account_extents(trans);
2272 	if (ret < 0)
2273 		goto unlock_tree_log;
2274 
2275 	ret = commit_cowonly_roots(trans);
2276 	if (ret)
2277 		goto unlock_tree_log;
2278 
2279 	/*
2280 	 * The tasks which save the space cache and inode cache may also
2281 	 * update ->aborted, so check it again here.
2282 	 */
2283 	if (TRANS_ABORTED(cur_trans)) {
2284 		ret = cur_trans->aborted;
2285 		goto unlock_tree_log;
2286 	}
2287 
2288 	cur_trans = fs_info->running_transaction;
2289 
2290 	btrfs_set_root_node(&fs_info->tree_root->root_item,
2291 			    fs_info->tree_root->node);
2292 	list_add_tail(&fs_info->tree_root->dirty_list,
2293 		      &cur_trans->switch_commits);
2294 
2295 	btrfs_set_root_node(&fs_info->chunk_root->root_item,
2296 			    fs_info->chunk_root->node);
2297 	list_add_tail(&fs_info->chunk_root->dirty_list,
2298 		      &cur_trans->switch_commits);
2299 
2300 	switch_commit_roots(trans);
2301 
2302 	ASSERT(list_empty(&cur_trans->dirty_bgs));
2303 	ASSERT(list_empty(&cur_trans->io_bgs));
2304 	update_super_roots(fs_info);
2305 
2306 	btrfs_set_super_log_root(fs_info->super_copy, 0);
2307 	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2308 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2309 	       sizeof(*fs_info->super_copy));
2310 
2311 	btrfs_commit_device_sizes(cur_trans);
2312 
2313 	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2314 	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2315 
2316 	btrfs_trans_release_chunk_metadata(trans);
2317 
2318 	spin_lock(&fs_info->trans_lock);
2319 	cur_trans->state = TRANS_STATE_UNBLOCKED;
2320 	fs_info->running_transaction = NULL;
2321 	spin_unlock(&fs_info->trans_lock);
2322 	mutex_unlock(&fs_info->reloc_mutex);
2323 
2324 	wake_up(&fs_info->transaction_wait);
2325 
2326 	ret = btrfs_write_and_wait_transaction(trans);
2327 	if (ret) {
2328 		btrfs_handle_fs_error(fs_info, ret,
2329 				      "Error while writing out transaction");
2330 		/*
2331 		 * reloc_mutex has been unlocked; tree_log_mutex is still held,
2332 		 * but jumping to unlock_tree_log would unlock reloc_mutex twice
2333 		 */
2334 		mutex_unlock(&fs_info->tree_log_mutex);
2335 		goto scrub_continue;
2336 	}
2337 
2338 	ret = write_all_supers(fs_info, 0);
2339 	/*
2340 	 * the super is written, we can safely allow the tree-loggers
2341 	 * to go about their business
2342 	 */
2343 	mutex_unlock(&fs_info->tree_log_mutex);
2344 	if (ret)
2345 		goto scrub_continue;
2346 
2347 	btrfs_finish_extent_commit(trans);
2348 
2349 	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2350 		btrfs_clear_space_info_full(fs_info);
2351 
2352 	fs_info->last_trans_committed = cur_trans->transid;
2353 	/*
2354 	 * We needn't acquire the lock here because there is no other task
2355 	 * which can change it.
2356 	 */
2357 	cur_trans->state = TRANS_STATE_COMPLETED;
2358 	wake_up(&cur_trans->commit_wait);
2359 
2360 	spin_lock(&fs_info->trans_lock);
2361 	list_del_init(&cur_trans->list);
2362 	spin_unlock(&fs_info->trans_lock);
2363 
2364 	btrfs_put_transaction(cur_trans);
2365 	btrfs_put_transaction(cur_trans);
2366 
2367 	if (trans->type & __TRANS_FREEZABLE)
2368 		sb_end_intwrite(fs_info->sb);
2369 
2370 	trace_btrfs_transaction_commit(trans->root);
2371 
2372 	btrfs_scrub_continue(fs_info);
2373 
2374 	if (current->journal_info == trans)
2375 		current->journal_info = NULL;
2376 
2377 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
2378 
2379 	return ret;
2380 
2381 unlock_tree_log:
2382 	mutex_unlock(&fs_info->tree_log_mutex);
2383 unlock_reloc:
2384 	mutex_unlock(&fs_info->reloc_mutex);
2385 scrub_continue:
2386 	btrfs_scrub_continue(fs_info);
2387 cleanup_transaction:
2388 	btrfs_trans_release_metadata(trans);
2389 	btrfs_cleanup_pending_block_groups(trans);
2390 	btrfs_trans_release_chunk_metadata(trans);
2391 	trans->block_rsv = NULL;
2392 	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2393 	if (current->journal_info == trans)
2394 		current->journal_info = NULL;
2395 	cleanup_transaction(trans, ret);
2396 
2397 	return ret;
2398 }
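
/*
 * A minimal sketch of the typical caller pattern for the commit path
 * above: start a handle, modify trees, then commit synchronously.  The
 * body between start and commit is elided; this is illustrative, not an
 * actual in-tree caller.
 */
static int example_sync_commit(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	/* Reserve metadata space for one item's worth of modifications. */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... modify fs trees here ... */

	return btrfs_commit_transaction(trans);
}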
2399 
2400 /*
2401  * return < 0 if error
2402  * 0 if there are no more dead_roots at the time of call
2403  * 1 there are more to be processed, call me again
2404  *
2405  * The return value indicates there are certainly more snapshots to delete, but
2406  * if a new one arrives during processing, it may return 0. We don't mind,
2407  * because btrfs_commit_super will poke the cleaner thread and it will
2408  * process it a few seconds later.
2409  */
2410 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2411 {
2412 	int ret;
2413 	struct btrfs_fs_info *fs_info = root->fs_info;
2414 
2415 	spin_lock(&fs_info->trans_lock);
2416 	if (list_empty(&fs_info->dead_roots)) {
2417 		spin_unlock(&fs_info->trans_lock);
2418 		return 0;
2419 	}
2420 	root = list_first_entry(&fs_info->dead_roots,
2421 			struct btrfs_root, root_list);
2422 	list_del_init(&root->root_list);
2423 	spin_unlock(&fs_info->trans_lock);
2424 
2425 	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
2426 
2427 	btrfs_kill_all_delayed_nodes(root);
2428 
2429 	if (btrfs_header_backref_rev(root->node) <
2430 			BTRFS_MIXED_BACKREF_REV)
2431 		ret = btrfs_drop_snapshot(root, 0, 0);
2432 	else
2433 		ret = btrfs_drop_snapshot(root, 1, 0);
2434 
2435 	btrfs_put_root(root);
2436 	return (ret < 0) ? 0 : 1;
2437 }
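
/*
 * Per the contract documented above, a caller keeps invoking
 * btrfs_clean_one_deleted_snapshot() while it returns 1.  A sketch of
 * that driving loop (the real driver is the cleaner kthread):
 */
static void example_drain_dead_roots(struct btrfs_root *root)
{
	while (btrfs_clean_one_deleted_snapshot(root) > 0)
		cond_resched();
}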
2438 
2439 void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2440 {
2441 	unsigned long prev;
2442 	unsigned long bit;
2443 
2444 	prev = xchg(&fs_info->pending_changes, 0);
2445 	if (!prev)
2446 		return;
2447 
2448 	bit = 1 << BTRFS_PENDING_COMMIT;
2449 	if (prev & bit)
2450 		btrfs_debug(fs_info, "pending commit done");
2451 	prev &= ~bit;
2452 
2453 	if (prev)
2454 		btrfs_warn(fs_info,
2455 			"unknown pending changes left 0x%lx, ignoring", prev);
2456 }
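
/*
 * Setter side of the pending-changes protocol, as a sketch: bits are set
 * atomically in fs_info->pending_changes while a transaction runs, and
 * the commit consumes them all at once via the xchg() above.
 */
static void example_request_pending_commit(struct btrfs_fs_info *fs_info)
{
	set_bit(BTRFS_PENDING_COMMIT, &fs_info->pending_changes);
}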
2457