xref: /openbmc/linux/fs/btrfs/disk-io.c (revision c5c87812)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"
#include "block-group.h"
#include "discard.h"
#include "space-info.h"

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
	if (fs_info->csum_shash)
		crypto_free_shash(fs_info->csum_shash);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct bio *bio;
	extent_submit_bio_start_t *submit_bio_start;
	int mirror_num;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error "BTRFS_MAX_LEVEL changed, update the lockdep keysets below"
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif
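
/*
 * Editorial example (not part of the original source): with the keyset
 * table above, a level-2 node of the extent tree (array index 1) gets
 * lockdep key btrfs_lockdep_keysets[1].keys[2] and the lock name
 * "btrfs-extent-02", so lockdep reports identify both the tree and the
 * depth at a glance.
 */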

/*
 * Compute the csum of a btree block and store the result in the provided
 * buffer.
 */
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	const int num_pages = fs_info->nodesize >> PAGE_SHIFT;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;
	int i;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	kaddr = page_address(buf->pages[0]);
	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    PAGE_SIZE - BTRFS_CSUM_SIZE);

	for (i = 1; i < num_pages; i++) {
		kaddr = page_address(buf->pages[i]);
		crypto_shash_update(shash, kaddr, PAGE_SIZE);
	}
	memset(result, 0, BTRFS_CSUM_SIZE);
	crypto_shash_final(shash, result);
}
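
/*
 * Editorial sketch of what the digest above covers (illustrative only):
 *
 *	page 0:      [csum: BTRFS_CSUM_SIZE][data: PAGE_SIZE - BTRFS_CSUM_SIZE]
 *	page 1..n-1: [data: PAGE_SIZE]
 *
 * i.e. everything in the block except the checksum area itself, which is
 * why the first crypto_shash_update() call skips BTRFS_CSUM_SIZE bytes.
 */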

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_read(eb);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}
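
/*
 * Editorial note: callers that cannot block pass atomic=1 and get -EAGAIN
 * back instead of sleeping on the extent lock; they are expected to retry
 * the verification from a context where blocking is allowed.
 */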

static bool btrfs_supported_super_csum(u16 csum_type)
{
	switch (csum_type) {
	case BTRFS_CSUM_TYPE_CRC32:
	case BTRFS_CSUM_TYPE_XXHASH:
	case BTRFS_CSUM_TYPE_SHA256:
	case BTRFS_CSUM_TYPE_BLAKE2:
		return true;
	default:
		return false;
	}
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	char result[BTRFS_CSUM_SIZE];
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

	shash->tfm = fs_info->csum_shash;

	/*
	 * The super_block structure does not span the whole
	 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
	 * filled with zeros and is included in the checksum.
	 */
	crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);

	if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb)))
		return 1;

	return 0;
}
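
/*
 * Editorial sketch of the superblock checksum layout checked above
 * (illustrative only):
 *
 *	offset 0:               csum[BTRFS_CSUM_SIZE]
 *	offset BTRFS_CSUM_SIZE: checksummed data, including any zero padding,
 *	                        up to BTRFS_SUPER_INFO_SIZE
 *
 * A return value of 1 means the caller must treat the superblock as invalid.
 */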

int btrfs_verify_level_key(struct extent_buffer *eb, int level,
			   struct btrfs_key *first_key, u64 parent_transid)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int found_level;
	struct btrfs_key found_key;
	int ret;

	found_level = btrfs_header_level(eb);
	if (found_level != level) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
		     KERN_ERR "BTRFS: tree level check failed\n");
		btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
			  eb->start, level, found_level);
		return -EIO;
	}

	if (!first_key)
		return 0;

	/*
	 * For live tree blocks (new tree blocks in the current transaction),
	 * we need a proper lock context to avoid races, which is impossible
	 * here.  So we only check tree blocks that have been read from disk,
	 * whose generation <= fs_info->last_trans_committed.
	 */
	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
		return 0;

	/* We have @first_key, so this @eb must have at least one item */
	if (btrfs_header_nritems(eb) == 0) {
		btrfs_err(fs_info,
		"invalid tree nritems, bytenr=%llu nritems=0 expect >0",
			  eb->start);
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		return -EUCLEAN;
	}

	if (found_level)
		btrfs_node_key_to_cpu(eb, &found_key, 0);
	else
		btrfs_item_key_to_cpu(eb, &found_key, 0);
	ret = btrfs_comp_cpu_keys(first_key, &found_key);

	if (ret) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
		     KERN_ERR "BTRFS: tree first key check failed\n");
		btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
			  eb->start, parent_transid, first_key->objectid,
			  first_key->type, first_key->offset,
			  found_key.objectid, found_key.type,
			  found_key.offset);
	}
	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:	expected transid, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
					  u64 parent_transid, int level,
					  struct btrfs_key *first_key)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
		if (!ret) {
			if (verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				ret = -EIO;
			else if (btrfs_verify_level_key(eb, level,
						first_key, parent_transid))
				ret = -EUCLEAN;
			else
				break;
		}

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		btrfs_repair_eb_io_failure(eb, failed_mirror);

	return ret;
}
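
/*
 * Editorial worked example for the retry loop above: with num_copies == 2
 * and the initial read (mirror_num == 0) failing on mirror 1, the loop
 * records failed_mirror = 1, bumps mirror_num to 1, skips it because it
 * matches failed_mirror, and retries with mirror_num == 2 before giving up.
 */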

/*
 * Checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block.
 */
static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	u8 result[BTRFS_CSUM_SIZE];
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	struct extent_buffer *eb;
	int ret;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
				    offsetof(struct btrfs_header, fsid),
				    BTRFS_FSID_SIZE) == 0);

	csum_tree_block(eb, result);

	if (btrfs_header_level(eb))
		ret = btrfs_check_node(eb);
	else
		ret = btrfs_check_leaf_full(eb);

	if (ret < 0) {
		btrfs_print_tree(eb, 0);
		btrfs_err(fs_info,
		"block=%llu write time tree block corruption detected",
			  eb->start);
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		return ret;
	}
	write_extent_buffer(eb, result, 0, csum_size);

	return 0;
}

static int check_tree_block_fsid(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	u8 fsid[BTRFS_FSID_SIZE];
	u8 *metadata_uuid;

	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
			   BTRFS_FSID_SIZE);
	/*
	 * Checking the incompat flag is only valid for the current fs. For
	 * seed devices it's forbidden to have their uuid changed so reading
	 * ->fsid in this case is fine
	 */
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		metadata_uuid = fs_devices->metadata_uuid;
	else
		metadata_uuid = fs_devices->fsid;

	if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE))
		return 0;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
			return 0;

	return 1;
}

int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio, u64 phy_offset,
				   struct page *page, u64 start, u64 end,
				   int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_fs_info *fs_info;
	u16 csum_size;
	int ret = 0;
	u8 result[BTRFS_CSUM_SIZE];
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;
	fs_info = eb->fs_info;
	csum_size = btrfs_super_csum_size(fs_info->super_copy);

	/*
	 * The pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks.
	 */
	atomic_inc(&eb->refs);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
			     eb->start, found_start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d on %llu",
			  (int)btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	csum_tree_block(eb, result);

	if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
		u8 val[BTRFS_CSUM_SIZE] = { 0 };

		read_extent_buffer(eb, &val, 0, csum_size);
		btrfs_warn_rl(fs_info,
	"%s checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d",
			      fs_info->sb->s_id, eb->start,
			      CSUM_FMT_VALUE(csum_size, val),
			      CSUM_FMT_VALUE(csum_size, result),
			      btrfs_header_level(eb));
		ret = -EUCLEAN;
		goto err;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
	else
		btrfs_err(fs_info,
			  "block=%llu read time tree block corruption detected",
			  eb->start);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * Our IO error hook is going to decrement io_pages again, so
		 * make sure it has something to decrement.
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
			wq = fs_info->endio_meta_write_workers;
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
			wq = fs_info->endio_freespace_worker;
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			wq = fs_info->endio_raid56_workers;
		else
			wq = fs_info->endio_write_workers;
	} else {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			wq = fs_info->endio_raid56_workers;
		else if (end_io_wq->metadata)
			wq = fs_info->endio_meta_workers;
		else
			wq = fs_info->endio_workers;
	}

	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
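
/*
 * Editorial usage sketch (error handling elided); this mirrors what
 * btrfs_submit_metadata_bio() below does for reads:
 *
 *	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_METADATA);
 *	if (ret)
 *		return ret;
 *	ret = btrfs_map_bio(fs_info, bio, mirror_num);
 *
 * The original bi_end_io/bi_private are stashed in the wq struct and
 * restored by end_workqueue_fn() once the work item runs.
 */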

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.  All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	struct inode *inode;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	inode = async->private_data;

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	/*
	 * All of the bios that pass through here are from async helpers.
	 * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
	 * This changes nothing when cgroups aren't in use.
	 */
	async->bio->bi_opf |= REQ_CGROUP_PUNT;
	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
	if (ret) {
		async->bio->bi_status = ret;
		bio_endio(async->bio);
	}
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_start_t *submit_bio_start)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;

	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
			run_one_async_free);

	async->bio_offset = bio_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}
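
/*
 * Editorial note on the async lifecycle wired up above: the work item runs
 * run_one_async_start() (checksumming via submit_bio_start), then
 * run_one_async_done() (btrfs_map_bio() or error completion), and finally
 * run_one_async_free() (kfree of the async struct).
 */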

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int ret = 0;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, iter_all) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
					     u64 bio_offset)
{
	/*
	 * When we're called for a write, we're already in the async
	 * submission context.  All we need to do here is checksum the bio;
	 * the actual mapping and submission happens later in
	 * run_one_async_done().
	 */
	return btree_csum_one_bio(bio);
}

static int check_async_write(struct btrfs_fs_info *fs_info,
			     struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
	if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
		return 0;
	return 1;
}
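
/*
 * Editorial summary of check_async_write() as a decision table:
 *
 *	sync_writers > 0        -> 0 (checksum inline, a waiter is sync)
 *	BTRFS_FS_CSUM_IMPL_FAST -> 0 (checksum implementation is cheap)
 *	otherwise               -> 1 (offload checksumming to workers)
 */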

blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
				       int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(fs_info, BTRFS_I(inode));
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  0, inode, btree_submit_bio_start);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH,
					     fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		detach_page_private(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;

	ret = read_extent_buffer_pages(buf, WAIT_NONE, 0);
	if (ret < 0)
		free_extent_buffer_stale(buf);
	else
		free_extent_buffer(buf);
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}

/*
 * Read a tree block at logical address @bytenr and do basic but critical
 * verification.
 *
 * @parent_transid:	expected transid of this tree block, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid, int level,
				      struct btrfs_key *first_key)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(buf, parent_transid,
					     level, first_key);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(ret);
	}
	return buf;
}

void btrfs_clean_tree_block(struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;

	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking_write(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

	root->fs_info = fs_info;
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->reloc_dirty_list);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->qgroup_flush_wait);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->snapshot_force_cow, 0);
	atomic_set(&root->nr_swapfiles, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy) {
		extent_io_tree_init(fs_info, &root->dirty_log_pages,
				    IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);
		extent_io_tree_init(fs_info, &root->log_csum_range,
				    IO_TREE_LOG_CSUM_RANGE, NULL);
	}

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
#ifdef CONFIG_BTRFS_DEBUG
	INIT_LIST_HEAD(&root->leak_list);
	spin_lock(&fs_info->fs_roots_radix_lock);
	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
	spin_unlock(&fs_info->fs_roots_radix_lock);
#endif
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   u64 objectid, gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);

	if (root)
		__setup_root(root, fs_info, objectid);
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     u64 objectid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	int ret = 0;

	/*
	 * We're holding a transaction handle, so use a NOFS memory allocation
	 * context to avoid deadlock if reclaim happens.
	 */
	nofs_flag = memalloc_nofs_save();
	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		generate_random_guid(root->root_item.uuid);
	else
		export_guid(root->root_item.uuid, &guid_null);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf)
		btrfs_tree_unlock(leaf);
	btrfs_put_root(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set SHAREABLE bit for log trees.
	 *
	 * Log trees are not exposed to user space thus can't be snapshotted,
	 * and they go away before a real commit is actually done.
	 *
	 * They do store pointers to file data extents, and those reference
	 * counts still get updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		btrfs_put_root(root);
		return ERR_CAST(leaf);
	}

	root->node = leaf;

	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
					      struct btrfs_path *path,
					      struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	u64 generation;
	int ret;
	int level;

	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation, level, NULL);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		root->node = NULL;
		goto fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		goto fail;
	}
	root->commit_root = btrfs_root_node(root);
	return root;
fail:
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);
	root = read_tree_root_path(tree_root, path, key);
	btrfs_free_path(path);

	return root;
}

/*
 * Initialize subvolume root in-memory structure
 *
 * @anon_dev:	anonymous device to attach to the root; if zero, allocate a
 *		new one
 */
static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{
	int ret;
	unsigned int nofs_flag;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	/*
	 * We might be called under a transaction (e.g. indirect backref
	 * resolution) which could deadlock if it triggers memory reclaim.
	 */
	nofs_flag = memalloc_nofs_save();
	ret = btrfs_drew_lock_init(&root->snapshot_lock);
	memalloc_nofs_restore(nofs_flag);
	if (ret)
		goto fail;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
	    root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	/*
	 * Don't assign an anonymous block device to roots that are not
	 * exposed to user space; the id pool is limited to 1M.
	 */
	if (is_fstree(root->root_key.objectid) &&
	    btrfs_root_refs(&root->root_item) > 0) {
		if (!anon_dev) {
			ret = get_anon_bdev(&root->anon_dev);
			if (ret)
				goto fail;
		} else {
			root->anon_dev = anon_dev;
		}
	}

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible for calling btrfs_free_fs_root() */
	return ret;
}

static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					       u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	if (root)
		root = btrfs_grab_root(root);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}
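
/*
 * Editorial note: the radix tree above is keyed by root objectid, and a hit
 * returns the cached in-memory root with an extra reference taken via
 * btrfs_grab_root(); the caller must drop it with btrfs_put_root().
 */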

static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
						u64 objectid)
{
	if (objectid == BTRFS_ROOT_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->tree_root);
	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->extent_root);
	if (objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->chunk_root);
	if (objectid == BTRFS_DEV_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->dev_root);
	if (objectid == BTRFS_CSUM_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->csum_root);
	if (objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->quota_root) ?
			fs_info->quota_root : ERR_PTR(-ENOENT);
	if (objectid == BTRFS_UUID_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->uuid_root) ?
			fs_info->uuid_root : ERR_PTR(-ENOENT);
	if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return btrfs_grab_root(fs_info->free_space_root) ?
			fs_info->free_space_root : ERR_PTR(-ENOENT);
	return NULL;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0) {
		btrfs_grab_root(root);
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}
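
/*
 * Editorial note: radix_tree_preload() preallocates nodes so that
 * radix_tree_insert() cannot fail with -ENOMEM while fs_roots_radix_lock
 * is held; -EEXIST means another task won the insertion race, and callers
 * such as btrfs_get_root_ref() below retry the lookup in that case.
 */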

void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_DEBUG
	struct btrfs_root *root;

	while (!list_empty(&fs_info->allocated_roots)) {
		char buf[BTRFS_ROOT_NAME_BUF_LEN];

		root = list_first_entry(&fs_info->allocated_roots,
					struct btrfs_root, leak_list);
		btrfs_err(fs_info, "leaked root %s refcount %d",
			  btrfs_root_name(root->root_key.objectid, buf),
			  refcount_read(&root->refs));
		while (refcount_read(&root->refs) > 1)
			btrfs_put_root(root);
		btrfs_put_root(root);
	}
#endif
}

void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->dio_bytes);
	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
	btrfs_free_csum_hash(fs_info);
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_free_ref_cache(fs_info);
	kfree(fs_info->balance_ctl);
	kfree(fs_info->delayed_root);
	btrfs_put_root(fs_info->extent_root);
	btrfs_put_root(fs_info->tree_root);
	btrfs_put_root(fs_info->chunk_root);
	btrfs_put_root(fs_info->dev_root);
	btrfs_put_root(fs_info->csum_root);
	btrfs_put_root(fs_info->quota_root);
	btrfs_put_root(fs_info->uuid_root);
	btrfs_put_root(fs_info->free_space_root);
	btrfs_put_root(fs_info->fs_root);
	btrfs_put_root(fs_info->data_reloc_root);
	btrfs_check_leaked_roots(fs_info);
	btrfs_extent_buffer_leak_debug_check(fs_info);
	kfree(fs_info->super_copy);
	kfree(fs_info->super_for_commit);
	kvfree(fs_info);
}

/*
 * Get an in-memory reference of a root structure.
 *
 * For essential trees like the root/extent tree, we grab it from fs_info
 * directly.  For subvolume trees, we check the cached filesystem roots first.
 * If not found, then read it from disk and add it to the cached fs roots.
 *
 * Caller should release the root by calling btrfs_put_root() after use.
 *
 * NOTE: Reloc and log trees can't be read by this function as they share the
 *	 same root objectid.
 *
 * @objectid:	root id
 * @anon_dev:	preallocated anonymous block device number for new roots;
 *		pass 0 for new allocation.
 * @check_ref:	whether to check root item references; if true, return -ENOENT
 *		for orphan roots
 */
static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
					     u64 objectid, dev_t anon_dev,
					     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;
again:
	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root) {
		/* Shouldn't get preallocated anon_dev for cached roots */
		ASSERT(!anon_dev);
		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
			btrfs_put_root(root);
			return ERR_PTR(-ENOENT);
		}
		return root;
	}

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_tree_root(fs_info->tree_root, &key);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root, anon_dev);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		btrfs_put_root(root);
		if (ret == -EEXIST)
			goto again;
		goto fail;
	}
	return root;
fail:
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

/*
 * Get in-memory reference of a root structure
 *
 * @objectid:	tree objectid
 * @check_ref:	if set, verify that the tree exists and the item has at least
 *		one reference
 */
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     u64 objectid, bool check_ref)
{
	return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
}
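
/*
 * Editorial usage sketch (illustrative only):
 *
 *	root = btrfs_get_fs_root(fs_info, objectid, true);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	...use the root, then drop the reference...
 *	btrfs_put_root(root);
 */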

/*
 * Get in-memory reference of a root structure, created as new, optionally pass
 * the anonymous block device id
 *
 * @objectid:	tree objectid
 * @anon_dev:	if zero, allocate a new anonymous block device; otherwise use
 *		the given value
 */
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
					 u64 objectid, dev_t anon_dev)
{
	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
}

/*
 * btrfs_get_fs_root_commit_root - return a root for the given objectid
 * @fs_info:	the fs_info
 * @objectid:	the objectid we need to lookup
 *
 * This is exclusively used for backref walking, and exists specifically
 * because of how qgroups do lookups.  Qgroups will do a backref lookup at
 * delayed ref creation time, which means we may have to read the tree_root
 * in order to look up a fs root that is not in memory.  If the root is not
 * in memory we will read the tree root commit root and look up the fs root
 * from there.  This is a temporary root, it will not be inserted into the
 * radix tree as it doesn't have the most up-to-date information, it'll
 * simply be discarded once the backref code is finished using the root.
 */
struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_path *path,
						 u64 objectid)
{
	struct btrfs_root *root;
	struct btrfs_key key;

	ASSERT(path->search_commit_root && path->skip_locking);

	/*
	 * This can return -ENOENT if we ask for a root that doesn't exist, but
	 * since this is called via the backref walking code we won't be looking
	 * up a root that doesn't exist, unless there's corruption.  So if root
	 * != NULL just return it.
	 */
	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root)
		return root;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = read_tree_root_path(fs_info->tree_root, path, &key);
	btrfs_release_path(path);

	return root;
}

/*
 * Called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens.
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	bio_endio(bio);
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
}

static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;

	while (1) {
		again = 0;

		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		btrfs_run_delayed_iputs(fs_info);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * so we needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
		if (kthread_should_park())
			kthread_parkme();
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}
1769 
1770 static int transaction_kthread(void *arg)
1771 {
1772 	struct btrfs_root *root = arg;
1773 	struct btrfs_fs_info *fs_info = root->fs_info;
1774 	struct btrfs_trans_handle *trans;
1775 	struct btrfs_transaction *cur;
1776 	u64 transid;
1777 	time64_t now;
1778 	unsigned long delay;
1779 	bool cannot_commit;
1780 
1781 	do {
1782 		cannot_commit = false;
1783 		delay = HZ * fs_info->commit_interval;
1784 		mutex_lock(&fs_info->transaction_kthread_mutex);
1785 
1786 		spin_lock(&fs_info->trans_lock);
1787 		cur = fs_info->running_transaction;
1788 		if (!cur) {
1789 			spin_unlock(&fs_info->trans_lock);
1790 			goto sleep;
1791 		}
1792 
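		/*
		 * If the transaction is not committing yet and is younger
		 * than the commit interval (or the clock jumped backwards),
		 * leave it alone and check again in at most 5 seconds.
		 */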
1793 		now = ktime_get_seconds();
1794 		if (cur->state < TRANS_STATE_COMMIT_START &&
1795 		    (now < cur->start_time ||
1796 		     now - cur->start_time < fs_info->commit_interval)) {
1797 			spin_unlock(&fs_info->trans_lock);
1798 			delay = HZ * 5;
1799 			goto sleep;
1800 		}
1801 		transid = cur->transid;
1802 		spin_unlock(&fs_info->trans_lock);
1803 
1804 		/* If the file system is aborted, this will always fail. */
1805 		trans = btrfs_attach_transaction(root);
1806 		if (IS_ERR(trans)) {
1807 			if (PTR_ERR(trans) != -ENOENT)
1808 				cannot_commit = true;
1809 			goto sleep;
1810 		}
1811 		if (transid == trans->transid) {
1812 			btrfs_commit_transaction(trans);
1813 		} else {
1814 			btrfs_end_transaction(trans);
1815 		}
1816 sleep:
1817 		wake_up_process(fs_info->cleaner_kthread);
1818 		mutex_unlock(&fs_info->transaction_kthread_mutex);
1819 
1820 		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1821 				      &fs_info->fs_state)))
1822 			btrfs_cleanup_transaction(fs_info);
1823 		if (!kthread_should_stop() &&
1824 				(!btrfs_transaction_blocked(fs_info) ||
1825 				 cannot_commit))
1826 			schedule_timeout_interruptible(delay);
1827 	} while (!kthread_should_stop());
1828 	return 0;
1829 }
1830 
1831 /*
1832  * This will find the highest generation in the array of root backups.  The
1833  * index of the newest backup is returned, or -EINVAL if we can't find
1834  * anything.
1835  *
1836  * We check to make sure the array is valid by comparing the
1837  * generation of the latest root in the array with the generation
1838  * in the super block.  If they don't match we pitch it.
1839  */
1840 static int find_newest_super_backup(struct btrfs_fs_info *info)
1841 {
1842 	const u64 newest_gen = btrfs_super_generation(info->super_copy);
1843 	u64 cur;
1844 	struct btrfs_root_backup *root_backup;
1845 	int i;
1846 
1847 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1848 		root_backup = info->super_copy->super_roots + i;
1849 		cur = btrfs_backup_tree_root_gen(root_backup);
1850 		if (cur == newest_gen)
1851 			return i;
1852 	}
1853 
1854 	return -EINVAL;
1855 }
1856 
1857 /*
1858  * Copy all the root pointers into the super backup array.
1859  * This will bump the backup pointer by one when it is
1860  * done.
1861  */
1862 static void backup_super_roots(struct btrfs_fs_info *info)
1863 {
1864 	const int next_backup = info->backup_root_index;
1865 	struct btrfs_root_backup *root_backup;
1866 
1867 	root_backup = info->super_for_commit->super_roots + next_backup;
1868 
1869 	/*
1870 	 * make sure all of our padding and empty slots get zero filled
1871 	 * regardless of which ones we use today
1872 	 */
1873 	memset(root_backup, 0, sizeof(*root_backup));
1874 
1875 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
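	/*
	 * The backup slots form a small circular array: the slot filled in
	 * below becomes the newest backup, and the index now points at the
	 * slot the next commit will overwrite.
	 */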
1876 
1877 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1878 	btrfs_set_backup_tree_root_gen(root_backup,
1879 			       btrfs_header_generation(info->tree_root->node));
1880 
1881 	btrfs_set_backup_tree_root_level(root_backup,
1882 			       btrfs_header_level(info->tree_root->node));
1883 
1884 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1885 	btrfs_set_backup_chunk_root_gen(root_backup,
1886 			       btrfs_header_generation(info->chunk_root->node));
1887 	btrfs_set_backup_chunk_root_level(root_backup,
1888 			       btrfs_header_level(info->chunk_root->node));
1889 
1890 	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1891 	btrfs_set_backup_extent_root_gen(root_backup,
1892 			       btrfs_header_generation(info->extent_root->node));
1893 	btrfs_set_backup_extent_root_level(root_backup,
1894 			       btrfs_header_level(info->extent_root->node));
1895 
1896 	/*
1897 	 * we might commit during log recovery, which happens before we set
1898 	 * the fs_root.  Make sure it is valid before we fill it in.
1899 	 */
1900 	if (info->fs_root && info->fs_root->node) {
1901 		btrfs_set_backup_fs_root(root_backup,
1902 					 info->fs_root->node->start);
1903 		btrfs_set_backup_fs_root_gen(root_backup,
1904 			       btrfs_header_generation(info->fs_root->node));
1905 		btrfs_set_backup_fs_root_level(root_backup,
1906 			       btrfs_header_level(info->fs_root->node));
1907 	}
1908 
1909 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1910 	btrfs_set_backup_dev_root_gen(root_backup,
1911 			       btrfs_header_generation(info->dev_root->node));
1912 	btrfs_set_backup_dev_root_level(root_backup,
1913 				       btrfs_header_level(info->dev_root->node));
1914 
1915 	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1916 	btrfs_set_backup_csum_root_gen(root_backup,
1917 			       btrfs_header_generation(info->csum_root->node));
1918 	btrfs_set_backup_csum_root_level(root_backup,
1919 			       btrfs_header_level(info->csum_root->node));
1920 
1921 	btrfs_set_backup_total_bytes(root_backup,
1922 			     btrfs_super_total_bytes(info->super_copy));
1923 	btrfs_set_backup_bytes_used(root_backup,
1924 			     btrfs_super_bytes_used(info->super_copy));
1925 	btrfs_set_backup_num_devices(root_backup,
1926 			     btrfs_super_num_devices(info->super_copy));
1927 
1928 	/*
1929 	 * if we don't copy this out to the super_copy, it won't get remembered
1930 	 * for the next commit
1931 	 */
1932 	memcpy(&info->super_copy->super_roots,
1933 	       &info->super_for_commit->super_roots,
1934 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1935 }
1936 
1937 /*
1938  * read_backup_root - Reads a backup root based on the passed priority. Prio 0
1939  * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
1940  *
1941  * @fs_info:  filesystem whose backup roots need to be read
1942  * @priority: priority of backup root required
1943  *
1944  * Returns backup root index on success and -EINVAL otherwise.
1945  */
1946 static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
1947 {
1948 	int backup_index = find_newest_super_backup(fs_info);
1949 	struct btrfs_super_block *super = fs_info->super_copy;
1950 	struct btrfs_root_backup *root_backup;
1951 
1952 	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
1953 		if (priority == 0)
1954 			return backup_index;
1955 
1956 		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
1957 		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
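		/*
		 * Worked example: with BTRFS_NUM_BACKUP_ROOTS == 4, the
		 * newest backup in slot 1 and priority 2, this yields
		 * (1 + 4 - 2) % 4 == 3.
		 */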
1958 	} else {
1959 		return -EINVAL;
1960 	}
1961 
1962 	root_backup = super->super_roots + backup_index;
1963 
1964 	btrfs_set_super_generation(super,
1965 				   btrfs_backup_tree_root_gen(root_backup));
1966 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1967 	btrfs_set_super_root_level(super,
1968 				   btrfs_backup_tree_root_level(root_backup));
1969 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1970 
1971 	/*
1972 	 * FIXME: the total bytes and num_devices need to match, otherwise
1973 	 * a fsck is needed.
1974 	 */
1975 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1976 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1977 
1978 	return backup_index;
1979 }
1980 
1981 /* helper to cleanup workers */
1982 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1983 {
1984 	btrfs_destroy_workqueue(fs_info->fixup_workers);
1985 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1986 	btrfs_destroy_workqueue(fs_info->workers);
1987 	btrfs_destroy_workqueue(fs_info->endio_workers);
1988 	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
1989 	btrfs_destroy_workqueue(fs_info->rmw_workers);
1990 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
1991 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1992 	btrfs_destroy_workqueue(fs_info->delayed_workers);
1993 	btrfs_destroy_workqueue(fs_info->caching_workers);
1994 	btrfs_destroy_workqueue(fs_info->readahead_workers);
1995 	btrfs_destroy_workqueue(fs_info->flush_workers);
1996 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
1997 	if (fs_info->discard_ctl.discard_workers)
1998 		destroy_workqueue(fs_info->discard_ctl.discard_workers);
1999 	/*
2000 	 * Now that all other work queues are destroyed, we can safely destroy
2001 	 * the queues used for metadata I/O, since tasks from those other work
2002 	 * queues can do metadata I/O operations.
2003 	 */
2004 	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2005 	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2006 }
2007 
2008 static void free_root_extent_buffers(struct btrfs_root *root)
2009 {
2010 	if (root) {
2011 		free_extent_buffer(root->node);
2012 		free_extent_buffer(root->commit_root);
2013 		root->node = NULL;
2014 		root->commit_root = NULL;
2015 	}
2016 }
2017 
2018 /* helper to cleanup tree roots */
2019 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
2020 {
2021 	free_root_extent_buffers(info->tree_root);
2022 
2023 	free_root_extent_buffers(info->dev_root);
2024 	free_root_extent_buffers(info->extent_root);
2025 	free_root_extent_buffers(info->csum_root);
2026 	free_root_extent_buffers(info->quota_root);
2027 	free_root_extent_buffers(info->uuid_root);
2028 	free_root_extent_buffers(info->fs_root);
2029 	free_root_extent_buffers(info->data_reloc_root);
2030 	if (free_chunk_root)
2031 		free_root_extent_buffers(info->chunk_root);
2032 	free_root_extent_buffers(info->free_space_root);
2033 }
2034 
2035 void btrfs_put_root(struct btrfs_root *root)
2036 {
2037 	if (!root)
2038 		return;
2039 
2040 	if (refcount_dec_and_test(&root->refs)) {
2041 		WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2042 		WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
2043 		if (root->anon_dev)
2044 			free_anon_bdev(root->anon_dev);
2045 		btrfs_drew_lock_destroy(&root->snapshot_lock);
2046 		free_root_extent_buffers(root);
2047 		kfree(root->free_ino_ctl);
2048 		kfree(root->free_ino_pinned);
2049 #ifdef CONFIG_BTRFS_DEBUG
2050 		spin_lock(&root->fs_info->fs_roots_radix_lock);
2051 		list_del_init(&root->leak_list);
2052 		spin_unlock(&root->fs_info->fs_roots_radix_lock);
2053 #endif
2054 		kfree(root);
2055 	}
2056 }
2057 
2058 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2059 {
2060 	int ret;
2061 	struct btrfs_root *gang[8];
2062 	int i;
2063 
2064 	while (!list_empty(&fs_info->dead_roots)) {
2065 		gang[0] = list_entry(fs_info->dead_roots.next,
2066 				     struct btrfs_root, root_list);
2067 		list_del(&gang[0]->root_list);
2068 
2069 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
2070 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2071 		btrfs_put_root(gang[0]);
2072 	}
2073 
2074 	while (1) {
2075 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2076 					     (void **)gang, 0,
2077 					     ARRAY_SIZE(gang));
2078 		if (!ret)
2079 			break;
2080 		for (i = 0; i < ret; i++)
2081 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2082 	}
2083 }
2084 
2085 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2086 {
2087 	mutex_init(&fs_info->scrub_lock);
2088 	atomic_set(&fs_info->scrubs_running, 0);
2089 	atomic_set(&fs_info->scrub_pause_req, 0);
2090 	atomic_set(&fs_info->scrubs_paused, 0);
2091 	atomic_set(&fs_info->scrub_cancel_req, 0);
2092 	init_waitqueue_head(&fs_info->scrub_pause_wait);
2093 	refcount_set(&fs_info->scrub_workers_refcnt, 0);
2094 }
2095 
2096 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2097 {
2098 	spin_lock_init(&fs_info->balance_lock);
2099 	mutex_init(&fs_info->balance_mutex);
2100 	atomic_set(&fs_info->balance_pause_req, 0);
2101 	atomic_set(&fs_info->balance_cancel_req, 0);
2102 	fs_info->balance_ctl = NULL;
2103 	init_waitqueue_head(&fs_info->balance_wait_q);
2104 }
2105 
2106 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2107 {
2108 	struct inode *inode = fs_info->btree_inode;
2109 
2110 	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2111 	set_nlink(inode, 1);
2112 	/*
2113 	 * We set i_size on the btree inode to the maximum possible offset.
2114 	 * The real end of the address space is determined by all of
2115 	 * the devices in the system.
2116 	 */
2117 	inode->i_size = OFFSET_MAX;
2118 	inode->i_mapping->a_ops = &btree_aops;
2119 
2120 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2121 	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
2122 			    IO_TREE_BTREE_INODE_IO, inode);
2123 	BTRFS_I(inode)->io_tree.track_uptodate = false;
2124 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2125 
2126 	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
2127 	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2128 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2129 	btrfs_insert_inode_hash(inode);
2130 }
2131 
2132 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2133 {
2134 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2135 	init_rwsem(&fs_info->dev_replace.rwsem);
2136 	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
2137 }
2138 
2139 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2140 {
2141 	spin_lock_init(&fs_info->qgroup_lock);
2142 	mutex_init(&fs_info->qgroup_ioctl_lock);
2143 	fs_info->qgroup_tree = RB_ROOT;
2144 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2145 	fs_info->qgroup_seq = 1;
2146 	fs_info->qgroup_ulist = NULL;
2147 	fs_info->qgroup_rescan_running = false;
2148 	mutex_init(&fs_info->qgroup_rescan_lock);
2149 }
2150 
2151 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2152 		struct btrfs_fs_devices *fs_devices)
2153 {
2154 	u32 max_active = fs_info->thread_pool_size;
2155 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2156 
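	/*
	 * For the btrfs_alloc_workqueue() calls below, the fourth argument
	 * caps the number of active workers and the fifth is a pending-work
	 * threshold used to scale the active count (0 disables the scaling).
	 */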
2157 	fs_info->workers =
2158 		btrfs_alloc_workqueue(fs_info, "worker",
2159 				      flags | WQ_HIGHPRI, max_active, 16);
2160 
2161 	fs_info->delalloc_workers =
2162 		btrfs_alloc_workqueue(fs_info, "delalloc",
2163 				      flags, max_active, 2);
2164 
2165 	fs_info->flush_workers =
2166 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2167 				      flags, max_active, 0);
2168 
2169 	fs_info->caching_workers =
2170 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2171 
2172 	fs_info->fixup_workers =
2173 		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2174 
2175 	/*
2176 	 * Endios are largely parallel and should have a very
2177 	 * low idle thresh.
2178 	 */
2179 	fs_info->endio_workers =
2180 		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2181 	fs_info->endio_meta_workers =
2182 		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2183 				      max_active, 4);
2184 	fs_info->endio_meta_write_workers =
2185 		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2186 				      max_active, 2);
2187 	fs_info->endio_raid56_workers =
2188 		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2189 				      max_active, 4);
2190 	fs_info->rmw_workers =
2191 		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2192 	fs_info->endio_write_workers =
2193 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2194 				      max_active, 2);
2195 	fs_info->endio_freespace_worker =
2196 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2197 				      max_active, 0);
2198 	fs_info->delayed_workers =
2199 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2200 				      max_active, 0);
2201 	fs_info->readahead_workers =
2202 		btrfs_alloc_workqueue(fs_info, "readahead", flags,
2203 				      max_active, 2);
2204 	fs_info->qgroup_rescan_workers =
2205 		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2206 	fs_info->discard_ctl.discard_workers =
2207 		alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1);
2208 
2209 	if (!(fs_info->workers && fs_info->delalloc_workers &&
2210 	      fs_info->flush_workers &&
2211 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2212 	      fs_info->endio_meta_write_workers &&
2213 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2214 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2215 	      fs_info->caching_workers && fs_info->readahead_workers &&
2216 	      fs_info->fixup_workers && fs_info->delayed_workers &&
2217 	      fs_info->qgroup_rescan_workers &&
2218 	      fs_info->discard_ctl.discard_workers)) {
2219 		return -ENOMEM;
2220 	}
2221 
2222 	return 0;
2223 }
2224 
2225 static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2226 {
2227 	struct crypto_shash *csum_shash;
2228 	const char *csum_driver = btrfs_super_csum_driver(csum_type);
2229 
2230 	csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
2231 
2232 	if (IS_ERR(csum_shash)) {
2233 		btrfs_err(fs_info, "error allocating %s hash for checksum",
2234 			  csum_driver);
2235 		return PTR_ERR(csum_shash);
2236 	}
2237 
2238 	fs_info->csum_shash = csum_shash;
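	/*
	 * A minimal usage sketch, assuming a caller with the data in one
	 * contiguous buffer (the real users, e.g. csum_tree_block(), work on
	 * extent buffer pages):
	 *
	 *	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	 *	shash->tfm = fs_info->csum_shash;
	 *	crypto_shash_digest(shash, data, len, result);
	 */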
2239 
2240 	return 0;
2241 }
2242 
2243 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2244 			    struct btrfs_fs_devices *fs_devices)
2245 {
2246 	int ret;
2247 	struct btrfs_root *log_tree_root;
2248 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2249 	u64 bytenr = btrfs_super_log_root(disk_super);
2250 	int level = btrfs_super_log_root_level(disk_super);
2251 
2252 	if (fs_devices->rw_devices == 0) {
2253 		btrfs_warn(fs_info, "log replay required on RO media");
2254 		return -EIO;
2255 	}
2256 
2257 	log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2258 					 GFP_KERNEL);
2259 	if (!log_tree_root)
2260 		return -ENOMEM;
2261 
2262 	log_tree_root->node = read_tree_block(fs_info, bytenr,
2263 					      fs_info->generation + 1,
2264 					      level, NULL);
2265 	if (IS_ERR(log_tree_root->node)) {
2266 		btrfs_warn(fs_info, "failed to read log tree");
2267 		ret = PTR_ERR(log_tree_root->node);
2268 		log_tree_root->node = NULL;
2269 		btrfs_put_root(log_tree_root);
2270 		return ret;
2271 	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
2272 		btrfs_err(fs_info, "failed to read log tree");
2273 		btrfs_put_root(log_tree_root);
2274 		return -EIO;
2275 	}
2276 	/* returns with log_tree_root freed on success */
2277 	ret = btrfs_recover_log_trees(log_tree_root);
2278 	if (ret) {
2279 		btrfs_handle_fs_error(fs_info, ret,
2280 				      "Failed to recover log tree");
2281 		btrfs_put_root(log_tree_root);
2282 		return ret;
2283 	}
2284 
2285 	if (sb_rdonly(fs_info->sb)) {
2286 		ret = btrfs_commit_super(fs_info);
2287 		if (ret)
2288 			return ret;
2289 	}
2290 
2291 	return 0;
2292 }
2293 
2294 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2295 {
2296 	struct btrfs_root *tree_root = fs_info->tree_root;
2297 	struct btrfs_root *root;
2298 	struct btrfs_key location;
2299 	int ret;
2300 
2301 	BUG_ON(!fs_info->tree_root);
2302 
2303 	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2304 	location.type = BTRFS_ROOT_ITEM_KEY;
2305 	location.offset = 0;
2306 
2307 	root = btrfs_read_tree_root(tree_root, &location);
2308 	if (IS_ERR(root)) {
2309 		ret = PTR_ERR(root);
2310 		goto out;
2311 	}
2312 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2313 	fs_info->extent_root = root;
2314 
2315 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2316 	root = btrfs_read_tree_root(tree_root, &location);
2317 	if (IS_ERR(root)) {
2318 		ret = PTR_ERR(root);
2319 		goto out;
2320 	}
2321 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2322 	fs_info->dev_root = root;
2323 	btrfs_init_devices_late(fs_info);
2324 
2325 	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2326 	root = btrfs_read_tree_root(tree_root, &location);
2327 	if (IS_ERR(root)) {
2328 		ret = PTR_ERR(root);
2329 		goto out;
2330 	}
2331 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2332 	fs_info->csum_root = root;
2333 
2334 	/*
2335 	 * This tree can share blocks with some other fs tree during relocation
2336 	 * and we need a proper setup by btrfs_get_fs_root
2337 	 */
2338 	root = btrfs_get_fs_root(tree_root->fs_info,
2339 				 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
2340 	if (IS_ERR(root)) {
2341 		ret = PTR_ERR(root);
2342 		goto out;
2343 	}
2344 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2345 	fs_info->data_reloc_root = root;
2346 
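	/*
	 * The quota root is optional; if it can't be read we just continue
	 * without the quota feature enabled.
	 */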
2347 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2348 	root = btrfs_read_tree_root(tree_root, &location);
2349 	if (!IS_ERR(root)) {
2350 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2351 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2352 		fs_info->quota_root = root;
2353 	}
2354 
2355 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2356 	root = btrfs_read_tree_root(tree_root, &location);
2357 	if (IS_ERR(root)) {
2358 		ret = PTR_ERR(root);
2359 		if (ret != -ENOENT)
2360 			goto out;
2361 	} else {
2362 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2363 		fs_info->uuid_root = root;
2364 	}
2365 
2366 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2367 		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2368 		root = btrfs_read_tree_root(tree_root, &location);
2369 		if (IS_ERR(root)) {
2370 			ret = PTR_ERR(root);
2371 			goto out;
2372 		}
2373 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2374 		fs_info->free_space_root = root;
2375 	}
2376 
2377 	return 0;
2378 out:
2379 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2380 		   location.objectid, ret);
2381 	return ret;
2382 }
2383 
2384 /*
2385  * Real super block validation
2386  * NOTE: super csum type and incompat features will not be checked here.
2387  *
2388  * @sb:		super block to check
2389  * @mirror_num:	the super block copy number whose bytenr is checked:
2390  * 		0	the primary (1st) sb
2391  * 		1, 2	2nd and 3rd backup copy
2392  * 	       -1	skip bytenr check
2393  */
2394 static int validate_super(struct btrfs_fs_info *fs_info,
2395 			    struct btrfs_super_block *sb, int mirror_num)
2396 {
2397 	u64 nodesize = btrfs_super_nodesize(sb);
2398 	u64 sectorsize = btrfs_super_sectorsize(sb);
2399 	int ret = 0;
2400 
2401 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2402 		btrfs_err(fs_info, "no valid FS found");
2403 		ret = -EINVAL;
2404 	}
2405 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2406 		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2407 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2408 		ret = -EINVAL;
2409 	}
2410 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2411 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2412 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2413 		ret = -EINVAL;
2414 	}
2415 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2416 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2417 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2418 		ret = -EINVAL;
2419 	}
2420 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2421 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2422 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2423 		ret = -EINVAL;
2424 	}
2425 
2426 	/*
2427 	 * Check sectorsize and nodesize first, other checks will need them.
2428 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2429 	 */
2430 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2431 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2432 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2433 		ret = -EINVAL;
2434 	}
2435 	/* Only PAGE_SIZE is supported for now */
2436 	if (sectorsize != PAGE_SIZE) {
2437 		btrfs_err(fs_info,
2438 			"sectorsize %llu not supported yet, only support %lu",
2439 			sectorsize, PAGE_SIZE);
2440 		ret = -EINVAL;
2441 	}
2442 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2443 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2444 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2445 		ret = -EINVAL;
2446 	}
2447 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2448 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2449 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2450 		ret = -EINVAL;
2451 	}
2452 
2453 	/* Root alignment check */
2454 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2455 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2456 			   btrfs_super_root(sb));
2457 		ret = -EINVAL;
2458 	}
2459 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2460 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2461 			   btrfs_super_chunk_root(sb));
2462 		ret = -EINVAL;
2463 	}
2464 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2465 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2466 			   btrfs_super_log_root(sb));
2467 		ret = -EINVAL;
2468 	}
2469 
2470 	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2471 		   BTRFS_FSID_SIZE) != 0) {
2472 		btrfs_err(fs_info,
2473 			"dev_item UUID does not match metadata fsid: %pU != %pU",
2474 			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2475 		ret = -EINVAL;
2476 	}
2477 
2478 	/*
2479 	 * Hint to catch really bogus numbers, bitflips and the like; more
2480 	 * exact checks are done later.
2481 	 */
2482 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2483 		btrfs_err(fs_info, "bytes_used is too small %llu",
2484 			  btrfs_super_bytes_used(sb));
2485 		ret = -EINVAL;
2486 	}
2487 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2488 		btrfs_err(fs_info, "invalid stripesize %u",
2489 			  btrfs_super_stripesize(sb));
2490 		ret = -EINVAL;
2491 	}
2492 	if (btrfs_super_num_devices(sb) > (1UL << 31))
2493 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2494 			   btrfs_super_num_devices(sb));
2495 	if (btrfs_super_num_devices(sb) == 0) {
2496 		btrfs_err(fs_info, "number of devices is 0");
2497 		ret = -EINVAL;
2498 	}
2499 
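	/*
	 * btrfs_sb_offset() maps copy 0/1/2 to the fixed offsets 64KiB,
	 * 64MiB and 256GiB respectively.
	 */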
2500 	if (mirror_num >= 0 &&
2501 	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2502 		btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2503 			  btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
2504 		ret = -EINVAL;
2505 	}
2506 
2507 	/*
2508 	 * Obvious sys_chunk_array corruptions: it must hold at least one key
2509 	 * and one chunk.
2510 	 */
2511 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2512 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2513 			  btrfs_super_sys_array_size(sb),
2514 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2515 		ret = -EINVAL;
2516 	}
2517 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2518 			+ sizeof(struct btrfs_chunk)) {
2519 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2520 			  btrfs_super_sys_array_size(sb),
2521 			  sizeof(struct btrfs_disk_key)
2522 			  + sizeof(struct btrfs_chunk));
2523 		ret = -EINVAL;
2524 	}
2525 
2526 	/*
2527 	 * The generation is a global counter, we'll trust it more than the others
2528 	 * but it's still possible that it's the one that's wrong.
2529 	 */
2530 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2531 		btrfs_warn(fs_info,
2532 			"suspicious: generation < chunk_root_generation: %llu < %llu",
2533 			btrfs_super_generation(sb),
2534 			btrfs_super_chunk_root_generation(sb));
2535 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2536 	    && btrfs_super_cache_generation(sb) != (u64)-1)
2537 		btrfs_warn(fs_info,
2538 			"suspicious: generation < cache_generation: %llu < %llu",
2539 			btrfs_super_generation(sb),
2540 			btrfs_super_cache_generation(sb));
2541 
2542 	return ret;
2543 }
2544 
2545 /*
2546  * Validation of super block at mount time.
2547  * Some checks, like csum type and incompat flags, were already done early
2548  * at mount time and will be skipped here.
2549  */
2550 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2551 {
2552 	return validate_super(fs_info, fs_info->super_copy, 0);
2553 }
2554 
2555 /*
2556  * Validation of super block at write time.
2557  * Some checks like bytenr check will be skipped as their values will be
2558  * overwritten soon.
2559  * Extra checks like csum type and incompat flags will be done here.
2560  */
2561 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2562 				      struct btrfs_super_block *sb)
2563 {
2564 	int ret;
2565 
2566 	ret = validate_super(fs_info, sb, -1);
2567 	if (ret < 0)
2568 		goto out;
2569 	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2570 		ret = -EUCLEAN;
2571 		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2572 			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2573 		goto out;
2574 	}
2575 	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2576 		ret = -EUCLEAN;
2577 		btrfs_err(fs_info,
2578 		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2579 			  btrfs_super_incompat_flags(sb),
2580 			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2581 		goto out;
2582 	}
2583 out:
2584 	if (ret < 0)
2585 		btrfs_err(fs_info,
2586 		"super block corruption detected before writing it to disk");
2587 	return ret;
2588 }
2589 
2590 static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2591 {
2592 	int backup_index = find_newest_super_backup(fs_info);
2593 	struct btrfs_super_block *sb = fs_info->super_copy;
2594 	struct btrfs_root *tree_root = fs_info->tree_root;
2595 	bool handle_error = false;
2596 	int ret = 0;
2597 	int i;
2598 
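	/*
	 * Try the tree root referenced by the super block first; if reading
	 * it fails and the usebackuproot mount option was given, fall back
	 * to progressively older backup roots.
	 */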
2599 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2600 		u64 generation;
2601 		int level;
2602 
2603 		if (handle_error) {
2604 			if (!IS_ERR(tree_root->node))
2605 				free_extent_buffer(tree_root->node);
2606 			tree_root->node = NULL;
2607 
2608 			if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2609 				break;
2610 
2611 			free_root_pointers(fs_info, false);
2612 
2613 			/*
2614 			 * Don't use the log in recovery mode, it won't be
2615 			 * valid
2616 			 */
2617 			btrfs_set_super_log_root(sb, 0);
2618 
2619 			/* We can't trust the free space cache either */
2620 			btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2621 
2622 			ret = read_backup_root(fs_info, i);
2623 			backup_index = ret;
2624 			if (ret < 0)
2625 				return ret;
2626 		}
2627 		generation = btrfs_super_generation(sb);
2628 		level = btrfs_super_root_level(sb);
2629 		tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb),
2630 						  generation, level, NULL);
2631 		if (IS_ERR(tree_root->node)) {
2632 			handle_error = true;
2633 			ret = PTR_ERR(tree_root->node);
2634 			tree_root->node = NULL;
2635 			btrfs_warn(fs_info, "couldn't read tree root");
2636 			continue;
2637 
2638 		} else if (!extent_buffer_uptodate(tree_root->node)) {
2639 			handle_error = true;
2640 			ret = -EIO;
2641 			btrfs_warn(fs_info, "error while reading tree root");
2642 			continue;
2643 		}
2644 
2645 		btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2646 		tree_root->commit_root = btrfs_root_node(tree_root);
2647 		btrfs_set_root_refs(&tree_root->root_item, 1);
2648 
2649 		/*
2650 		 * No need to hold btrfs_root::objectid_mutex since the fs
2651 		 * hasn't been fully initialised and we are the only user
2652 		 */
2653 		ret = btrfs_find_highest_objectid(tree_root,
2654 						&tree_root->highest_objectid);
2655 		if (ret < 0) {
2656 			handle_error = true;
2657 			continue;
2658 		}
2659 
2660 		ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2661 
2662 		ret = btrfs_read_roots(fs_info);
2663 		if (ret < 0) {
2664 			handle_error = true;
2665 			continue;
2666 		}
2667 
2668 		/* All successful */
2669 		fs_info->generation = generation;
2670 		fs_info->last_trans_committed = generation;
2671 
2672 		/* Always begin writing backup roots after the one being used */
2673 		if (backup_index < 0) {
2674 			fs_info->backup_root_index = 0;
2675 		} else {
2676 			fs_info->backup_root_index = backup_index + 1;
2677 			fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2678 		}
2679 		break;
2680 	}
2681 
2682 	return ret;
2683 }
2684 
2685 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2686 {
2687 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2688 	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2689 	INIT_LIST_HEAD(&fs_info->trans_list);
2690 	INIT_LIST_HEAD(&fs_info->dead_roots);
2691 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2692 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2693 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2694 	spin_lock_init(&fs_info->delalloc_root_lock);
2695 	spin_lock_init(&fs_info->trans_lock);
2696 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2697 	spin_lock_init(&fs_info->delayed_iput_lock);
2698 	spin_lock_init(&fs_info->defrag_inodes_lock);
2699 	spin_lock_init(&fs_info->super_lock);
2700 	spin_lock_init(&fs_info->buffer_lock);
2701 	spin_lock_init(&fs_info->unused_bgs_lock);
2702 	rwlock_init(&fs_info->tree_mod_log_lock);
2703 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2704 	mutex_init(&fs_info->delete_unused_bgs_mutex);
2705 	mutex_init(&fs_info->reloc_mutex);
2706 	mutex_init(&fs_info->delalloc_root_mutex);
2707 	seqlock_init(&fs_info->profiles_lock);
2708 
2709 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2710 	INIT_LIST_HEAD(&fs_info->space_info);
2711 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2712 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2713 #ifdef CONFIG_BTRFS_DEBUG
2714 	INIT_LIST_HEAD(&fs_info->allocated_roots);
2715 	INIT_LIST_HEAD(&fs_info->allocated_ebs);
2716 	spin_lock_init(&fs_info->eb_leak_lock);
2717 #endif
2718 	extent_map_tree_init(&fs_info->mapping_tree);
2719 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2720 			     BTRFS_BLOCK_RSV_GLOBAL);
2721 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2722 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2723 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2724 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2725 			     BTRFS_BLOCK_RSV_DELOPS);
2726 	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2727 			     BTRFS_BLOCK_RSV_DELREFS);
2728 
2729 	atomic_set(&fs_info->async_delalloc_pages, 0);
2730 	atomic_set(&fs_info->defrag_running, 0);
2731 	atomic_set(&fs_info->reada_works_cnt, 0);
2732 	atomic_set(&fs_info->nr_delayed_iputs, 0);
2733 	atomic64_set(&fs_info->tree_mod_seq, 0);
2734 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2735 	fs_info->metadata_ratio = 0;
2736 	fs_info->defrag_inodes = RB_ROOT;
2737 	atomic64_set(&fs_info->free_chunk_space, 0);
2738 	fs_info->tree_mod_log = RB_ROOT;
2739 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2740 	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2741 	/* readahead state */
2742 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2743 	spin_lock_init(&fs_info->reada_lock);
2744 	btrfs_init_ref_verify(fs_info);
2745 
2746 	fs_info->thread_pool_size = min_t(unsigned long,
2747 					  num_online_cpus() + 2, 8);
2748 
2749 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2750 	spin_lock_init(&fs_info->ordered_root_lock);
2751 
2752 	btrfs_init_scrub(fs_info);
2753 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2754 	fs_info->check_integrity_print_mask = 0;
2755 #endif
2756 	btrfs_init_balance(fs_info);
2757 	btrfs_init_async_reclaim_work(fs_info);
2758 
2759 	spin_lock_init(&fs_info->block_group_cache_lock);
2760 	fs_info->block_group_cache_tree = RB_ROOT;
2761 	fs_info->first_logical_byte = (u64)-1;
2762 
2763 	extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2764 			    IO_TREE_FS_EXCLUDED_EXTENTS, NULL);
2765 	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2766 
2767 	mutex_init(&fs_info->ordered_operations_mutex);
2768 	mutex_init(&fs_info->tree_log_mutex);
2769 	mutex_init(&fs_info->chunk_mutex);
2770 	mutex_init(&fs_info->transaction_kthread_mutex);
2771 	mutex_init(&fs_info->cleaner_mutex);
2772 	mutex_init(&fs_info->ro_block_group_mutex);
2773 	init_rwsem(&fs_info->commit_root_sem);
2774 	init_rwsem(&fs_info->cleanup_work_sem);
2775 	init_rwsem(&fs_info->subvol_sem);
2776 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2777 
2778 	btrfs_init_dev_replace_locks(fs_info);
2779 	btrfs_init_qgroup(fs_info);
2780 	btrfs_discard_init(fs_info);
2781 
2782 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2783 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2784 
2785 	init_waitqueue_head(&fs_info->transaction_throttle);
2786 	init_waitqueue_head(&fs_info->transaction_wait);
2787 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2788 	init_waitqueue_head(&fs_info->async_submit_wait);
2789 	init_waitqueue_head(&fs_info->delayed_iputs_wait);
2790 
2791 	/* Usable values until the real ones are cached from the superblock */
2792 	fs_info->nodesize = 4096;
2793 	fs_info->sectorsize = 4096;
2794 	fs_info->stripesize = 4096;
2795 
2796 	spin_lock_init(&fs_info->swapfile_pins_lock);
2797 	fs_info->swapfile_pins = RB_ROOT;
2798 
2799 	fs_info->send_in_progress = 0;
2800 }
2801 
2802 static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2803 {
2804 	int ret;
2805 
2806 	fs_info->sb = sb;
2807 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2808 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2809 
2810 	ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL);
2811 	if (ret)
2812 		return ret;
2813 
2814 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2815 	if (ret)
2816 		return ret;
2817 
2818 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2819 					(1 + ilog2(nr_cpu_ids));
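	/*
	 * Worked example: with 4KiB pages and 8 possible CPUs this is
	 * 4096 * (1 + ilog2(8)) = 4096 * 4 = 16KiB.
	 */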
2820 
2821 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2822 	if (ret)
2823 		return ret;
2824 
2825 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2826 			GFP_KERNEL);
2827 	if (ret)
2828 		return ret;
2829 
2830 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2831 					GFP_KERNEL);
2832 	if (!fs_info->delayed_root)
2833 		return -ENOMEM;
2834 	btrfs_init_delayed_root(fs_info->delayed_root);
2835 
2836 	return btrfs_alloc_stripe_hash_table(fs_info);
2837 }
2838 
2839 static int btrfs_uuid_rescan_kthread(void *data)
2840 {
2841 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
2842 	int ret;
2843 
2844 	/*
2845 	 * 1st step is to iterate through the existing UUID tree and
2846 	 * to delete all entries that contain outdated data.
2847 	 * 2nd step is to add all missing entries to the UUID tree.
2848 	 */
2849 	ret = btrfs_uuid_tree_iterate(fs_info);
2850 	if (ret < 0) {
2851 		if (ret != -EINTR)
2852 			btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2853 				   ret);
2854 		up(&fs_info->uuid_tree_rescan_sem);
2855 		return ret;
2856 	}
2857 	return btrfs_uuid_scan_kthread(data);
2858 }
2859 
2860 static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2861 {
2862 	struct task_struct *task;
2863 
2864 	down(&fs_info->uuid_tree_rescan_sem);
2865 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2866 	if (IS_ERR(task)) {
2867 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2868 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
2869 		up(&fs_info->uuid_tree_rescan_sem);
2870 		return PTR_ERR(task);
2871 	}
2872 
2873 	return 0;
2874 }
2875 
2876 int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
2877 		      char *options)
2878 {
2879 	u32 sectorsize;
2880 	u32 nodesize;
2881 	u32 stripesize;
2882 	u64 generation;
2883 	u64 features;
2884 	u16 csum_type;
2885 	struct btrfs_super_block *disk_super;
2886 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2887 	struct btrfs_root *tree_root;
2888 	struct btrfs_root *chunk_root;
2889 	int ret;
2890 	int err = -EINVAL;
2891 	int clear_free_space_tree = 0;
2892 	int level;
2893 
2894 	ret = init_mount_fs_info(fs_info, sb);
2895 	if (ret) {
2896 		err = ret;
2897 		goto fail;
2898 	}
2899 
2900 	/* These need to be init'ed before we start creating inodes and such. */
2901 	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
2902 				     GFP_KERNEL);
2903 	fs_info->tree_root = tree_root;
2904 	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
2905 				      GFP_KERNEL);
2906 	fs_info->chunk_root = chunk_root;
2907 	if (!tree_root || !chunk_root) {
2908 		err = -ENOMEM;
2909 		goto fail;
2910 	}
2911 
2912 	fs_info->btree_inode = new_inode(sb);
2913 	if (!fs_info->btree_inode) {
2914 		err = -ENOMEM;
2915 		goto fail;
2916 	}
2917 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2918 	btrfs_init_btree_inode(fs_info);
2919 
2920 	invalidate_bdev(fs_devices->latest_bdev);
2921 
2922 	/*
2923 	 * Read super block and check the signature bytes only
2924 	 */
2925 	disk_super = btrfs_read_dev_super(fs_devices->latest_bdev);
2926 	if (IS_ERR(disk_super)) {
2927 		err = PTR_ERR(disk_super);
2928 		goto fail_alloc;
2929 	}
2930 
2931 	/*
2932 	 * Verify the checksum type first; if it or the checksum value is
2933 	 * corrupted, we'll find out here.
2934 	 */
2935 	csum_type = btrfs_super_csum_type(disk_super);
2936 	if (!btrfs_supported_super_csum(csum_type)) {
2937 		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
2938 			  csum_type);
2939 		err = -EINVAL;
2940 		btrfs_release_disk_super(disk_super);
2941 		goto fail_alloc;
2942 	}
2943 
2944 	ret = btrfs_init_csum_hash(fs_info, csum_type);
2945 	if (ret) {
2946 		err = ret;
2947 		btrfs_release_disk_super(disk_super);
2948 		goto fail_alloc;
2949 	}
2950 
2951 	/*
2952 	 * We want to check the superblock checksum; the type is stored inside.
2953 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2954 	 */
2955 	if (btrfs_check_super_csum(fs_info, (u8 *)disk_super)) {
2956 		btrfs_err(fs_info, "superblock checksum mismatch");
2957 		err = -EINVAL;
2958 		btrfs_release_disk_super(disk_super);
2959 		goto fail_alloc;
2960 	}
2961 
2962 	/*
2963 	 * super_copy is zeroed at allocation time and we never touch the
2964 	 * following bytes up to INFO_SIZE; the checksum is calculated from
2965 	 * the whole block of INFO_SIZE.
2966 	 */
2967 	memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
2968 	btrfs_release_disk_super(disk_super);
2969 
2970 	disk_super = fs_info->super_copy;
2971 
2972 	ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
2973 		       BTRFS_FSID_SIZE));
2974 
2975 	if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
2976 		ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
2977 				fs_info->super_copy->metadata_uuid,
2978 				BTRFS_FSID_SIZE));
2979 	}
2980 
2981 	features = btrfs_super_flags(disk_super);
2982 	if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
2983 		features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
2984 		btrfs_set_super_flags(disk_super, features);
2985 		btrfs_info(fs_info,
2986 			"found metadata UUID change in progress flag, clearing");
2987 	}
2988 
2989 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2990 	       sizeof(*fs_info->super_for_commit));
2991 
2992 	ret = btrfs_validate_mount_super(fs_info);
2993 	if (ret) {
2994 		btrfs_err(fs_info, "superblock contains fatal errors");
2995 		err = -EINVAL;
2996 		goto fail_alloc;
2997 	}
2998 
2999 	if (!btrfs_super_root(disk_super))
3000 		goto fail_alloc;
3001 
3002 	/* Check the FS state to see whether the FS is broken */
3003 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
3004 		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
3005 
3006 	/*
3007 	 * In the long term, we'll store the compression type in the super
3008 	 * block, and it'll be used for per file compression control.
3009 	 */
3010 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
3011 
3012 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
3013 	if (ret) {
3014 		err = ret;
3015 		goto fail_alloc;
3016 	}
3017 
3018 	features = btrfs_super_incompat_flags(disk_super) &
3019 		~BTRFS_FEATURE_INCOMPAT_SUPP;
3020 	if (features) {
3021 		btrfs_err(fs_info,
3022 		    "cannot mount because of unsupported optional features (%llx)",
3023 		    features);
3024 		err = -EINVAL;
3025 		goto fail_alloc;
3026 	}
3027 
3028 	features = btrfs_super_incompat_flags(disk_super);
3029 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3030 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3031 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3032 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3033 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3034 
3035 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
3036 		btrfs_info(fs_info, "has skinny extents");
3037 
3038 	/*
3039 	 * Flag our filesystem as having big metadata blocks if
3040 	 * they are bigger than the page size.
3041 	 */
3042 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
3043 		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
3044 			btrfs_info(fs_info,
3045 				"flagging fs with big metadata feature");
3046 		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3047 	}
3048 
3049 	nodesize = btrfs_super_nodesize(disk_super);
3050 	sectorsize = btrfs_super_sectorsize(disk_super);
3051 	stripesize = sectorsize;
3052 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3053 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
3054 
3055 	/* Cache block sizes */
3056 	fs_info->nodesize = nodesize;
3057 	fs_info->sectorsize = sectorsize;
3058 	fs_info->stripesize = stripesize;
3059 
3060 	/*
3061 	 * Mixed block groups end up with duplicate but slightly offset
3062 	 * extent buffers for the same range.  This leads to corruption.
3063 	 */
3064 	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3065 	    (sectorsize != nodesize)) {
3066 		btrfs_err(fs_info,
3067 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3068 			nodesize, sectorsize);
3069 		goto fail_alloc;
3070 	}
3071 
3072 	/*
3073 	 * No need to take the lock because there is no other task that
3074 	 * could update the flag concurrently.
3075 	 */
3076 	btrfs_set_super_incompat_flags(disk_super, features);
3077 
3078 	features = btrfs_super_compat_ro_flags(disk_super) &
3079 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
3080 	if (!sb_rdonly(sb) && features) {
3081 		btrfs_err(fs_info,
3082 	"cannot mount read-write because of unsupported optional features (%llx)",
3083 		       features);
3084 		err = -EINVAL;
3085 		goto fail_alloc;
3086 	}
3087 
3088 	ret = btrfs_init_workqueues(fs_info, fs_devices);
3089 	if (ret) {
3090 		err = ret;
3091 		goto fail_sb_buffer;
3092 	}
3093 
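	/*
	 * Scale the readahead window with the number of devices, but keep it
	 * at least 4MiB: with 4KiB pages SZ_4M / PAGE_SIZE is 1024 pages.
	 */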
3094 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3095 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
3096 
3097 	sb->s_blocksize = sectorsize;
3098 	sb->s_blocksize_bits = blksize_bits(sectorsize);
3099 	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3100 
3101 	mutex_lock(&fs_info->chunk_mutex);
3102 	ret = btrfs_read_sys_array(fs_info);
3103 	mutex_unlock(&fs_info->chunk_mutex);
3104 	if (ret) {
3105 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
3106 		goto fail_sb_buffer;
3107 	}
3108 
3109 	generation = btrfs_super_chunk_root_generation(disk_super);
3110 	level = btrfs_super_chunk_root_level(disk_super);
3111 
3112 	chunk_root->node = read_tree_block(fs_info,
3113 					   btrfs_super_chunk_root(disk_super),
3114 					   generation, level, NULL);
3115 	if (IS_ERR(chunk_root->node) ||
3116 	    !extent_buffer_uptodate(chunk_root->node)) {
3117 		btrfs_err(fs_info, "failed to read chunk root");
3118 		if (!IS_ERR(chunk_root->node))
3119 			free_extent_buffer(chunk_root->node);
3120 		chunk_root->node = NULL;
3121 		goto fail_tree_roots;
3122 	}
3123 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
3124 	chunk_root->commit_root = btrfs_root_node(chunk_root);
3125 
3126 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3127 			   offsetof(struct btrfs_header, chunk_tree_uuid),
3128 			   BTRFS_UUID_SIZE);
3129 
3130 	ret = btrfs_read_chunk_tree(fs_info);
3131 	if (ret) {
3132 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3133 		goto fail_tree_roots;
3134 	}
3135 
3136 	/*
3137 	 * Keep the devid that is marked to be the target device for the
3138 	 * device replace procedure
3139 	 */
3140 	btrfs_free_extra_devids(fs_devices, 0);
3141 
3142 	if (!fs_devices->latest_bdev) {
3143 		btrfs_err(fs_info, "failed to read devices");
3144 		goto fail_tree_roots;
3145 	}
3146 
3147 	ret = init_tree_roots(fs_info);
3148 	if (ret)
3149 		goto fail_tree_roots;
3150 
3151 	/*
3152 	 * If we have a uuid root and we're not being told to rescan we need to
3153 	 * check the generation here so we can set the
3154 	 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit.  Otherwise we could commit the
3155 	 * transaction during a balance or the log replay without updating the
3156 	 * uuid generation, and then if we crash we would rescan the uuid tree,
3157 	 * even though it was perfectly fine.
3158 	 */
3159 	if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3160 	    fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3161 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3162 
3163 	ret = btrfs_verify_dev_extents(fs_info);
3164 	if (ret) {
3165 		btrfs_err(fs_info,
3166 			  "failed to verify dev extents against chunks: %d",
3167 			  ret);
3168 		goto fail_block_groups;
3169 	}
3170 	ret = btrfs_recover_balance(fs_info);
3171 	if (ret) {
3172 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3173 		goto fail_block_groups;
3174 	}
3175 
3176 	ret = btrfs_init_dev_stats(fs_info);
3177 	if (ret) {
3178 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3179 		goto fail_block_groups;
3180 	}
3181 
3182 	ret = btrfs_init_dev_replace(fs_info);
3183 	if (ret) {
3184 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3185 		goto fail_block_groups;
3186 	}
3187 
3188 	btrfs_free_extra_devids(fs_devices, 1);
3189 
3190 	ret = btrfs_sysfs_add_fsid(fs_devices);
3191 	if (ret) {
3192 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3193 				ret);
3194 		goto fail_block_groups;
3195 	}
3196 
3197 	ret = btrfs_sysfs_add_mounted(fs_info);
3198 	if (ret) {
3199 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3200 		goto fail_fsdev_sysfs;
3201 	}
3202 
3203 	ret = btrfs_init_space_info(fs_info);
3204 	if (ret) {
3205 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3206 		goto fail_sysfs;
3207 	}
3208 
3209 	ret = btrfs_read_block_groups(fs_info);
3210 	if (ret) {
3211 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3212 		goto fail_sysfs;
3213 	}
3214 
3215 	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
3216 		btrfs_warn(fs_info,
3217 		"writable mount is not allowed due to too many missing devices");
3218 		goto fail_sysfs;
3219 	}
3220 
3221 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3222 					       "btrfs-cleaner");
3223 	if (IS_ERR(fs_info->cleaner_kthread))
3224 		goto fail_sysfs;
3225 
3226 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3227 						   tree_root,
3228 						   "btrfs-transaction");
3229 	if (IS_ERR(fs_info->transaction_kthread))
3230 		goto fail_cleaner;
3231 
3232 	if (!btrfs_test_opt(fs_info, NOSSD) &&
3233 	    !fs_info->fs_devices->rotating) {
3234 		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3235 	}
3236 
3237 	/*
3238 	 * Mount does not set all options immediately, we can do it now and do
3239 	 * not have to wait for transaction commit
3240 	 */
3241 	btrfs_apply_pending_changes(fs_info);
3242 
3243 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3244 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3245 		ret = btrfsic_mount(fs_info, fs_devices,
3246 				    btrfs_test_opt(fs_info,
3247 					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3248 				    1 : 0,
3249 				    fs_info->check_integrity_print_mask);
3250 		if (ret)
3251 			btrfs_warn(fs_info,
3252 				"failed to initialize integrity check module: %d",
3253 				ret);
3254 	}
3255 #endif
3256 	ret = btrfs_read_qgroup_config(fs_info);
3257 	if (ret)
3258 		goto fail_trans_kthread;
3259 
3260 	if (btrfs_build_ref_tree(fs_info))
3261 		btrfs_err(fs_info, "couldn't build ref tree");
3262 
3263 	/* Do not make disk changes in a broken FS or when nologreplay is given */
3264 	if (btrfs_super_log_root(disk_super) != 0 &&
3265 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3266 		btrfs_info(fs_info, "start tree-log replay");
3267 		ret = btrfs_replay_log(fs_info, fs_devices);
3268 		if (ret) {
3269 			err = ret;
3270 			goto fail_qgroup;
3271 		}
3272 	}
3273 
3274 	ret = btrfs_find_orphan_roots(fs_info);
3275 	if (ret)
3276 		goto fail_qgroup;
3277 
3278 	if (!sb_rdonly(sb)) {
3279 		ret = btrfs_cleanup_fs_roots(fs_info);
3280 		if (ret)
3281 			goto fail_qgroup;
3282 
3283 		mutex_lock(&fs_info->cleaner_mutex);
3284 		ret = btrfs_recover_relocation(tree_root);
3285 		mutex_unlock(&fs_info->cleaner_mutex);
3286 		if (ret < 0) {
3287 			btrfs_warn(fs_info, "failed to recover relocation: %d",
3288 					ret);
3289 			err = -EINVAL;
3290 			goto fail_qgroup;
3291 		}
3292 	}
3293 
3294 	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
3295 	if (IS_ERR(fs_info->fs_root)) {
3296 		err = PTR_ERR(fs_info->fs_root);
3297 		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3298 		fs_info->fs_root = NULL;
3299 		goto fail_qgroup;
3300 	}
3301 
3302 	if (sb_rdonly(sb))
3303 		return 0;
3304 
3305 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3306 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3307 		clear_free_space_tree = 1;
3308 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3309 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3310 		btrfs_warn(fs_info, "free space tree is invalid");
3311 		clear_free_space_tree = 1;
3312 	}
3313 
3314 	if (clear_free_space_tree) {
3315 		btrfs_info(fs_info, "clearing free space tree");
3316 		ret = btrfs_clear_free_space_tree(fs_info);
3317 		if (ret) {
3318 			btrfs_warn(fs_info,
3319 				   "failed to clear free space tree: %d", ret);
3320 			close_ctree(fs_info);
3321 			return ret;
3322 		}
3323 	}
3324 
3325 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3326 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3327 		btrfs_info(fs_info, "creating free space tree");
3328 		ret = btrfs_create_free_space_tree(fs_info);
3329 		if (ret) {
3330 			btrfs_warn(fs_info,
3331 				"failed to create free space tree: %d", ret);
3332 			close_ctree(fs_info);
3333 			return ret;
3334 		}
3335 	}
3336 
3337 	down_read(&fs_info->cleanup_work_sem);
3338 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3339 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3340 		up_read(&fs_info->cleanup_work_sem);
3341 		close_ctree(fs_info);
3342 		return ret;
3343 	}
3344 	up_read(&fs_info->cleanup_work_sem);
3345 
3346 	ret = btrfs_resume_balance_async(fs_info);
3347 	if (ret) {
3348 		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3349 		close_ctree(fs_info);
3350 		return ret;
3351 	}
3352 
3353 	ret = btrfs_resume_dev_replace_async(fs_info);
3354 	if (ret) {
3355 		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3356 		close_ctree(fs_info);
3357 		return ret;
3358 	}
3359 
3360 	btrfs_qgroup_rescan_resume(fs_info);
3361 	btrfs_discard_resume(fs_info);
3362 
3363 	if (!fs_info->uuid_root) {
3364 		btrfs_info(fs_info, "creating UUID tree");
3365 		ret = btrfs_create_uuid_tree(fs_info);
3366 		if (ret) {
3367 			btrfs_warn(fs_info,
3368 				"failed to create the UUID tree: %d", ret);
3369 			close_ctree(fs_info);
3370 			return ret;
3371 		}
3372 	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3373 		   fs_info->generation !=
3374 				btrfs_super_uuid_tree_generation(disk_super)) {
3375 		btrfs_info(fs_info, "checking UUID tree");
3376 		ret = btrfs_check_uuid_tree(fs_info);
3377 		if (ret) {
3378 			btrfs_warn(fs_info,
3379 				"failed to check the UUID tree: %d", ret);
3380 			close_ctree(fs_info);
3381 			return ret;
3382 		}
3383 	}
3384 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3385 
3386 	/*
3387 	 * backuproot only affects mount behavior; if open_ctree succeeded,
3388 	 * there is no need to keep the flag.
3389 	 */
3390 	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3391 
3392 	return 0;
3393 
3394 fail_qgroup:
3395 	btrfs_free_qgroup_config(fs_info);
3396 fail_trans_kthread:
3397 	kthread_stop(fs_info->transaction_kthread);
3398 	btrfs_cleanup_transaction(fs_info);
3399 	btrfs_free_fs_roots(fs_info);
3400 fail_cleaner:
3401 	kthread_stop(fs_info->cleaner_kthread);
3402 
3403 	/*
3404 	 * make sure we're done with the btree inode before we stop our
3405 	 * kthreads
3406 	 */
3407 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3408 
3409 fail_sysfs:
3410 	btrfs_sysfs_remove_mounted(fs_info);
3411 
3412 fail_fsdev_sysfs:
3413 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3414 
3415 fail_block_groups:
3416 	btrfs_put_block_group_cache(fs_info);
3417 
3418 fail_tree_roots:
3419 	if (fs_info->data_reloc_root)
3420 		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
3421 	free_root_pointers(fs_info, true);
3422 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3423 
3424 fail_sb_buffer:
3425 	btrfs_stop_all_workers(fs_info);
3426 	btrfs_free_block_groups(fs_info);
3427 fail_alloc:
3428 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3429 
3430 	iput(fs_info->btree_inode);
3431 fail:
3432 	btrfs_close_devices(fs_info->fs_devices);
3433 	return err;
3434 }
3435 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3436 
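/*
 * Completion callback for superblock write bios: on error, record the
 * failure in the device stats and mark the page as bad; otherwise mark the
 * page uptodate. Either way, drop our page reference and unlock the page.
 */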
3437 static void btrfs_end_super_write(struct bio *bio)
3438 {
3439 	struct btrfs_device *device = bio->bi_private;
3440 	struct bio_vec *bvec;
3441 	struct bvec_iter_all iter_all;
3442 	struct page *page;
3443 
3444 	bio_for_each_segment_all(bvec, bio, iter_all) {
3445 		page = bvec->bv_page;
3446 
3447 		if (bio->bi_status) {
3448 			btrfs_warn_rl_in_rcu(device->fs_info,
3449 				"lost page write due to IO error on %s (%d)",
3450 				rcu_str_deref(device->name),
3451 				blk_status_to_errno(bio->bi_status));
3452 			ClearPageUptodate(page);
3453 			SetPageError(page);
3454 			btrfs_dev_stat_inc_and_print(device,
3455 						     BTRFS_DEV_STAT_WRITE_ERRS);
3456 		} else {
3457 			SetPageUptodate(page);
3458 		}
3459 
3460 		put_page(page);
3461 		unlock_page(page);
3462 	}
3463 
3464 	bio_put(bio);
3465 }
3466 
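/*
 * Read one superblock copy (@copy_num) from @bdev through its page cache
 * and sanity check the magic and bytenr. Returns the superblock or an
 * ERR_PTR; callers are expected to release it with
 * btrfs_release_disk_super().
 */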
3467 struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
3468 						   int copy_num)
3469 {
3470 	struct btrfs_super_block *super;
3471 	struct page *page;
3472 	u64 bytenr;
3473 	struct address_space *mapping = bdev->bd_inode->i_mapping;
3474 
3475 	bytenr = btrfs_sb_offset(copy_num);
3476 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3477 		return ERR_PTR(-EINVAL);
3478 
3479 	page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
3480 	if (IS_ERR(page))
3481 		return ERR_CAST(page);
3482 
3483 	super = page_address(page);
3484 	if (btrfs_super_magic(super) != BTRFS_MAGIC) {
3485 		btrfs_release_disk_super(super);
3486 		return ERR_PTR(-ENODATA);
3487 	}
3488 
3489 	if (btrfs_super_bytenr(super) != bytenr) {
3490 		btrfs_release_disk_super(super);
3491 		return ERR_PTR(-EINVAL);
3492 	}
3493 
3494 	return super;
3495 }
3496 
3497 
3498 struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
3499 {
3500 	struct btrfs_super_block *super, *latest = NULL;
3501 	int i;
3502 	u64 transid = 0;
3503 
3504 	/* We would like to check all the supers, but that would make a btrfs
3505 	 * mount succeed after a mkfs from a different filesystem. So until a
3506 	 * mount option to scan the later copies is added, only the first of
3507 	 * the BTRFS_SUPER_MIRROR_MAX super block copies is examined here.
3508 	 */
3509 	for (i = 0; i < 1; i++) {
3510 		super = btrfs_read_dev_one_super(bdev, i);
3511 		if (IS_ERR(super))
3512 			continue;
3513 
3514 		if (!latest || btrfs_super_generation(super) > transid) {
3515 			if (latest)
3516 				btrfs_release_disk_super(super);
3517 
3518 			latest = super;
3519 			transid = btrfs_super_generation(super);
3520 		}
3521 	}
3522 
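	/*
	 * Note: with a single copy examined, super == latest on success and
	 * carries the ERR_PTR from the read attempt on failure.
	 */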
3523 	return super;
3524 }
3525 
3526 /*
3527  * Write superblock @sb to @device. Do not wait for completion; all the
3528  * pages we use for writing are locked.
3529  *
3530  * Write @max_mirrors copies of the superblock, where 0 means the default:
3531  * all copies that fit the expected device size at commit time. Note that
3532  * max_mirrors must be the same for the write and wait phases.
3533  *
3534  * Return 0 when at least one copy was submitted successfully, or -1 when
3535  * every attempted copy failed (page not found or submission error).
3535  */
3536 static int write_dev_supers(struct btrfs_device *device,
3537 			    struct btrfs_super_block *sb, int max_mirrors)
3538 {
3539 	struct btrfs_fs_info *fs_info = device->fs_info;
3540 	struct address_space *mapping = device->bdev->bd_inode->i_mapping;
3541 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3542 	int i;
3543 	int errors = 0;
3544 	u64 bytenr;
3545 
3546 	if (max_mirrors == 0)
3547 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3548 
3549 	shash->tfm = fs_info->csum_shash;
3550 
3551 	for (i = 0; i < max_mirrors; i++) {
3552 		struct page *page;
3553 		struct bio *bio;
3554 		struct btrfs_super_block *disk_super;
3555 
3556 		bytenr = btrfs_sb_offset(i);
3557 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3558 		    device->commit_total_bytes)
3559 			break;
3560 
3561 		btrfs_set_super_bytenr(sb, bytenr);
3562 
3563 		crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3564 				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
3565 				    sb->csum);
3566 
3567 		page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT,
3568 					   GFP_NOFS);
3569 		if (!page) {
3570 			btrfs_err(device->fs_info,
3571 			    "couldn't get super block page for bytenr %llu",
3572 			    bytenr);
3573 			errors++;
3574 			continue;
3575 		}
3576 
3577 		/* Bump the refcount for wait_dev_supers() */
3578 		get_page(page);
3579 
3580 		disk_super = page_address(page);
3581 		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3582 
3583 		/*
3584 		 * Directly use bios here instead of relying on the page cache
3585 		 * to do I/O, so we don't lose the ability to do integrity
3586 		 * checking.
3587 		 */
3588 		bio = bio_alloc(GFP_NOFS, 1);
3589 		bio_set_dev(bio, device->bdev);
3590 		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3591 		bio->bi_private = device;
3592 		bio->bi_end_io = btrfs_end_super_write;
3593 		__bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
3594 			       offset_in_page(bytenr));
3595 
3596 		/*
3597 		 * We FUA only the first super block.  The others we allow to
3598 		 * go down lazily, so there's a short window where the on-disk
3599 		 * copies might still contain the older version.
3600 		 */
3601 		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO;
3602 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3603 			bio->bi_opf |= REQ_FUA;
3604 
3605 		btrfsic_submit_bio(bio);
3606 	}
3607 	return errors < i ? 0 : -1;
3608 }
3609 
3610 /*
3611  * Wait for completion of the superblock writes started by
3612  * write_dev_supers(); @max_mirrors must be the same as in the write phase.
3613  *
3614  * Return 0 on success, -1 when the primary copy failed or when every
3615  * attempted copy's page was missing or not marked up to date.
3616  */
3617 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3618 {
3619 	int i;
3620 	int errors = 0;
3621 	bool primary_failed = false;
3622 	u64 bytenr;
3623 
3624 	if (max_mirrors == 0)
3625 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3626 
3627 	for (i = 0; i < max_mirrors; i++) {
3628 		struct page *page;
3629 
3630 		bytenr = btrfs_sb_offset(i);
3631 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3632 		    device->commit_total_bytes)
3633 			break;
3634 
3635 		page = find_get_page(device->bdev->bd_inode->i_mapping,
3636 				     bytenr >> PAGE_SHIFT);
3637 		if (!page) {
3638 			errors++;
3639 			if (i == 0)
3640 				primary_failed = true;
3641 			continue;
3642 		}
3643 		/* Page is submitted locked and unlocked once the IO completes */
3644 		wait_on_page_locked(page);
3645 		if (PageError(page)) {
3646 			errors++;
3647 			if (i == 0)
3648 				primary_failed = true;
3649 		}
3650 
3651 		/* Drop our reference */
3652 		put_page(page);
3653 
3654 		/* Drop the reference from the writing run */
3655 		put_page(page);
3656 	}
3657 
3658 	/* log error, force error return */
3659 	if (primary_failed) {
3660 		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3661 			  device->devid);
3662 		return -1;
3663 	}
3664 
3665 	return errors < i ? 0 : -1;
3666 }
3667 
3668 /*
3669  * Endio callback for write_dev_flush(); this wakes anyone waiting
3670  * for the barrier once it completes.
3671  */
3672 static void btrfs_end_empty_barrier(struct bio *bio)
3673 {
3674 	complete(bio->bi_private);
3675 }
3676 
3677 /*
3678  * Submit a flush request to the device if it supports it. Error handling is
3679  * done in the waiting counterpart.
3680  */
3681 static void write_dev_flush(struct btrfs_device *device)
3682 {
3683 	struct request_queue *q = bdev_get_queue(device->bdev);
3684 	struct bio *bio = device->flush_bio;
3685 
3686 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3687 		return;
3688 
3689 	bio_reset(bio);
3690 	bio->bi_end_io = btrfs_end_empty_barrier;
3691 	bio_set_dev(bio, device->bdev);
3692 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3693 	init_completion(&device->flush_wait);
3694 	bio->bi_private = &device->flush_wait;
3695 
3696 	btrfsic_submit_bio(bio);
3697 	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3698 }
3699 
3700 /*
3701  * If the flush bio has been submitted by write_dev_flush, wait for it.
3702  */
3703 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3704 {
3705 	struct bio *bio = device->flush_bio;
3706 
3707 	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3708 		return BLK_STS_OK;
3709 
3710 	clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3711 	wait_for_completion_io(&device->flush_wait);
3712 
3713 	return bio->bi_status;
3714 }
3715 
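/*
 * After a barrier (flush) failure, check whether the filesystem can still
 * tolerate the failed devices; return -EIO if it cannot be kept writeable
 * in a degraded state.
 */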
3716 static int check_barrier_error(struct btrfs_fs_info *fs_info)
3717 {
3718 	if (!btrfs_check_rw_degradable(fs_info, NULL))
3719 		return -EIO;
3720 	return 0;
3721 }
3722 
3723 /*
3724  * send an empty flush down to each device in parallel,
3725  * then wait for them
3726  */
3727 static int barrier_all_devices(struct btrfs_fs_info *info)
3728 {
3729 	struct list_head *head;
3730 	struct btrfs_device *dev;
3731 	int errors_wait = 0;
3732 	blk_status_t ret;
3733 
3734 	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3735 	/* send down all the barriers */
3736 	head = &info->fs_devices->devices;
3737 	list_for_each_entry(dev, head, dev_list) {
3738 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3739 			continue;
3740 		if (!dev->bdev)
3741 			continue;
3742 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3743 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3744 			continue;
3745 
3746 		write_dev_flush(dev);
3747 		dev->last_flush_error = BLK_STS_OK;
3748 	}
3749 
3750 	/* wait for all the barriers */
3751 	list_for_each_entry(dev, head, dev_list) {
3752 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3753 			continue;
3754 		if (!dev->bdev) {
3755 			errors_wait++;
3756 			continue;
3757 		}
3758 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3759 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3760 			continue;
3761 
3762 		ret = wait_dev_flush(dev);
3763 		if (ret) {
3764 			dev->last_flush_error = ret;
3765 			btrfs_dev_stat_inc_and_print(dev,
3766 					BTRFS_DEV_STAT_FLUSH_ERRS);
3767 			errors_wait++;
3768 		}
3769 	}
3770 
3771 	if (errors_wait) {
3772 		/*
3773 		 * We need the status of all disks to determine the overall
3774 		 * volume status, so error checking is pushed to a separate
3775 		 * check after the wait loop.
3776 		 */
3777 		return check_barrier_error(info);
3778 	}
3779 	return 0;
3780 }
3781 
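/*
 * Return the number of disk barrier failures that can be tolerated for the
 * block group profiles set in @flags: the minimum tolerated_failures over
 * all raid types present, or 0 if an unknown raid flag is passed.
 */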
3782 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3783 {
3784 	int raid_type;
3785 	int min_tolerated = INT_MAX;
3786 
3787 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3788 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3789 		min_tolerated = min_t(int, min_tolerated,
3790 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3791 				    tolerated_failures);
3792 
3793 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3794 		if (raid_type == BTRFS_RAID_SINGLE)
3795 			continue;
3796 		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3797 			continue;
3798 		min_tolerated = min_t(int, min_tolerated,
3799 				    btrfs_raid_array[raid_type].
3800 				    tolerated_failures);
3801 	}
3802 
3803 	if (min_tolerated == INT_MAX) {
3804 		pr_warn("BTRFS: unknown raid flag: %llu", flags);
3805 		min_tolerated = 0;
3806 	}
3807 
3808 	return min_tolerated;
3809 }
3810 
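/*
 * Write the superblock to all writeable devices: optionally send flush
 * barriers first, fill in the per-device items, validate the super and
 * submit the copies, then wait for completion. More than max_errors
 * (num_devices - 1) failing devices turns this into -EIO.
 */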
3811 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3812 {
3813 	struct list_head *head;
3814 	struct btrfs_device *dev;
3815 	struct btrfs_super_block *sb;
3816 	struct btrfs_dev_item *dev_item;
3817 	int ret;
3818 	int do_barriers;
3819 	int max_errors;
3820 	int total_errors = 0;
3821 	u64 flags;
3822 
3823 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3824 
3825 	/*
3826 	 * max_mirrors == 0 indicates we're called from commit_transaction,
3827 	 * not from fsync, where the tree roots in fs_info may not yet be
3828 	 * consistent on disk.
3829 	 */
3830 	if (max_mirrors == 0)
3831 		backup_super_roots(fs_info);
3832 
3833 	sb = fs_info->super_for_commit;
3834 	dev_item = &sb->dev_item;
3835 
3836 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3837 	head = &fs_info->fs_devices->devices;
3838 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3839 
3840 	if (do_barriers) {
3841 		ret = barrier_all_devices(fs_info);
3842 		if (ret) {
3843 			mutex_unlock(
3844 				&fs_info->fs_devices->device_list_mutex);
3845 			btrfs_handle_fs_error(fs_info, ret,
3846 					      "errors while submitting device barriers.");
3847 			return ret;
3848 		}
3849 	}
3850 
3851 	list_for_each_entry(dev, head, dev_list) {
3852 		if (!dev->bdev) {
3853 			total_errors++;
3854 			continue;
3855 		}
3856 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3857 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3858 			continue;
3859 
3860 		btrfs_set_stack_device_generation(dev_item, 0);
3861 		btrfs_set_stack_device_type(dev_item, dev->type);
3862 		btrfs_set_stack_device_id(dev_item, dev->devid);
3863 		btrfs_set_stack_device_total_bytes(dev_item,
3864 						   dev->commit_total_bytes);
3865 		btrfs_set_stack_device_bytes_used(dev_item,
3866 						  dev->commit_bytes_used);
3867 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3868 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3869 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3870 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3871 		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
3872 		       BTRFS_FSID_SIZE);
3873 
3874 		flags = btrfs_super_flags(sb);
3875 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3876 
3877 		ret = btrfs_validate_write_super(fs_info, sb);
3878 		if (ret < 0) {
3879 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3880 			btrfs_handle_fs_error(fs_info, -EUCLEAN,
3881 				"unexpected superblock corruption detected");
3882 			return -EUCLEAN;
3883 		}
3884 
3885 		ret = write_dev_supers(dev, sb, max_mirrors);
3886 		if (ret)
3887 			total_errors++;
3888 	}
3889 	if (total_errors > max_errors) {
3890 		btrfs_err(fs_info, "%d errors while writing supers",
3891 			  total_errors);
3892 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3893 
3894 		/* FUA is masked off if unsupported and can't be the reason */
3895 		btrfs_handle_fs_error(fs_info, -EIO,
3896 				      "%d errors while writing supers",
3897 				      total_errors);
3898 		return -EIO;
3899 	}
3900 
3901 	total_errors = 0;
3902 	list_for_each_entry(dev, head, dev_list) {
3903 		if (!dev->bdev)
3904 			continue;
3905 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3906 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3907 			continue;
3908 
3909 		ret = wait_dev_supers(dev, max_mirrors);
3910 		if (ret)
3911 			total_errors++;
3912 	}
3913 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3914 	if (total_errors > max_errors) {
3915 		btrfs_handle_fs_error(fs_info, -EIO,
3916 				      "%d errors while writing supers",
3917 				      total_errors);
3918 		return -EIO;
3919 	}
3920 	return 0;
3921 }
3922 
3923 /* Drop a fs root from the radix tree and free it. */
3924 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3925 				  struct btrfs_root *root)
3926 {
3927 	bool drop_ref = false;
3928 
3929 	spin_lock(&fs_info->fs_roots_radix_lock);
3930 	radix_tree_delete(&fs_info->fs_roots_radix,
3931 			  (unsigned long)root->root_key.objectid);
3932 	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
3933 		drop_ref = true;
3934 	spin_unlock(&fs_info->fs_roots_radix_lock);
3935 
3936 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3937 		ASSERT(root->log_root == NULL);
3938 		if (root->reloc_root) {
3939 			btrfs_put_root(root->reloc_root);
3940 			root->reloc_root = NULL;
3941 		}
3942 	}
3943 
3944 	if (root->free_ino_pinned)
3945 		__btrfs_remove_free_space_cache(root->free_ino_pinned);
3946 	if (root->free_ino_ctl)
3947 		__btrfs_remove_free_space_cache(root->free_ino_ctl);
3948 	if (root->ino_cache_inode) {
3949 		iput(root->ino_cache_inode);
3950 		root->ino_cache_inode = NULL;
3951 	}
3952 	if (drop_ref)
3953 		btrfs_put_root(root);
3954 }
3955 
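/*
 * Run orphan cleanup on all fs roots in the radix tree. Roots are looked
 * up in batches of 8 and grabbed under fs_roots_radix_lock so they cannot
 * go away while they are being processed.
 */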
3956 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3957 {
3958 	u64 root_objectid = 0;
3959 	struct btrfs_root *gang[8];
3960 	int i = 0;
3961 	int err = 0;
3962 	unsigned int ret = 0;
3963 
3964 	while (1) {
3965 		spin_lock(&fs_info->fs_roots_radix_lock);
3966 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3967 					     (void **)gang, root_objectid,
3968 					     ARRAY_SIZE(gang));
3969 		if (!ret) {
3970 			spin_unlock(&fs_info->fs_roots_radix_lock);
3971 			break;
3972 		}
3973 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
3974 
3975 		for (i = 0; i < ret; i++) {
3976 			/* Avoid grabbing roots in dead_roots */
3977 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3978 				gang[i] = NULL;
3979 				continue;
3980 			}
3981 			/* Grab all the search results for later use */
3982 			gang[i] = btrfs_grab_root(gang[i]);
3983 		}
3984 		spin_unlock(&fs_info->fs_roots_radix_lock);
3985 
3986 		for (i = 0; i < ret; i++) {
3987 			if (!gang[i])
3988 				continue;
3989 			root_objectid = gang[i]->root_key.objectid;
3990 			err = btrfs_orphan_cleanup(gang[i]);
3991 			if (err)
3992 				break;
3993 			btrfs_put_root(gang[i]);
3994 		}
3995 		root_objectid++;
3996 	}
3997 
3998 	/* release the uncleaned roots due to error */
3999 	for (; i < ret; i++) {
4000 		if (gang[i])
4001 			btrfs_put_root(gang[i]);
4002 	}
4003 	return err;
4004 }
4005 
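/*
 * Flush the delayed iputs, wait for any ongoing cleanup work to finish and
 * commit a joined transaction so that everything is on disk.
 */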
4006 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4007 {
4008 	struct btrfs_root *root = fs_info->tree_root;
4009 	struct btrfs_trans_handle *trans;
4010 
4011 	mutex_lock(&fs_info->cleaner_mutex);
4012 	btrfs_run_delayed_iputs(fs_info);
4013 	mutex_unlock(&fs_info->cleaner_mutex);
4014 	wake_up_process(fs_info->cleaner_kthread);
4015 
4016 	/* wait until ongoing cleanup work is done */
4017 	down_write(&fs_info->cleanup_work_sem);
4018 	up_write(&fs_info->cleanup_work_sem);
4019 
4020 	trans = btrfs_join_transaction(root);
4021 	if (IS_ERR(trans))
4022 		return PTR_ERR(trans);
4023 	return btrfs_commit_transaction(trans);
4024 }
4025 
4026 void __cold close_ctree(struct btrfs_fs_info *fs_info)
4027 {
4028 	int ret;
4029 
4030 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4031 	/*
4032 	 * We don't want the cleaner to start new transactions, add more delayed
4033 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4034 	 * because that frees the task_struct, and the transaction kthread might
4035 	 * still try to wake up the cleaner.
4036 	 */
4037 	kthread_park(fs_info->cleaner_kthread);
4038 
4039 	/* wait for the qgroup rescan worker to stop */
4040 	btrfs_qgroup_wait_for_completion(fs_info, false);
4041 
4042 	/* wait for the uuid_scan task to finish */
4043 	down(&fs_info->uuid_tree_rescan_sem);
4044 	/* avoid complaints from lockdep et al., set sem back to initial state */
4045 	up(&fs_info->uuid_tree_rescan_sem);
4046 
4047 	/* pause restriper - we want to resume on mount */
4048 	btrfs_pause_balance(fs_info);
4049 
4050 	btrfs_dev_replace_suspend_for_unmount(fs_info);
4051 
4052 	btrfs_scrub_cancel(fs_info);
4053 
4054 	/* wait for any defraggers to finish */
4055 	wait_event(fs_info->transaction_wait,
4056 		   (atomic_read(&fs_info->defrag_running) == 0));
4057 
4058 	/* clear out the rbtree of defraggable inodes */
4059 	btrfs_cleanup_defrag_inodes(fs_info);
4060 
4061 	cancel_work_sync(&fs_info->async_reclaim_work);
4062 	cancel_work_sync(&fs_info->async_data_reclaim_work);
4063 
4064 	/* Cancel or finish ongoing discard work */
4065 	btrfs_discard_cleanup(fs_info);
4066 
4067 	if (!sb_rdonly(fs_info->sb)) {
4068 		/*
4069 		 * The cleaner kthread is stopped, so do one final pass over
4070 		 * unused block groups.
4071 		 */
4072 		btrfs_delete_unused_bgs(fs_info);
4073 
4074 		/*
4075 		 * There might be existing delayed inode workers still running
4076 		 * and holding an empty delayed inode item. We must wait for
4077 		 * them to complete first because they can create a transaction.
4078 		 * This happens when someone calls btrfs_balance_delayed_items()
4079 		 * and then a transaction commit runs the same delayed nodes
4080 		 * before any delayed worker has done something with the nodes.
4081 		 * We must wait for any worker here and not at transaction
4082 		 * commit time since that could cause a deadlock.
4083 		 * This is a very rare case.
4084 		 */
4085 		btrfs_flush_workqueue(fs_info->delayed_workers);
4086 
4087 		ret = btrfs_commit_super(fs_info);
4088 		if (ret)
4089 			btrfs_err(fs_info, "commit super ret %d", ret);
4090 	}
4091 
4092 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
4093 	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
4094 		btrfs_error_commit_super(fs_info);
4095 
4096 	kthread_stop(fs_info->transaction_kthread);
4097 	kthread_stop(fs_info->cleaner_kthread);
4098 
4099 	ASSERT(list_empty(&fs_info->delayed_iputs));
4100 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4101 
4102 	if (btrfs_check_quota_leak(fs_info)) {
4103 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4104 		btrfs_err(fs_info, "qgroup reserved space leaked");
4105 	}
4106 
4107 	btrfs_free_qgroup_config(fs_info);
4108 	ASSERT(list_empty(&fs_info->delalloc_roots));
4109 
4110 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4111 		btrfs_info(fs_info, "at unmount delalloc count %lld",
4112 		       percpu_counter_sum(&fs_info->delalloc_bytes));
4113 	}
4114 
4115 	if (percpu_counter_sum(&fs_info->dio_bytes))
4116 		btrfs_info(fs_info, "at unmount dio bytes count %lld",
4117 			   percpu_counter_sum(&fs_info->dio_bytes));
4118 
4119 	btrfs_sysfs_remove_mounted(fs_info);
4120 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4121 
4122 	btrfs_put_block_group_cache(fs_info);
4123 
4124 	/*
4125 	 * We must make sure there are no read requests left to
4126 	 * submit after we stop all the workers.
4127 	 */
4128 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4129 	btrfs_stop_all_workers(fs_info);
4130 
4131 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4132 	free_root_pointers(fs_info, true);
4133 	btrfs_free_fs_roots(fs_info);
4134 
4135 	/*
4136 	 * We must free the block groups after dropping the fs_roots as we could
4137 	 * have had an IO error and have left over tree log blocks that aren't
4138 	 * cleaned up until the fs roots are freed.  This makes the block group
4139 	 * accounting appear to be wrong because there's pending reserved bytes,
4140 	 * so make sure we do the block group cleanup afterwards.
4141 	 */
4142 	btrfs_free_block_groups(fs_info);
4143 
4144 	iput(fs_info->btree_inode);
4145 
4146 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4147 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
4148 		btrfsic_unmount(fs_info->fs_devices);
4149 #endif
4150 
4151 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
4152 	btrfs_close_devices(fs_info->fs_devices);
4153 }
4154 
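/*
 * Check that an extent buffer is uptodate and was written by the expected
 * transaction. Returns 1 when the buffer is uptodate and the parent
 * transid matches, 0 otherwise, or -EAGAIN when @atomic is set and
 * verification would need to block.
 */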
4155 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4156 			  int atomic)
4157 {
4158 	int ret;
4159 	struct inode *btree_inode = buf->pages[0]->mapping->host;
4160 
4161 	ret = extent_buffer_uptodate(buf);
4162 	if (!ret)
4163 		return ret;
4164 
4165 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4166 				    parent_transid, atomic);
4167 	if (ret == -EAGAIN)
4168 		return ret;
4169 	return !ret;
4170 }
4171 
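/*
 * Mark a tree block dirty: warn if its generation does not match the
 * running transaction, set the dirty bit on the extent buffer and, if it
 * was clean before, account its size in dirty_metadata_bytes.
 */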
4172 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4173 {
4174 	struct btrfs_fs_info *fs_info;
4175 	struct btrfs_root *root;
4176 	u64 transid = btrfs_header_generation(buf);
4177 	int was_dirty;
4178 
4179 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4180 	/*
4181 	 * This is a fast path so only do this check if we have sanity tests
4182 	 * enabled.  Normal people shouldn't be using unmapped buffers as dirty
4183 	 * outside of the sanity tests.
4184 	 */
4185 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4186 		return;
4187 #endif
4188 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4189 	fs_info = root->fs_info;
4190 	btrfs_assert_tree_locked(buf);
4191 	if (transid != fs_info->generation)
4192 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4193 			buf->start, transid, fs_info->generation);
4194 	was_dirty = set_extent_buffer_dirty(buf);
4195 	if (!was_dirty)
4196 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4197 					 buf->len,
4198 					 fs_info->dirty_metadata_batch);
4199 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4200 	/*
4201 	 * Since btrfs_mark_buffer_dirty() can be called with item pointer set
4202 	 * but item data not updated.
4203 	 * So here we should only check item pointers, not item data.
4204 	 */
4205 	if (btrfs_header_level(buf) == 0 &&
4206 	    btrfs_check_leaf_relaxed(buf)) {
4207 		btrfs_print_leaf(buf);
4208 		ASSERT(0);
4209 	}
4210 #endif
4211 }
4212 
4213 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4214 					int flush_delayed)
4215 {
4216 	/*
4217 	 * It looks as though older kernels can get into trouble with this
4218 	 * code; they end up stuck in balance_dirty_pages() forever.
4219 	 */
4220 	int ret;
4221 
4222 	if (current->flags & PF_MEMALLOC)
4223 		return;
4224 
4225 	if (flush_delayed)
4226 		btrfs_balance_delayed_items(fs_info);
4227 
4228 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4229 				     BTRFS_DIRTY_METADATA_THRESH,
4230 				     fs_info->dirty_metadata_batch);
4231 	if (ret > 0)
4232 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4234 }
4235 
4236 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4237 {
4238 	__btrfs_btree_balance_dirty(fs_info, 1);
4239 }
4240 
4241 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4242 {
4243 	__btrfs_btree_balance_dirty(fs_info, 0);
4244 }
4245 
4246 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
4247 		      struct btrfs_key *first_key)
4248 {
4249 	return btree_read_extent_buffer_pages(buf, parent_transid,
4250 					      level, first_key);
4251 }
4252 
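/*
 * Error-path counterpart of btrfs_commit_super(): instead of committing,
 * tear down the current transaction, then run the delayed iputs and wait
 * for any cleanup work to finish.
 */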
4253 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4254 {
4255 	/* cleanup FS via transaction */
4256 	btrfs_cleanup_transaction(fs_info);
4257 
4258 	mutex_lock(&fs_info->cleaner_mutex);
4259 	btrfs_run_delayed_iputs(fs_info);
4260 	mutex_unlock(&fs_info->cleaner_mutex);
4261 
4262 	down_write(&fs_info->cleanup_work_sem);
4263 	up_write(&fs_info->cleanup_work_sem);
4264 }
4265 
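/*
 * Free the log trees of all fs roots found in the radix tree, followed by
 * the log root tree itself.
 */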
4266 static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4267 {
4268 	struct btrfs_root *gang[8];
4269 	u64 root_objectid = 0;
4270 	int ret;
4271 
4272 	spin_lock(&fs_info->fs_roots_radix_lock);
4273 	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4274 					     (void **)gang, root_objectid,
4275 					     ARRAY_SIZE(gang))) != 0) {
4276 		int i;
4277 
4278 		for (i = 0; i < ret; i++)
4279 			gang[i] = btrfs_grab_root(gang[i]);
4280 		spin_unlock(&fs_info->fs_roots_radix_lock);
4281 
4282 		for (i = 0; i < ret; i++) {
4283 			if (!gang[i])
4284 				continue;
4285 			root_objectid = gang[i]->root_key.objectid;
4286 			btrfs_free_log(NULL, gang[i]);
4287 			btrfs_put_root(gang[i]);
4288 		}
4289 		root_objectid++;
4290 		spin_lock(&fs_info->fs_roots_radix_lock);
4291 	}
4292 	spin_unlock(&fs_info->fs_roots_radix_lock);
4293 	btrfs_free_log_root_tree(NULL, fs_info);
4294 }
4295 
4296 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4297 {
4298 	struct btrfs_ordered_extent *ordered;
4299 
4300 	spin_lock(&root->ordered_extent_lock);
4301 	/*
4302 	 * Setting the IOERR bit short-circuits the ordered completion path,
4303 	 * which makes sure the ordered extent gets properly cleaned up.
4304 	 */
4305 	list_for_each_entry(ordered, &root->ordered_extents,
4306 			    root_extent_list)
4307 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4308 	spin_unlock(&root->ordered_extent_lock);
4309 }
4310 
4311 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4312 {
4313 	struct btrfs_root *root;
4314 	struct list_head splice;
4315 
4316 	INIT_LIST_HEAD(&splice);
4317 
4318 	spin_lock(&fs_info->ordered_root_lock);
4319 	list_splice_init(&fs_info->ordered_roots, &splice);
4320 	while (!list_empty(&splice)) {
4321 		root = list_first_entry(&splice, struct btrfs_root,
4322 					ordered_root);
4323 		list_move_tail(&root->ordered_root,
4324 			       &fs_info->ordered_roots);
4325 
4326 		spin_unlock(&fs_info->ordered_root_lock);
4327 		btrfs_destroy_ordered_extents(root);
4328 
4329 		cond_resched();
4330 		spin_lock(&fs_info->ordered_root_lock);
4331 	}
4332 	spin_unlock(&fs_info->ordered_root_lock);
4333 
4334 	/*
4335 	 * We need this here because if we've been flipped read-only we won't
4336 	 * get sync() from the umount, so we need to make sure any ordered
4337 	 * extents whose dirty pages haven't started writeout yet actually
4338 	 * get run and error out properly.
4339 	 */
4340 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4341 }
4342 
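/*
 * Drop all delayed refs of an aborted transaction. Heads that still had
 * space reserved (must_insert_reserved) get their range accounted as
 * pinned again and then unpinned through btrfs_error_unpin_extent_range()
 * to keep the space accounting consistent.
 */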
4343 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4344 				      struct btrfs_fs_info *fs_info)
4345 {
4346 	struct rb_node *node;
4347 	struct btrfs_delayed_ref_root *delayed_refs;
4348 	struct btrfs_delayed_ref_node *ref;
4349 	int ret = 0;
4350 
4351 	delayed_refs = &trans->delayed_refs;
4352 
4353 	spin_lock(&delayed_refs->lock);
4354 	if (atomic_read(&delayed_refs->num_entries) == 0) {
4355 		spin_unlock(&delayed_refs->lock);
4356 		btrfs_debug(fs_info, "delayed_refs has NO entry");
4357 		return ret;
4358 	}
4359 
4360 	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4361 		struct btrfs_delayed_ref_head *head;
4362 		struct rb_node *n;
4363 		bool pin_bytes = false;
4364 
4365 		head = rb_entry(node, struct btrfs_delayed_ref_head,
4366 				href_node);
4367 		if (btrfs_delayed_ref_lock(delayed_refs, head))
4368 			continue;
4369 
4370 		spin_lock(&head->lock);
4371 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4372 			ref = rb_entry(n, struct btrfs_delayed_ref_node,
4373 				       ref_node);
4374 			ref->in_tree = 0;
4375 			rb_erase_cached(&ref->ref_node, &head->ref_tree);
4376 			RB_CLEAR_NODE(&ref->ref_node);
4377 			if (!list_empty(&ref->add_list))
4378 				list_del(&ref->add_list);
4379 			atomic_dec(&delayed_refs->num_entries);
4380 			btrfs_put_delayed_ref(ref);
4381 		}
4382 		if (head->must_insert_reserved)
4383 			pin_bytes = true;
4384 		btrfs_free_delayed_extent_op(head->extent_op);
4385 		btrfs_delete_ref_head(delayed_refs, head);
4386 		spin_unlock(&head->lock);
4387 		spin_unlock(&delayed_refs->lock);
4388 		mutex_unlock(&head->mutex);
4389 
4390 		if (pin_bytes) {
4391 			struct btrfs_block_group *cache;
4392 
4393 			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
4394 			BUG_ON(!cache);
4395 
4396 			spin_lock(&cache->space_info->lock);
4397 			spin_lock(&cache->lock);
4398 			cache->pinned += head->num_bytes;
4399 			btrfs_space_info_update_bytes_pinned(fs_info,
4400 				cache->space_info, head->num_bytes);
4401 			cache->reserved -= head->num_bytes;
4402 			cache->space_info->bytes_reserved -= head->num_bytes;
4403 			spin_unlock(&cache->lock);
4404 			spin_unlock(&cache->space_info->lock);
4405 			percpu_counter_add_batch(
4406 				&cache->space_info->total_bytes_pinned,
4407 				head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
4408 
4409 			btrfs_put_block_group(cache);
4410 
4411 			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
4412 				head->bytenr + head->num_bytes - 1);
4413 		}
4414 		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4415 		btrfs_put_delayed_ref_head(head);
4416 		cond_resched();
4417 		spin_lock(&delayed_refs->lock);
4418 	}
4419 	btrfs_qgroup_destroy_extent_records(trans);
4420 
4421 	spin_unlock(&delayed_refs->lock);
4422 
4423 	return ret;
4424 }
4425 
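/*
 * Remove every inode with pending delalloc from the root's list and
 * invalidate its pages, throwing the dirty data away instead of writing
 * it back.
 */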
4426 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4427 {
4428 	struct btrfs_inode *btrfs_inode;
4429 	struct list_head splice;
4430 
4431 	INIT_LIST_HEAD(&splice);
4432 
4433 	spin_lock(&root->delalloc_lock);
4434 	list_splice_init(&root->delalloc_inodes, &splice);
4435 
4436 	while (!list_empty(&splice)) {
4437 		struct inode *inode = NULL;
4438 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4439 					       delalloc_inodes);
4440 		__btrfs_del_delalloc_inode(root, btrfs_inode);
4441 		spin_unlock(&root->delalloc_lock);
4442 
4443 		/*
4444 		 * Make sure we get a live inode and that it won't disappear
4445 		 * in the meantime.
4446 		 */
4447 		inode = igrab(&btrfs_inode->vfs_inode);
4448 		if (inode) {
4449 			invalidate_inode_pages2(inode->i_mapping);
4450 			iput(inode);
4451 		}
4452 		spin_lock(&root->delalloc_lock);
4453 	}
4454 	spin_unlock(&root->delalloc_lock);
4455 }
4456 
4457 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4458 {
4459 	struct btrfs_root *root;
4460 	struct list_head splice;
4461 
4462 	INIT_LIST_HEAD(&splice);
4463 
4464 	spin_lock(&fs_info->delalloc_root_lock);
4465 	list_splice_init(&fs_info->delalloc_roots, &splice);
4466 	while (!list_empty(&splice)) {
4467 		root = list_first_entry(&splice, struct btrfs_root,
4468 					 delalloc_root);
4469 		root = btrfs_grab_root(root);
4470 		BUG_ON(!root);
4471 		spin_unlock(&fs_info->delalloc_root_lock);
4472 
4473 		btrfs_destroy_delalloc_inodes(root);
4474 		btrfs_put_root(root);
4475 
4476 		spin_lock(&fs_info->delalloc_root_lock);
4477 	}
4478 	spin_unlock(&fs_info->delalloc_root_lock);
4479 }
4480 
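/*
 * Clear @mark from @dirty_pages and release the extent buffers backing the
 * marked ranges, waiting for any writeback still in flight.
 */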
4481 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4482 					struct extent_io_tree *dirty_pages,
4483 					int mark)
4484 {
4485 	int ret;
4486 	struct extent_buffer *eb;
4487 	u64 start = 0;
4488 	u64 end;
4489 
4490 	while (1) {
4491 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4492 					    mark, NULL);
4493 		if (ret)
4494 			break;
4495 
4496 		clear_extent_bits(dirty_pages, start, end, mark);
4497 		while (start <= end) {
4498 			eb = find_extent_buffer(fs_info, start);
4499 			start += fs_info->nodesize;
4500 			if (!eb)
4501 				continue;
4502 			wait_on_extent_buffer_writeback(eb);
4503 
4504 			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4505 					       &eb->bflags))
4506 				clear_extent_buffer_dirty(eb);
4507 			free_extent_buffer_stale(eb);
4508 		}
4509 	}
4510 
4511 	return ret;
4512 }
4513 
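/*
 * Unpin all extent ranges left in @unpin after an aborted transaction,
 * serialized against btrfs_finish_extent_commit() by unused_bg_unpin_mutex.
 */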
4514 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4515 				       struct extent_io_tree *unpin)
4516 {
4517 	u64 start;
4518 	u64 end;
4519 	int ret;
4520 
4521 	while (1) {
4522 		struct extent_state *cached_state = NULL;
4523 
4524 		/*
4525 		 * btrfs_finish_extent_commit() may get the same range as
4526 		 * ours between find_first_extent_bit and clear_extent_dirty.
4527 		 * Hence, hold the unused_bg_unpin_mutex to avoid double
4528 		 * unpinning the same extent range.
4529 		 */
4530 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4531 		ret = find_first_extent_bit(unpin, 0, &start, &end,
4532 					    EXTENT_DIRTY, &cached_state);
4533 		if (ret) {
4534 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4535 			break;
4536 		}
4537 
4538 		clear_extent_dirty(unpin, start, end, &cached_state);
4539 		free_extent_state(cached_state);
4540 		btrfs_error_unpin_extent_range(fs_info, start, end);
4541 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4542 		cond_resched();
4543 	}
4544 
4545 	return 0;
4546 }
4547 
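/*
 * Drop a block group's free space cache inode (io_ctl.inode), invalidating
 * its pages, and release the block group reference taken for the cache
 * write.
 */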
4548 static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4549 {
4550 	struct inode *inode;
4551 
4552 	inode = cache->io_ctl.inode;
4553 	if (inode) {
4554 		invalidate_inode_pages2(inode->i_mapping);
4555 		BTRFS_I(inode)->generation = 0;
4556 		cache->io_ctl.inode = NULL;
4557 		iput(inode);
4558 	}
4559 	ASSERT(cache->io_ctl.pages == NULL);
4560 	btrfs_put_block_group(cache);
4561 }
4562 
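/*
 * Release the dirty and in-flight block group caches of an aborted
 * transaction, marking their disk cache state BTRFS_DC_ERROR on the way.
 */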
4563 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4564 			     struct btrfs_fs_info *fs_info)
4565 {
4566 	struct btrfs_block_group *cache;
4567 
4568 	spin_lock(&cur_trans->dirty_bgs_lock);
4569 	while (!list_empty(&cur_trans->dirty_bgs)) {
4570 		cache = list_first_entry(&cur_trans->dirty_bgs,
4571 					 struct btrfs_block_group,
4572 					 dirty_list);
4573 
4574 		if (!list_empty(&cache->io_list)) {
4575 			spin_unlock(&cur_trans->dirty_bgs_lock);
4576 			list_del_init(&cache->io_list);
4577 			btrfs_cleanup_bg_io(cache);
4578 			spin_lock(&cur_trans->dirty_bgs_lock);
4579 		}
4580 
4581 		list_del_init(&cache->dirty_list);
4582 		spin_lock(&cache->lock);
4583 		cache->disk_cache_state = BTRFS_DC_ERROR;
4584 		spin_unlock(&cache->lock);
4585 
4586 		spin_unlock(&cur_trans->dirty_bgs_lock);
4587 		btrfs_put_block_group(cache);
4588 		btrfs_delayed_refs_rsv_release(fs_info, 1);
4589 		spin_lock(&cur_trans->dirty_bgs_lock);
4590 	}
4591 	spin_unlock(&cur_trans->dirty_bgs_lock);
4592 
4593 	/*
4594 	 * Refer to the definition of the io_bgs member for details on why
4595 	 * it's safe to use it without any locking.
4596 	 */
4597 	while (!list_empty(&cur_trans->io_bgs)) {
4598 		cache = list_first_entry(&cur_trans->io_bgs,
4599 					 struct btrfs_block_group,
4600 					 io_list);
4601 
4602 		list_del_init(&cache->io_list);
4603 		spin_lock(&cache->lock);
4604 		cache->disk_cache_state = BTRFS_DC_ERROR;
4605 		spin_unlock(&cache->lock);
4606 		btrfs_cleanup_bg_io(cache);
4607 	}
4608 }
4609 
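/*
 * Tear down one aborted transaction: clean up its dirty block groups and
 * delayed refs, wake anyone blocked on its state transitions, destroy the
 * delayed inodes and the dirty/pinned extents, then mark it completed.
 */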
4610 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4611 				   struct btrfs_fs_info *fs_info)
4612 {
4613 	struct btrfs_device *dev, *tmp;
4614 
4615 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4616 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4617 	ASSERT(list_empty(&cur_trans->io_bgs));
4618 
4619 	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4620 				 post_commit_list) {
4621 		list_del_init(&dev->post_commit_list);
4622 	}
4623 
4624 	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4625 
4626 	cur_trans->state = TRANS_STATE_COMMIT_START;
4627 	wake_up(&fs_info->transaction_blocked_wait);
4628 
4629 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4630 	wake_up(&fs_info->transaction_wait);
4631 
4632 	btrfs_destroy_delayed_inodes(fs_info);
4633 
4634 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4635 				     EXTENT_DIRTY);
4636 	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
4637 
4638 	cur_trans->state = TRANS_STATE_COMPLETED;
4639 	wake_up(&cur_trans->commit_wait);
4640 }
4641 
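/*
 * Clean up all transactions still on fs_info->trans_list: committing ones
 * are waited for, the running one is drained of writers, and each is then
 * torn down with btrfs_cleanup_one_transaction().
 */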
4642 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4643 {
4644 	struct btrfs_transaction *t;
4645 
4646 	mutex_lock(&fs_info->transaction_kthread_mutex);
4647 
4648 	spin_lock(&fs_info->trans_lock);
4649 	while (!list_empty(&fs_info->trans_list)) {
4650 		t = list_first_entry(&fs_info->trans_list,
4651 				     struct btrfs_transaction, list);
4652 		if (t->state >= TRANS_STATE_COMMIT_START) {
4653 			refcount_inc(&t->use_count);
4654 			spin_unlock(&fs_info->trans_lock);
4655 			btrfs_wait_for_commit(fs_info, t->transid);
4656 			btrfs_put_transaction(t);
4657 			spin_lock(&fs_info->trans_lock);
4658 			continue;
4659 		}
4660 		if (t == fs_info->running_transaction) {
4661 			t->state = TRANS_STATE_COMMIT_DOING;
4662 			spin_unlock(&fs_info->trans_lock);
4663 			/*
4664 			 * We wait for num_writers to drop to 0 since we don't
4665 			 * currently hold an open handle for this transaction.
4666 			 */
4667 			wait_event(t->writer_wait,
4668 				   atomic_read(&t->num_writers) == 0);
4669 		} else {
4670 			spin_unlock(&fs_info->trans_lock);
4671 		}
4672 		btrfs_cleanup_one_transaction(t, fs_info);
4673 
4674 		spin_lock(&fs_info->trans_lock);
4675 		if (t == fs_info->running_transaction)
4676 			fs_info->running_transaction = NULL;
4677 		list_del_init(&t->list);
4678 		spin_unlock(&fs_info->trans_lock);
4679 
4680 		btrfs_put_transaction(t);
4681 		trace_btrfs_transaction_commit(fs_info->tree_root);
4682 		spin_lock(&fs_info->trans_lock);
4683 	}
4684 	spin_unlock(&fs_info->trans_lock);
4685 	btrfs_destroy_all_ordered_extents(fs_info);
4686 	btrfs_destroy_delayed_inodes(fs_info);
4687 	btrfs_assert_delayed_root_empty(fs_info);
4688 	btrfs_destroy_all_delalloc_inodes(fs_info);
4689 	btrfs_drop_all_logs(fs_info);
4690 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4691 
4692 	return 0;
4693 }
4694