xref: /openbmc/linux/fs/btrfs/disk-io.c (revision 78bb17f7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/blkdev.h>
8 #include <linux/radix-tree.h>
9 #include <linux/writeback.h>
10 #include <linux/workqueue.h>
11 #include <linux/kthread.h>
12 #include <linux/slab.h>
13 #include <linux/migrate.h>
14 #include <linux/ratelimit.h>
15 #include <linux/uuid.h>
16 #include <linux/semaphore.h>
17 #include <linux/error-injection.h>
18 #include <linux/crc32c.h>
19 #include <linux/sched/mm.h>
20 #include <asm/unaligned.h>
21 #include <crypto/hash.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "btrfs_inode.h"
26 #include "volumes.h"
27 #include "print-tree.h"
28 #include "locking.h"
29 #include "tree-log.h"
30 #include "free-space-cache.h"
31 #include "free-space-tree.h"
32 #include "inode-map.h"
33 #include "check-integrity.h"
34 #include "rcu-string.h"
35 #include "dev-replace.h"
36 #include "raid56.h"
37 #include "sysfs.h"
38 #include "qgroup.h"
39 #include "compression.h"
40 #include "tree-checker.h"
41 #include "ref-verify.h"
42 #include "block-group.h"
43 #include "discard.h"
44 #include "space-info.h"
45 
46 #define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
47 				 BTRFS_HEADER_FLAG_RELOC |\
48 				 BTRFS_SUPER_FLAG_ERROR |\
49 				 BTRFS_SUPER_FLAG_SEEDING |\
50 				 BTRFS_SUPER_FLAG_METADUMP |\
51 				 BTRFS_SUPER_FLAG_METADUMP_V2)
52 
53 static const struct extent_io_ops btree_extent_io_ops;
54 static void end_workqueue_fn(struct btrfs_work *work);
55 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
56 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
57 				      struct btrfs_fs_info *fs_info);
58 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
59 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
60 					struct extent_io_tree *dirty_pages,
61 					int mark);
62 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
63 				       struct extent_io_tree *pinned_extents);
64 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
65 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
66 
67 /*
68  * btrfs_end_io_wq structs are used to do processing in task context when an IO
69  * completes.  Reads use this to verify checksums, and writes use it to insert
70  * metadata for new file extents after the IO is done.
71  */
72 struct btrfs_end_io_wq {
73 	struct bio *bio;
74 	bio_end_io_t *end_io;
75 	void *private;
76 	struct btrfs_fs_info *info;
77 	blk_status_t status;
78 	enum btrfs_wq_endio_type metadata;
79 	struct btrfs_work work;
80 };
81 
82 static struct kmem_cache *btrfs_end_io_wq_cache;
83 
84 int __init btrfs_end_io_wq_init(void)
85 {
86 	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
87 					sizeof(struct btrfs_end_io_wq),
88 					0,
89 					SLAB_MEM_SPREAD,
90 					NULL);
91 	if (!btrfs_end_io_wq_cache)
92 		return -ENOMEM;
93 	return 0;
94 }
95 
96 void __cold btrfs_end_io_wq_exit(void)
97 {
98 	kmem_cache_destroy(btrfs_end_io_wq_cache);
99 }
100 
101 static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
102 {
103 	if (fs_info->csum_shash)
104 		crypto_free_shash(fs_info->csum_shash);
105 }
106 
107 /*
108  * async submit bios are used to offload expensive checksumming
109  * onto the worker threads.  They checksum file and metadata bios
110  * just before they are sent down the IO stack.
111  */
112 struct async_submit_bio {
113 	void *private_data;
114 	struct bio *bio;
115 	extent_submit_bio_start_t *submit_bio_start;
116 	int mirror_num;
117 	/*
118 	 * bio_offset is optional and can be used if the pages in the bio
119 	 * can't tell us where in the file the bio should go.
120 	 */
121 	u64 bio_offset;
122 	struct btrfs_work work;
123 	blk_status_t status;
124 };
125 
126 /*
127  * Lockdep class keys for an extent_buffer->lock in this root.  For a given
128  * eb, the lockdep key is determined by the btrfs_root it belongs to and
129  * the level the eb occupies in the tree.
130  *
131  * Different roots are used for different purposes and may nest inside each
132  * other and they require separate keysets.  As lockdep keys should be
133  * static, assign keysets according to the purpose of the root as indicated
134  * by btrfs_root->root_key.objectid.  This ensures that all special purpose
135  * roots have separate keysets.
136  *
137  * Lock-nesting across peer nodes is always done with the immediate parent
138  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
139  * subclass to avoid triggering lockdep warning in such cases.
140  *
141  * The key is set by the readpage_end_io_hook after the buffer has passed
142  * csum validation but before the pages are unlocked.  It is also set by
143  * btrfs_init_new_buffer on freshly allocated blocks.
144  *
145  * We also add a check to make sure the highest level of the tree is the
146  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
147  * needs update as well.
148  */
149 #ifdef CONFIG_DEBUG_LOCK_ALLOC
150 # if BTRFS_MAX_LEVEL != 8
151 #  error "BTRFS_MAX_LEVEL has changed, update the lockdep keysets below"
152 # endif
153 
154 static struct btrfs_lockdep_keyset {
155 	u64			id;		/* root objectid */
156 	const char		*name_stem;	/* lock name stem */
157 	char			names[BTRFS_MAX_LEVEL + 1][20];
158 	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
159 } btrfs_lockdep_keysets[] = {
160 	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
161 	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
162 	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
163 	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
164 	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
165 	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
166 	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
167 	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
168 	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
169 	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
170 	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
171 	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
172 	{ .id = 0,				.name_stem = "tree"	},
173 };
174 
175 void __init btrfs_init_lockdep(void)
176 {
177 	int i, j;
178 
179 	/* initialize lockdep class names */
180 	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
181 		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
182 
183 		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
184 			snprintf(ks->names[j], sizeof(ks->names[j]),
185 				 "btrfs-%s-%02d", ks->name_stem, j);
186 	}
187 }
188 
189 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
190 				    int level)
191 {
192 	struct btrfs_lockdep_keyset *ks;
193 
194 	BUG_ON(level >= ARRAY_SIZE(ks->keys));
195 
196 	/* find the matching keyset, id 0 is the default entry */
197 	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
198 		if (ks->id == objectid)
199 			break;
200 
201 	lockdep_set_class_and_name(&eb->lock,
202 				   &ks->keys[level], ks->names[level]);
203 }
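
/*
 * For example, an eb owned by the extent tree (btrfs_header_owner(eb) ==
 * BTRFS_EXTENT_TREE_OBJECTID) at level 2 ends up with the class named
 * "btrfs-extent-02" built by btrfs_init_lockdep() above:
 *
 *	btrfs_set_buffer_lockdep_class(BTRFS_EXTENT_TREE_OBJECTID, eb, 2);
 *
 * An objectid with no dedicated entry falls through to the terminating
 * { .id = 0, .name_stem = "tree" } slot.
 */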
204 
205 #endif
206 
207 /*
208  * Extents on the btree inode are pretty simple: there's one extent
209  * that covers the entire device.
210  */
211 struct extent_map *btree_get_extent(struct btrfs_inode *inode,
212 				    struct page *page, size_t pg_offset,
213 				    u64 start, u64 len)
214 {
215 	struct extent_map_tree *em_tree = &inode->extent_tree;
216 	struct extent_map *em;
217 	int ret;
218 
219 	read_lock(&em_tree->lock);
220 	em = lookup_extent_mapping(em_tree, start, len);
221 	if (em) {
222 		read_unlock(&em_tree->lock);
223 		goto out;
224 	}
225 	read_unlock(&em_tree->lock);
226 
227 	em = alloc_extent_map();
228 	if (!em) {
229 		em = ERR_PTR(-ENOMEM);
230 		goto out;
231 	}
232 	em->start = 0;
233 	em->len = (u64)-1;
234 	em->block_len = (u64)-1;
235 	em->block_start = 0;
236 
237 	write_lock(&em_tree->lock);
238 	ret = add_extent_mapping(em_tree, em, 0);
239 	if (ret == -EEXIST) {
240 		free_extent_map(em);
241 		em = lookup_extent_mapping(em_tree, start, len);
242 		if (!em)
243 			em = ERR_PTR(-EIO);
244 	} else if (ret) {
245 		free_extent_map(em);
246 		em = ERR_PTR(ret);
247 	}
248 	write_unlock(&em_tree->lock);
249 
250 out:
251 	return em;
252 }
253 
254 /*
255  * Compute the csum of a btree block and store the result to provided buffer.
256  */
257 static void csum_tree_block(struct extent_buffer *buf, u8 *result)
258 {
259 	struct btrfs_fs_info *fs_info = buf->fs_info;
260 	const int num_pages = fs_info->nodesize >> PAGE_SHIFT;
261 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
262 	char *kaddr;
263 	int i;
264 
265 	shash->tfm = fs_info->csum_shash;
266 	crypto_shash_init(shash);
267 	kaddr = page_address(buf->pages[0]);
268 	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
269 			    PAGE_SIZE - BTRFS_CSUM_SIZE);
270 
271 	for (i = 1; i < num_pages; i++) {
272 		kaddr = page_address(buf->pages[i]);
273 		crypto_shash_update(shash, kaddr, PAGE_SIZE);
274 	}
275 	memset(result, 0, BTRFS_CSUM_SIZE);
276 	crypto_shash_final(shash, result);
277 }
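
/*
 * The loop above relies on the on-disk layout of a tree block: the first
 * BTRFS_CSUM_SIZE bytes of page 0 hold the checksum itself, so hashing
 * starts right after them and then covers every remaining page in full:
 *
 *	page 0: [ csum | data ............ ]	hash the data part only
 *	page 1: [ data .................. ]	hash everything
 *	 ...
 */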
278 
279 /*
280  * we can't consider a given block up to date unless the transid of the
281  * block matches the transid in the parent node's pointer.  This is how we
282  * detect blocks that either didn't get written at all or got written
283  * in the wrong place.
284  */
285 static int verify_parent_transid(struct extent_io_tree *io_tree,
286 				 struct extent_buffer *eb, u64 parent_transid,
287 				 int atomic)
288 {
289 	struct extent_state *cached_state = NULL;
290 	int ret;
291 	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);
292 
293 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
294 		return 0;
295 
296 	if (atomic)
297 		return -EAGAIN;
298 
299 	if (need_lock) {
300 		btrfs_tree_read_lock(eb);
301 		btrfs_set_lock_blocking_read(eb);
302 	}
303 
304 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
305 			 &cached_state);
306 	if (extent_buffer_uptodate(eb) &&
307 	    btrfs_header_generation(eb) == parent_transid) {
308 		ret = 0;
309 		goto out;
310 	}
311 	btrfs_err_rl(eb->fs_info,
312 		"parent transid verify failed on %llu wanted %llu found %llu",
313 			eb->start,
314 			parent_transid, btrfs_header_generation(eb));
315 	ret = 1;
316 
317 	/*
318 	 * Things reading via commit roots that don't have normal protection,
319 	 * like send, can have a really old block in cache that may point at a
320 	 * block that has been freed and re-allocated.  So don't clear uptodate
321 	 * if we find an eb that is under IO (dirty/writeback) because we could
322 	 * end up reading in the stale data and then writing it back out and
323 	 * making everybody very sad.
324 	 */
325 	if (!extent_buffer_under_io(eb))
326 		clear_extent_buffer_uptodate(eb);
327 out:
328 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
329 			     &cached_state);
330 	if (need_lock)
331 		btrfs_tree_read_unlock_blocking(eb);
332 	return ret;
333 }
334 
335 static bool btrfs_supported_super_csum(u16 csum_type)
336 {
337 	switch (csum_type) {
338 	case BTRFS_CSUM_TYPE_CRC32:
339 	case BTRFS_CSUM_TYPE_XXHASH:
340 	case BTRFS_CSUM_TYPE_SHA256:
341 	case BTRFS_CSUM_TYPE_BLAKE2:
342 		return true;
343 	default:
344 		return false;
345 	}
346 }
347 
348 /*
349  * Return 0 if the superblock checksum type matches the checksum value of that
350  * algorithm. Pass the raw disk superblock data.
351  */
352 static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
353 				  char *raw_disk_sb)
354 {
355 	struct btrfs_super_block *disk_sb =
356 		(struct btrfs_super_block *)raw_disk_sb;
357 	char result[BTRFS_CSUM_SIZE];
358 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
359 
360 	shash->tfm = fs_info->csum_shash;
361 	crypto_shash_init(shash);
362 
363 	/*
364 	 * The super_block structure does not span the whole
365 	 * BTRFS_SUPER_INFO_SIZE range; the unused space is expected to be
366 	 * filled with zeros and is included in the checksum.
367 	 */
368 	crypto_shash_update(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
369 			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
370 	crypto_shash_final(shash, result);
371 
372 	if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb)))
373 		return 1;
374 
375 	return 0;
376 }
377 
378 int btrfs_verify_level_key(struct extent_buffer *eb, int level,
379 			   struct btrfs_key *first_key, u64 parent_transid)
380 {
381 	struct btrfs_fs_info *fs_info = eb->fs_info;
382 	int found_level;
383 	struct btrfs_key found_key;
384 	int ret;
385 
386 	found_level = btrfs_header_level(eb);
387 	if (found_level != level) {
388 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
389 		     KERN_ERR "BTRFS: tree level check failed\n");
390 		btrfs_err(fs_info,
391 "tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
392 			  eb->start, level, found_level);
393 		return -EIO;
394 	}
395 
396 	if (!first_key)
397 		return 0;
398 
399 	/*
400 	 * For live tree blocks (new tree blocks in the current transaction),
401 	 * we would need proper lock context to avoid races, which is
402 	 * impossible here.  So we only check tree blocks read from disk,
403 	 * whose generation <= fs_info->last_trans_committed.
404 	 */
405 	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
406 		return 0;
407 
408 	/* We have @first_key, so this @eb must have at least one item */
409 	if (btrfs_header_nritems(eb) == 0) {
410 		btrfs_err(fs_info,
411 		"invalid tree nritems, bytenr=%llu nritems=0 expect >0",
412 			  eb->start);
413 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
414 		return -EUCLEAN;
415 	}
416 
417 	if (found_level)
418 		btrfs_node_key_to_cpu(eb, &found_key, 0);
419 	else
420 		btrfs_item_key_to_cpu(eb, &found_key, 0);
421 	ret = btrfs_comp_cpu_keys(first_key, &found_key);
422 
423 	if (ret) {
424 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
425 		     KERN_ERR "BTRFS: tree first key check failed\n");
426 		btrfs_err(fs_info,
427 "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
428 			  eb->start, parent_transid, first_key->objectid,
429 			  first_key->type, first_key->offset,
430 			  found_key.objectid, found_key.type,
431 			  found_key.offset);
432 	}
433 	return ret;
434 }
435 
436 /*
437  * helper to read a given tree block, doing retries as required when
438  * the checksums don't match and we have alternate mirrors to try.
439  *
440  * @parent_transid:	expected transid, skip check if 0
441  * @level:		expected level, mandatory check
442  * @first_key:		expected key of first slot, skip check if NULL
443  */
444 static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
445 					  u64 parent_transid, int level,
446 					  struct btrfs_key *first_key)
447 {
448 	struct btrfs_fs_info *fs_info = eb->fs_info;
449 	struct extent_io_tree *io_tree;
450 	int failed = 0;
451 	int ret;
452 	int num_copies = 0;
453 	int mirror_num = 0;
454 	int failed_mirror = 0;
455 
456 	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
457 	while (1) {
458 		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
459 		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
460 		if (!ret) {
461 			if (verify_parent_transid(io_tree, eb,
462 						   parent_transid, 0))
463 				ret = -EIO;
464 			else if (btrfs_verify_level_key(eb, level,
465 						first_key, parent_transid))
466 				ret = -EUCLEAN;
467 			else
468 				break;
469 		}
470 
471 		num_copies = btrfs_num_copies(fs_info,
472 					      eb->start, eb->len);
473 		if (num_copies == 1)
474 			break;
475 
476 		if (!failed_mirror) {
477 			failed = 1;
478 			failed_mirror = eb->read_mirror;
479 		}
480 
481 		mirror_num++;
482 		if (mirror_num == failed_mirror)
483 			mirror_num++;
484 
485 		if (mirror_num > num_copies)
486 			break;
487 	}
488 
489 	if (failed && !ret && failed_mirror)
490 		btrfs_repair_eb_io_failure(eb, failed_mirror);
491 
492 	return ret;
493 }
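
/*
 * A worked example of the retry loop above: with num_copies == 2 and the
 * first read (mirror_num == 0) failing verification with
 * eb->read_mirror == 1, we record failed_mirror = 1, bump mirror_num to
 * 1, see that it equals failed_mirror and bump it again to 2, then retry
 * from the second copy.  If a later attempt succeeds, the good data is
 * written back over the failed mirror via btrfs_repair_eb_io_failure().
 */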
494 
495 /*
496  * checksum a dirty tree block before IO.  This has extra checks to make sure
497  * we only fill in the checksum field in the first page of a multi-page block
498  */
499 
500 static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
501 {
502 	u64 start = page_offset(page);
503 	u64 found_start;
504 	u8 result[BTRFS_CSUM_SIZE];
505 	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
506 	struct extent_buffer *eb;
507 	int ret;
508 
509 	eb = (struct extent_buffer *)page->private;
510 	if (page != eb->pages[0])
511 		return 0;
512 
513 	found_start = btrfs_header_bytenr(eb);
514 	/*
515 	 * Please do not consolidate these warnings into a single if.
516 	 * It is useful to know what went wrong.
517 	 */
518 	if (WARN_ON(found_start != start))
519 		return -EUCLEAN;
520 	if (WARN_ON(!PageUptodate(page)))
521 		return -EUCLEAN;
522 
523 	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
524 				    offsetof(struct btrfs_header, fsid),
525 				    BTRFS_FSID_SIZE) == 0);
526 
527 	csum_tree_block(eb, result);
528 
529 	if (btrfs_header_level(eb))
530 		ret = btrfs_check_node(eb);
531 	else
532 		ret = btrfs_check_leaf_full(eb);
533 
534 	if (ret < 0) {
535 		btrfs_print_tree(eb, 0);
536 		btrfs_err(fs_info,
537 		"block=%llu write time tree block corruption detected",
538 			  eb->start);
539 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
540 		return ret;
541 	}
542 	write_extent_buffer(eb, result, 0, csum_size);
543 
544 	return 0;
545 }
546 
547 static int check_tree_block_fsid(struct extent_buffer *eb)
548 {
549 	struct btrfs_fs_info *fs_info = eb->fs_info;
550 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
551 	u8 fsid[BTRFS_FSID_SIZE];
552 	int ret = 1;
553 
554 	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
555 			   BTRFS_FSID_SIZE);
556 	while (fs_devices) {
557 		u8 *metadata_uuid;
558 
559 		/*
560 		 * Checking the incompat flag is only valid for the current
561 		 * fs. For seed devices it's forbidden to change their uuid,
562 		 * so reading ->fsid in that case is fine.
563 		 */
564 		if (fs_devices == fs_info->fs_devices &&
565 		    btrfs_fs_incompat(fs_info, METADATA_UUID))
566 			metadata_uuid = fs_devices->metadata_uuid;
567 		else
568 			metadata_uuid = fs_devices->fsid;
569 
570 		if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
571 			ret = 0;
572 			break;
573 		}
574 		fs_devices = fs_devices->seed;
575 	}
576 	return ret;
577 }
578 
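/*
 * Verify a tree block at read completion time.  The checks run in order:
 * the header bytenr must match the eb's logical start, the fsid must
 * belong to this filesystem (or one of its seeds), the level must be
 * below BTRFS_MAX_LEVEL, the checksum must match, and finally the
 * tree-checker must accept the leaf or node contents.  Only then is the
 * buffer marked uptodate.
 */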
579 static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
580 				      u64 phy_offset, struct page *page,
581 				      u64 start, u64 end, int mirror)
582 {
583 	u64 found_start;
584 	int found_level;
585 	struct extent_buffer *eb;
586 	struct btrfs_fs_info *fs_info;
587 	u16 csum_size;
588 	int ret = 0;
589 	u8 result[BTRFS_CSUM_SIZE];
590 	int reads_done;
591 
592 	if (!page->private)
593 		goto out;
594 
595 	eb = (struct extent_buffer *)page->private;
596 	fs_info = eb->fs_info;
597 	csum_size = btrfs_super_csum_size(fs_info->super_copy);
598 
599 	/* The pending IO might have been the only thing that kept this buffer
600 	 * in memory.  Make sure we have a ref for all these other checks.
601 	 */
602 	atomic_inc(&eb->refs);
603 
604 	reads_done = atomic_dec_and_test(&eb->io_pages);
605 	if (!reads_done)
606 		goto err;
607 
608 	eb->read_mirror = mirror;
609 	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
610 		ret = -EIO;
611 		goto err;
612 	}
613 
614 	found_start = btrfs_header_bytenr(eb);
615 	if (found_start != eb->start) {
616 		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
617 			     eb->start, found_start);
618 		ret = -EIO;
619 		goto err;
620 	}
621 	if (check_tree_block_fsid(eb)) {
622 		btrfs_err_rl(fs_info, "bad fsid on block %llu",
623 			     eb->start);
624 		ret = -EIO;
625 		goto err;
626 	}
627 	found_level = btrfs_header_level(eb);
628 	if (found_level >= BTRFS_MAX_LEVEL) {
629 		btrfs_err(fs_info, "bad tree block level %d on %llu",
630 			  (int)btrfs_header_level(eb), eb->start);
631 		ret = -EIO;
632 		goto err;
633 	}
634 
635 	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
636 				       eb, found_level);
637 
638 	csum_tree_block(eb, result);
639 
640 	if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
641 		u32 val;
642 		u32 found = 0;
643 
644 		memcpy(&found, result, csum_size);
645 
646 		read_extent_buffer(eb, &val, 0, csum_size);
647 		btrfs_warn_rl(fs_info,
648 		"%s checksum verify failed on %llu wanted %x found %x level %d",
649 			      fs_info->sb->s_id, eb->start,
650 			      val, found, btrfs_header_level(eb));
651 		ret = -EUCLEAN;
652 		goto err;
653 	}
654 
655 	/*
656 	 * If this is a leaf block and it is corrupt, set the corrupt bit so
657 	 * that we don't try to read the other copies of this block, just
658 	 * return -EIO.
659 	 */
660 	if (found_level == 0 && btrfs_check_leaf_full(eb)) {
661 		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
662 		ret = -EIO;
663 	}
664 
665 	if (found_level > 0 && btrfs_check_node(eb))
666 		ret = -EIO;
667 
668 	if (!ret)
669 		set_extent_buffer_uptodate(eb);
670 	else
671 		btrfs_err(fs_info,
672 			  "block=%llu read time tree block corruption detected",
673 			  eb->start);
674 err:
675 	if (reads_done &&
676 	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
677 		btree_readahead_hook(eb, ret);
678 
679 	if (ret) {
680 		/*
681 		 * our io error hook is going to dec the io pages
682 		 * Our I/O error hook is going to dec the io_pages count
683 		 * again, so we have to make sure it has something to
684 		 * decrement.
685 		atomic_inc(&eb->io_pages);
686 		clear_extent_buffer_uptodate(eb);
687 	}
688 	free_extent_buffer(eb);
689 out:
690 	return ret;
691 }
692 
693 static void end_workqueue_bio(struct bio *bio)
694 {
695 	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
696 	struct btrfs_fs_info *fs_info;
697 	struct btrfs_workqueue *wq;
698 
699 	fs_info = end_io_wq->info;
700 	end_io_wq->status = bio->bi_status;
701 
702 	if (bio_op(bio) == REQ_OP_WRITE) {
703 		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
704 			wq = fs_info->endio_meta_write_workers;
705 		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
706 			wq = fs_info->endio_freespace_worker;
707 		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
708 			wq = fs_info->endio_raid56_workers;
709 		else
710 			wq = fs_info->endio_write_workers;
711 	} else {
712 		if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR))
713 			wq = fs_info->endio_repair_workers;
714 		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
715 			wq = fs_info->endio_raid56_workers;
716 		else if (end_io_wq->metadata)
717 			wq = fs_info->endio_meta_workers;
718 		else
719 			wq = fs_info->endio_workers;
720 	}
721 
722 	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
723 	btrfs_queue_work(wq, &end_io_wq->work);
724 }
725 
726 blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
727 			enum btrfs_wq_endio_type metadata)
728 {
729 	struct btrfs_end_io_wq *end_io_wq;
730 
731 	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
732 	if (!end_io_wq)
733 		return BLK_STS_RESOURCE;
734 
735 	end_io_wq->private = bio->bi_private;
736 	end_io_wq->end_io = bio->bi_end_io;
737 	end_io_wq->info = info;
738 	end_io_wq->status = 0;
739 	end_io_wq->bio = bio;
740 	end_io_wq->metadata = metadata;
741 
742 	bio->bi_private = end_io_wq;
743 	bio->bi_end_io = end_workqueue_bio;
744 	return 0;
745 }
746 
747 static void run_one_async_start(struct btrfs_work *work)
748 {
749 	struct async_submit_bio *async;
750 	blk_status_t ret;
751 
752 	async = container_of(work, struct async_submit_bio, work);
753 	ret = async->submit_bio_start(async->private_data, async->bio,
754 				      async->bio_offset);
755 	if (ret)
756 		async->status = ret;
757 }
758 
759 /*
760  * In order to insert checksums into the metadata in large chunks, we wait
761  * until bio submission time.  All the pages in the bio are checksummed and
762  * sums are attached onto the ordered extent record.
763  *
764  * At IO completion time the csums attached on the ordered extent record are
765  * inserted into the tree.
766  */
767 static void run_one_async_done(struct btrfs_work *work)
768 {
769 	struct async_submit_bio *async;
770 	struct inode *inode;
771 	blk_status_t ret;
772 
773 	async = container_of(work, struct async_submit_bio, work);
774 	inode = async->private_data;
775 
776 	/* If an error occurred we just want to clean up the bio and move on */
777 	if (async->status) {
778 		async->bio->bi_status = async->status;
779 		bio_endio(async->bio);
780 		return;
781 	}
782 
783 	/*
784 	 * All of the bios that pass through here are from async helpers.
785 	 * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
786 	 * This changes nothing when cgroups aren't in use.
787 	 */
788 	async->bio->bi_opf |= REQ_CGROUP_PUNT;
789 	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
790 	if (ret) {
791 		async->bio->bi_status = ret;
792 		bio_endio(async->bio);
793 	}
794 }
795 
796 static void run_one_async_free(struct btrfs_work *work)
797 {
798 	struct async_submit_bio *async;
799 
800 	async = container_of(work, struct async_submit_bio, work);
801 	kfree(async);
802 }
803 
804 blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
805 				 int mirror_num, unsigned long bio_flags,
806 				 u64 bio_offset, void *private_data,
807 				 extent_submit_bio_start_t *submit_bio_start)
808 {
809 	struct async_submit_bio *async;
810 
811 	async = kmalloc(sizeof(*async), GFP_NOFS);
812 	if (!async)
813 		return BLK_STS_RESOURCE;
814 
815 	async->private_data = private_data;
816 	async->bio = bio;
817 	async->mirror_num = mirror_num;
818 	async->submit_bio_start = submit_bio_start;
819 
820 	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
821 			run_one_async_free);
822 
823 	async->bio_offset = bio_offset;
824 
825 	async->status = 0;
826 
827 	if (op_is_sync(bio->bi_opf))
828 		btrfs_set_work_high_priority(&async->work);
829 
830 	btrfs_queue_work(fs_info->workers, &async->work);
831 	return 0;
832 }
833 
834 static blk_status_t btree_csum_one_bio(struct bio *bio)
835 {
836 	struct bio_vec *bvec;
837 	struct btrfs_root *root;
838 	int ret = 0;
839 	struct bvec_iter_all iter_all;
840 
841 	ASSERT(!bio_flagged(bio, BIO_CLONED));
842 	bio_for_each_segment_all(bvec, bio, iter_all) {
843 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
844 		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
845 		if (ret)
846 			break;
847 	}
848 
849 	return errno_to_blk_status(ret);
850 }
851 
852 static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
853 					     u64 bio_offset)
854 {
855 	/*
856 	 * When we're called for a write, we're already in the async
857 	 * submission context.  Just checksum; run_one_async_done() maps the bio.
858 	 */
859 	return btree_csum_one_bio(bio);
860 }
861 
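/*
 * Decide whether a metadata write should be checksummed by the async
 * worker threads.  Checksumming stays inline (returns 0) when the inode
 * has in-flight sync writers or when the checksum implementation is fast
 * (BTRFS_FS_CSUM_IMPL_FAST); everything else is deferred to the workers.
 */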
862 static int check_async_write(struct btrfs_fs_info *fs_info,
863 			     struct btrfs_inode *bi)
864 {
865 	if (atomic_read(&bi->sync_writers))
866 		return 0;
867 	if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
868 		return 0;
869 	return 1;
870 }
871 
872 static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
873 					  int mirror_num,
874 					  unsigned long bio_flags)
875 {
876 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
877 	int async = check_async_write(fs_info, BTRFS_I(inode));
878 	blk_status_t ret;
879 
880 	if (bio_op(bio) != REQ_OP_WRITE) {
881 		/*
882 		 * called for a read, do the setup so that checksum validation
883 		 * can happen in the async kernel threads
884 		 */
885 		ret = btrfs_bio_wq_end_io(fs_info, bio,
886 					  BTRFS_WQ_ENDIO_METADATA);
887 		if (ret)
888 			goto out_w_error;
889 		ret = btrfs_map_bio(fs_info, bio, mirror_num);
890 	} else if (!async) {
891 		ret = btree_csum_one_bio(bio);
892 		if (ret)
893 			goto out_w_error;
894 		ret = btrfs_map_bio(fs_info, bio, mirror_num);
895 	} else {
896 		/*
897 		 * kthread helpers are used to submit writes so that
898 		 * checksumming can happen in parallel across all CPUs
899 		 */
900 		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
901 					  0, inode, btree_submit_bio_start);
902 	}
903 
904 	if (ret)
905 		goto out_w_error;
906 	return 0;
907 
908 out_w_error:
909 	bio->bi_status = ret;
910 	bio_endio(bio);
911 	return ret;
912 }
913 
914 #ifdef CONFIG_MIGRATION
915 static int btree_migratepage(struct address_space *mapping,
916 			struct page *newpage, struct page *page,
917 			enum migrate_mode mode)
918 {
919 	/*
920 	 * We can't safely write a btree page from here;
921 	 * we haven't done the locking hook.
922 	 */
923 	if (PageDirty(page))
924 		return -EAGAIN;
925 	/*
926 	 * Buffers may be managed in a filesystem specific way.
927 	 * We must have no buffers or drop them.
928 	 */
929 	if (page_has_private(page) &&
930 	    !try_to_release_page(page, GFP_KERNEL))
931 		return -EAGAIN;
932 	return migrate_page(mapping, newpage, page, mode);
933 }
934 #endif
935 
936 
937 static int btree_writepages(struct address_space *mapping,
938 			    struct writeback_control *wbc)
939 {
940 	struct btrfs_fs_info *fs_info;
941 	int ret;
942 
943 	if (wbc->sync_mode == WB_SYNC_NONE) {
944 
945 		if (wbc->for_kupdate)
946 			return 0;
947 
948 		fs_info = BTRFS_I(mapping->host)->root->fs_info;
949 		/* this is a bit racy, but that's ok */
950 		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
951 					     BTRFS_DIRTY_METADATA_THRESH,
952 					     fs_info->dirty_metadata_batch);
953 		if (ret < 0)
954 			return 0;
955 	}
956 	return btree_write_cache_pages(mapping, wbc);
957 }
958 
959 static int btree_readpage(struct file *file, struct page *page)
960 {
961 	return extent_read_full_page(page, btree_get_extent, 0);
962 }
963 
964 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
965 {
966 	if (PageWriteback(page) || PageDirty(page))
967 		return 0;
968 
969 	return try_release_extent_buffer(page);
970 }
971 
972 static void btree_invalidatepage(struct page *page, unsigned int offset,
973 				 unsigned int length)
974 {
975 	struct extent_io_tree *tree;
976 	tree = &BTRFS_I(page->mapping->host)->io_tree;
977 	extent_invalidatepage(tree, page, offset);
978 	btree_releasepage(page, GFP_NOFS);
979 	if (PagePrivate(page)) {
980 		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
981 			   "page private not zero on page %llu",
982 			   (unsigned long long)page_offset(page));
983 		detach_page_private(page);
984 	}
985 }
986 
987 static int btree_set_page_dirty(struct page *page)
988 {
989 #ifdef DEBUG
990 	struct extent_buffer *eb;
991 
992 	BUG_ON(!PagePrivate(page));
993 	eb = (struct extent_buffer *)page->private;
994 	BUG_ON(!eb);
995 	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
996 	BUG_ON(!atomic_read(&eb->refs));
997 	btrfs_assert_tree_locked(eb);
998 #endif
999 	return __set_page_dirty_nobuffers(page);
1000 }
1001 
1002 static const struct address_space_operations btree_aops = {
1003 	.readpage	= btree_readpage,
1004 	.writepages	= btree_writepages,
1005 	.releasepage	= btree_releasepage,
1006 	.invalidatepage = btree_invalidatepage,
1007 #ifdef CONFIG_MIGRATION
1008 	.migratepage	= btree_migratepage,
1009 #endif
1010 	.set_page_dirty = btree_set_page_dirty,
1011 };
1012 
1013 void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
1014 {
1015 	struct extent_buffer *buf = NULL;
1016 	int ret;
1017 
1018 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
1019 	if (IS_ERR(buf))
1020 		return;
1021 
1022 	ret = read_extent_buffer_pages(buf, WAIT_NONE, 0);
1023 	if (ret < 0)
1024 		free_extent_buffer_stale(buf);
1025 	else
1026 		free_extent_buffer(buf);
1027 }
1028 
1029 struct extent_buffer *btrfs_find_create_tree_block(
1030 						struct btrfs_fs_info *fs_info,
1031 						u64 bytenr)
1032 {
1033 	if (btrfs_is_testing(fs_info))
1034 		return alloc_test_extent_buffer(fs_info, bytenr);
1035 	return alloc_extent_buffer(fs_info, bytenr);
1036 }
1037 
1038 /*
1039  * Read the tree block at logical address @bytenr and do basic but critical
1040  * verification of it.
1041  *
1042  * @parent_transid:	expected transid of this tree block, skip check if 0
1043  * @level:		expected level, mandatory check
1044  * @first_key:		expected key in slot 0, skip check if NULL
1045  */
1046 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
1047 				      u64 parent_transid, int level,
1048 				      struct btrfs_key *first_key)
1049 {
1050 	struct extent_buffer *buf = NULL;
1051 	int ret;
1052 
1053 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
1054 	if (IS_ERR(buf))
1055 		return buf;
1056 
1057 	ret = btree_read_extent_buffer_pages(buf, parent_transid,
1058 					     level, first_key);
1059 	if (ret) {
1060 		free_extent_buffer_stale(buf);
1061 		return ERR_PTR(ret);
1062 	}
1063 	return buf;
1064 
1065 }
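
/*
 * A typical caller (see btrfs_read_tree_root() below) reads a root node
 * using the generation and level recorded in its root item:
 *
 *	generation = btrfs_root_generation(&root->root_item);
 *	level = btrfs_root_level(&root->root_item);
 *	root->node = read_tree_block(fs_info,
 *				     btrfs_root_bytenr(&root->root_item),
 *				     generation, level, NULL);
 */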
1066 
1067 void btrfs_clean_tree_block(struct extent_buffer *buf)
1068 {
1069 	struct btrfs_fs_info *fs_info = buf->fs_info;
1070 	if (btrfs_header_generation(buf) ==
1071 	    fs_info->running_transaction->transid) {
1072 		btrfs_assert_tree_locked(buf);
1073 
1074 		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1075 			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1076 						 -buf->len,
1077 						 fs_info->dirty_metadata_batch);
1078 			/* ugh, clear_extent_buffer_dirty needs to lock the page */
1079 			btrfs_set_lock_blocking_write(buf);
1080 			clear_extent_buffer_dirty(buf);
1081 		}
1082 	}
1083 }
1084 
1085 static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1086 			 u64 objectid)
1087 {
1088 	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
1089 	root->fs_info = fs_info;
1090 	root->node = NULL;
1091 	root->commit_root = NULL;
1092 	root->state = 0;
1093 	root->orphan_cleanup_state = 0;
1094 
1095 	root->last_trans = 0;
1096 	root->highest_objectid = 0;
1097 	root->nr_delalloc_inodes = 0;
1098 	root->nr_ordered_extents = 0;
1099 	root->inode_tree = RB_ROOT;
1100 	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1101 	root->block_rsv = NULL;
1102 
1103 	INIT_LIST_HEAD(&root->dirty_list);
1104 	INIT_LIST_HEAD(&root->root_list);
1105 	INIT_LIST_HEAD(&root->delalloc_inodes);
1106 	INIT_LIST_HEAD(&root->delalloc_root);
1107 	INIT_LIST_HEAD(&root->ordered_extents);
1108 	INIT_LIST_HEAD(&root->ordered_root);
1109 	INIT_LIST_HEAD(&root->reloc_dirty_list);
1110 	INIT_LIST_HEAD(&root->logged_list[0]);
1111 	INIT_LIST_HEAD(&root->logged_list[1]);
1112 	spin_lock_init(&root->inode_lock);
1113 	spin_lock_init(&root->delalloc_lock);
1114 	spin_lock_init(&root->ordered_extent_lock);
1115 	spin_lock_init(&root->accounting_lock);
1116 	spin_lock_init(&root->log_extents_lock[0]);
1117 	spin_lock_init(&root->log_extents_lock[1]);
1118 	spin_lock_init(&root->qgroup_meta_rsv_lock);
1119 	mutex_init(&root->objectid_mutex);
1120 	mutex_init(&root->log_mutex);
1121 	mutex_init(&root->ordered_extent_mutex);
1122 	mutex_init(&root->delalloc_mutex);
1123 	init_waitqueue_head(&root->log_writer_wait);
1124 	init_waitqueue_head(&root->log_commit_wait[0]);
1125 	init_waitqueue_head(&root->log_commit_wait[1]);
1126 	INIT_LIST_HEAD(&root->log_ctxs[0]);
1127 	INIT_LIST_HEAD(&root->log_ctxs[1]);
1128 	atomic_set(&root->log_commit[0], 0);
1129 	atomic_set(&root->log_commit[1], 0);
1130 	atomic_set(&root->log_writers, 0);
1131 	atomic_set(&root->log_batch, 0);
1132 	refcount_set(&root->refs, 1);
1133 	atomic_set(&root->snapshot_force_cow, 0);
1134 	atomic_set(&root->nr_swapfiles, 0);
1135 	root->log_transid = 0;
1136 	root->log_transid_committed = -1;
1137 	root->last_log_commit = 0;
1138 	if (!dummy)
1139 		extent_io_tree_init(fs_info, &root->dirty_log_pages,
1140 				    IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);
1141 
1142 	memset(&root->root_key, 0, sizeof(root->root_key));
1143 	memset(&root->root_item, 0, sizeof(root->root_item));
1144 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1145 	if (!dummy)
1146 		root->defrag_trans_start = fs_info->generation;
1147 	else
1148 		root->defrag_trans_start = 0;
1149 	root->root_key.objectid = objectid;
1150 	root->anon_dev = 0;
1151 
1152 	spin_lock_init(&root->root_item_lock);
1153 	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
1154 #ifdef CONFIG_BTRFS_DEBUG
1155 	INIT_LIST_HEAD(&root->leak_list);
1156 	spin_lock(&fs_info->fs_roots_radix_lock);
1157 	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
1158 	spin_unlock(&fs_info->fs_roots_radix_lock);
1159 #endif
1160 }
1161 
1162 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
1163 					   u64 objectid, gfp_t flags)
1164 {
1165 	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
1166 	if (root)
1167 		__setup_root(root, fs_info, objectid);
1168 	return root;
1169 }
1170 
1171 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1172 /* Should only be used by the testing infrastructure */
1173 struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
1174 {
1175 	struct btrfs_root *root;
1176 
1177 	if (!fs_info)
1178 		return ERR_PTR(-EINVAL);
1179 
1180 	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
1181 	if (!root)
1182 		return ERR_PTR(-ENOMEM);
1183 
1184 	/* We don't use the stripesize in selftest, set it as sectorsize */
1185 	root->alloc_bytenr = 0;
1186 
1187 	return root;
1188 }
1189 #endif
1190 
1191 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1192 				     u64 objectid)
1193 {
1194 	struct btrfs_fs_info *fs_info = trans->fs_info;
1195 	struct extent_buffer *leaf;
1196 	struct btrfs_root *tree_root = fs_info->tree_root;
1197 	struct btrfs_root *root;
1198 	struct btrfs_key key;
1199 	unsigned int nofs_flag;
1200 	int ret = 0;
1201 
1202 	/*
1203 	 * We're holding a transaction handle, so use a NOFS memory allocation
1204 	 * context to avoid deadlock if reclaim happens.
1205 	 */
1206 	nofs_flag = memalloc_nofs_save();
1207 	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
1208 	memalloc_nofs_restore(nofs_flag);
1209 	if (!root)
1210 		return ERR_PTR(-ENOMEM);
1211 
1212 	root->root_key.objectid = objectid;
1213 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1214 	root->root_key.offset = 0;
1215 
1216 	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
1217 	if (IS_ERR(leaf)) {
1218 		ret = PTR_ERR(leaf);
1219 		leaf = NULL;
1220 		goto fail;
1221 	}
1222 
1223 	root->node = leaf;
1224 	btrfs_mark_buffer_dirty(leaf);
1225 
1226 	root->commit_root = btrfs_root_node(root);
1227 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
1228 
1229 	root->root_item.flags = 0;
1230 	root->root_item.byte_limit = 0;
1231 	btrfs_set_root_bytenr(&root->root_item, leaf->start);
1232 	btrfs_set_root_generation(&root->root_item, trans->transid);
1233 	btrfs_set_root_level(&root->root_item, 0);
1234 	btrfs_set_root_refs(&root->root_item, 1);
1235 	btrfs_set_root_used(&root->root_item, leaf->len);
1236 	btrfs_set_root_last_snapshot(&root->root_item, 0);
1237 	btrfs_set_root_dirid(&root->root_item, 0);
1238 	if (is_fstree(objectid))
1239 		generate_random_guid(root->root_item.uuid);
1240 	else
1241 		export_guid(root->root_item.uuid, &guid_null);
1242 	root->root_item.drop_level = 0;
1243 
1244 	key.objectid = objectid;
1245 	key.type = BTRFS_ROOT_ITEM_KEY;
1246 	key.offset = 0;
1247 	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1248 	if (ret)
1249 		goto fail;
1250 
1251 	btrfs_tree_unlock(leaf);
1252 
1253 	return root;
1254 
1255 fail:
1256 	if (leaf)
1257 		btrfs_tree_unlock(leaf);
1258 	btrfs_put_root(root);
1259 
1260 	return ERR_PTR(ret);
1261 }
1262 
1263 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1264 					 struct btrfs_fs_info *fs_info)
1265 {
1266 	struct btrfs_root *root;
1267 	struct extent_buffer *leaf;
1268 
1269 	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
1270 	if (!root)
1271 		return ERR_PTR(-ENOMEM);
1272 
1273 	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1274 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1275 	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1276 
1277 	/*
1278 	 * DON'T set REF_COWS for log trees
1279 	 *
1280 	 * log trees do not get reference counted because they go away
1281 	 * before a real commit is actually done.  They do store pointers
1282 	 * to file data extents, and those reference counts still get
1283 	 * updated (along with back refs to the log tree).
1284 	 */
1285 
1286 	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
1287 			NULL, 0, 0, 0);
1288 	if (IS_ERR(leaf)) {
1289 		btrfs_put_root(root);
1290 		return ERR_CAST(leaf);
1291 	}
1292 
1293 	root->node = leaf;
1294 
1295 	btrfs_mark_buffer_dirty(root->node);
1296 	btrfs_tree_unlock(root->node);
1297 	return root;
1298 }
1299 
1300 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1301 			     struct btrfs_fs_info *fs_info)
1302 {
1303 	struct btrfs_root *log_root;
1304 
1305 	log_root = alloc_log_tree(trans, fs_info);
1306 	if (IS_ERR(log_root))
1307 		return PTR_ERR(log_root);
1308 	WARN_ON(fs_info->log_root_tree);
1309 	fs_info->log_root_tree = log_root;
1310 	return 0;
1311 }
1312 
1313 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1314 		       struct btrfs_root *root)
1315 {
1316 	struct btrfs_fs_info *fs_info = root->fs_info;
1317 	struct btrfs_root *log_root;
1318 	struct btrfs_inode_item *inode_item;
1319 
1320 	log_root = alloc_log_tree(trans, fs_info);
1321 	if (IS_ERR(log_root))
1322 		return PTR_ERR(log_root);
1323 
1324 	log_root->last_trans = trans->transid;
1325 	log_root->root_key.offset = root->root_key.objectid;
1326 
1327 	inode_item = &log_root->root_item.inode;
1328 	btrfs_set_stack_inode_generation(inode_item, 1);
1329 	btrfs_set_stack_inode_size(inode_item, 3);
1330 	btrfs_set_stack_inode_nlink(inode_item, 1);
1331 	btrfs_set_stack_inode_nbytes(inode_item,
1332 				     fs_info->nodesize);
1333 	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1334 
1335 	btrfs_set_root_node(&log_root->root_item, log_root->node);
1336 
1337 	WARN_ON(root->log_root);
1338 	root->log_root = log_root;
1339 	root->log_transid = 0;
1340 	root->log_transid_committed = -1;
1341 	root->last_log_commit = 0;
1342 	return 0;
1343 }
1344 
1345 struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1346 					struct btrfs_key *key)
1347 {
1348 	struct btrfs_root *root;
1349 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
1350 	struct btrfs_path *path;
1351 	u64 generation;
1352 	int ret;
1353 	int level;
1354 
1355 	path = btrfs_alloc_path();
1356 	if (!path)
1357 		return ERR_PTR(-ENOMEM);
1358 
1359 	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
1360 	if (!root) {
1361 		ret = -ENOMEM;
1362 		goto alloc_fail;
1363 	}
1364 
1365 	ret = btrfs_find_root(tree_root, key, path,
1366 			      &root->root_item, &root->root_key);
1367 	if (ret) {
1368 		if (ret > 0)
1369 			ret = -ENOENT;
1370 		goto find_fail;
1371 	}
1372 
1373 	generation = btrfs_root_generation(&root->root_item);
1374 	level = btrfs_root_level(&root->root_item);
1375 	root->node = read_tree_block(fs_info,
1376 				     btrfs_root_bytenr(&root->root_item),
1377 				     generation, level, NULL);
1378 	if (IS_ERR(root->node)) {
1379 		ret = PTR_ERR(root->node);
1380 		root->node = NULL;
1381 		goto find_fail;
1382 	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1383 		ret = -EIO;
1384 		goto find_fail;
1385 	}
1386 	root->commit_root = btrfs_root_node(root);
1387 out:
1388 	btrfs_free_path(path);
1389 	return root;
1390 
1391 find_fail:
1392 	btrfs_put_root(root);
1393 alloc_fail:
1394 	root = ERR_PTR(ret);
1395 	goto out;
1396 }
1397 
1398 static int btrfs_init_fs_root(struct btrfs_root *root)
1399 {
1400 	int ret;
1401 	unsigned int nofs_flag;
1402 
1403 	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1404 	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1405 					GFP_NOFS);
1406 	if (!root->free_ino_pinned || !root->free_ino_ctl) {
1407 		ret = -ENOMEM;
1408 		goto fail;
1409 	}
1410 
1411 	/*
1412 	 * We might be called under a transaction (e.g. indirect backref
1413 	 * resolution) which could deadlock if it triggers memory reclaim
1414 	 */
1415 	nofs_flag = memalloc_nofs_save();
1416 	ret = btrfs_drew_lock_init(&root->snapshot_lock);
1417 	memalloc_nofs_restore(nofs_flag);
1418 	if (ret)
1419 		goto fail;
1420 
1421 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1422 		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
1423 		btrfs_check_and_init_root_item(&root->root_item);
1424 	}
1425 
1426 	btrfs_init_free_ino_ctl(root);
1427 	spin_lock_init(&root->ino_cache_lock);
1428 	init_waitqueue_head(&root->ino_cache_wait);
1429 
1430 	ret = get_anon_bdev(&root->anon_dev);
1431 	if (ret)
1432 		goto fail;
1433 
1434 	mutex_lock(&root->objectid_mutex);
1435 	ret = btrfs_find_highest_objectid(root,
1436 					&root->highest_objectid);
1437 	if (ret) {
1438 		mutex_unlock(&root->objectid_mutex);
1439 		goto fail;
1440 	}
1441 
1442 	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
1443 
1444 	mutex_unlock(&root->objectid_mutex);
1445 
1446 	return 0;
1447 fail:
1448 	/* The caller is responsible for calling btrfs_free_fs_root */
1449 	return ret;
1450 }
1451 
1452 static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1453 					       u64 root_id)
1454 {
1455 	struct btrfs_root *root;
1456 
1457 	spin_lock(&fs_info->fs_roots_radix_lock);
1458 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1459 				 (unsigned long)root_id);
1460 	if (root)
1461 		root = btrfs_grab_root(root);
1462 	spin_unlock(&fs_info->fs_roots_radix_lock);
1463 	return root;
1464 }
1465 
1466 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1467 			 struct btrfs_root *root)
1468 {
1469 	int ret;
1470 
1471 	ret = radix_tree_preload(GFP_NOFS);
1472 	if (ret)
1473 		return ret;
1474 
1475 	spin_lock(&fs_info->fs_roots_radix_lock);
1476 	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1477 				(unsigned long)root->root_key.objectid,
1478 				root);
1479 	if (ret == 0) {
1480 		btrfs_grab_root(root);
1481 		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1482 	}
1483 	spin_unlock(&fs_info->fs_roots_radix_lock);
1484 	radix_tree_preload_end();
1485 
1486 	return ret;
1487 }
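
/*
 * The radix_tree_preload()/radix_tree_preload_end() pair above lets the
 * insertion run under fs_roots_radix_lock without allocating: the tree
 * nodes are preallocated with GFP_NOFS before the spinlock is taken.  On
 * success the radix tree holds its own reference via btrfs_grab_root().
 */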
1488 
1489 void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
1490 {
1491 #ifdef CONFIG_BTRFS_DEBUG
1492 	struct btrfs_root *root;
1493 
1494 	while (!list_empty(&fs_info->allocated_roots)) {
1495 		root = list_first_entry(&fs_info->allocated_roots,
1496 					struct btrfs_root, leak_list);
1497 		btrfs_err(fs_info, "leaked root %llu-%llu refcount %d",
1498 			  root->root_key.objectid, root->root_key.offset,
1499 			  refcount_read(&root->refs));
1500 		while (refcount_read(&root->refs) > 1)
1501 			btrfs_put_root(root);
1502 		btrfs_put_root(root);
1503 	}
1504 #endif
1505 }
1506 
1507 void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
1508 {
1509 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
1510 	percpu_counter_destroy(&fs_info->delalloc_bytes);
1511 	percpu_counter_destroy(&fs_info->dio_bytes);
1512 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
1513 	btrfs_free_csum_hash(fs_info);
1514 	btrfs_free_stripe_hash_table(fs_info);
1515 	btrfs_free_ref_cache(fs_info);
1516 	kfree(fs_info->balance_ctl);
1517 	kfree(fs_info->delayed_root);
1518 	btrfs_put_root(fs_info->extent_root);
1519 	btrfs_put_root(fs_info->tree_root);
1520 	btrfs_put_root(fs_info->chunk_root);
1521 	btrfs_put_root(fs_info->dev_root);
1522 	btrfs_put_root(fs_info->csum_root);
1523 	btrfs_put_root(fs_info->quota_root);
1524 	btrfs_put_root(fs_info->uuid_root);
1525 	btrfs_put_root(fs_info->free_space_root);
1526 	btrfs_put_root(fs_info->fs_root);
1527 	btrfs_check_leaked_roots(fs_info);
1528 	btrfs_extent_buffer_leak_debug_check(fs_info);
1529 	kfree(fs_info->super_copy);
1530 	kfree(fs_info->super_for_commit);
1531 	kvfree(fs_info);
1532 }
1533 
1534 
1535 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1536 				     struct btrfs_key *location,
1537 				     bool check_ref)
1538 {
1539 	struct btrfs_root *root;
1540 	struct btrfs_path *path;
1541 	struct btrfs_key key;
1542 	int ret;
1543 
1544 	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1545 		return btrfs_grab_root(fs_info->tree_root);
1546 	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1547 		return btrfs_grab_root(fs_info->extent_root);
1548 	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1549 		return btrfs_grab_root(fs_info->chunk_root);
1550 	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1551 		return btrfs_grab_root(fs_info->dev_root);
1552 	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1553 		return btrfs_grab_root(fs_info->csum_root);
1554 	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1555 		return btrfs_grab_root(fs_info->quota_root) ?
1556 			fs_info->quota_root : ERR_PTR(-ENOENT);
1557 	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
1558 		return btrfs_grab_root(fs_info->uuid_root) ?
1559 			fs_info->uuid_root : ERR_PTR(-ENOENT);
1560 	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
1561 		return btrfs_grab_root(fs_info->free_space_root) ?
1562 			fs_info->free_space_root : ERR_PTR(-ENOENT);
1563 again:
1564 	root = btrfs_lookup_fs_root(fs_info, location->objectid);
1565 	if (root) {
1566 		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1567 			btrfs_put_root(root);
1568 			return ERR_PTR(-ENOENT);
1569 		}
1570 		return root;
1571 	}
1572 
1573 	root = btrfs_read_tree_root(fs_info->tree_root, location);
1574 	if (IS_ERR(root))
1575 		return root;
1576 
1577 	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1578 		ret = -ENOENT;
1579 		goto fail;
1580 	}
1581 
1582 	ret = btrfs_init_fs_root(root);
1583 	if (ret)
1584 		goto fail;
1585 
1586 	path = btrfs_alloc_path();
1587 	if (!path) {
1588 		ret = -ENOMEM;
1589 		goto fail;
1590 	}
1591 	key.objectid = BTRFS_ORPHAN_OBJECTID;
1592 	key.type = BTRFS_ORPHAN_ITEM_KEY;
1593 	key.offset = location->objectid;
1594 
1595 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1596 	btrfs_free_path(path);
1597 	if (ret < 0)
1598 		goto fail;
1599 	if (ret == 0)
1600 		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1601 
1602 	ret = btrfs_insert_fs_root(fs_info, root);
1603 	if (ret) {
1604 		btrfs_put_root(root);
1605 		if (ret == -EEXIST)
1606 			goto again;
1607 		goto fail;
1608 	}
1609 	return root;
1610 fail:
1611 	btrfs_put_root(root);
1612 	return ERR_PTR(ret);
1613 }
1614 
1615 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1616 {
1617 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1618 	int ret = 0;
1619 	struct btrfs_device *device;
1620 	struct backing_dev_info *bdi;
1621 
1622 	rcu_read_lock();
1623 	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1624 		if (!device->bdev)
1625 			continue;
1626 		bdi = device->bdev->bd_bdi;
1627 		if (bdi_congested(bdi, bdi_bits)) {
1628 			ret = 1;
1629 			break;
1630 		}
1631 	}
1632 	rcu_read_unlock();
1633 	return ret;
1634 }
1635 
1636 /*
1637  * called by the kthread helper functions to finally call the bio end_io
1638  * functions.  This is where read checksum verification actually happens
1639  */
1640 static void end_workqueue_fn(struct btrfs_work *work)
1641 {
1642 	struct bio *bio;
1643 	struct btrfs_end_io_wq *end_io_wq;
1644 
1645 	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
1646 	bio = end_io_wq->bio;
1647 
1648 	bio->bi_status = end_io_wq->status;
1649 	bio->bi_private = end_io_wq->private;
1650 	bio->bi_end_io = end_io_wq->end_io;
1651 	bio_endio(bio);
1652 	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
1653 }
1654 
1655 static int cleaner_kthread(void *arg)
1656 {
1657 	struct btrfs_root *root = arg;
1658 	struct btrfs_fs_info *fs_info = root->fs_info;
1659 	int again;
1660 
1661 	while (1) {
1662 		again = 0;
1663 
1664 		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1665 
1666 		/* Make the cleaner go to sleep early. */
1667 		if (btrfs_need_cleaner_sleep(fs_info))
1668 			goto sleep;
1669 
1670 		/*
1671 		 * Do not do anything if we might cause open_ctree() to block
1672 		 * before we have finished mounting the filesystem.
1673 		 */
1674 		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1675 			goto sleep;
1676 
1677 		if (!mutex_trylock(&fs_info->cleaner_mutex))
1678 			goto sleep;
1679 
1680 		/*
1681 		 * Avoid the case where the status of the fs changed
1682 		 * between the check above and the trylock.
1683 		 */
1684 		if (btrfs_need_cleaner_sleep(fs_info)) {
1685 			mutex_unlock(&fs_info->cleaner_mutex);
1686 			goto sleep;
1687 		}
1688 
1689 		btrfs_run_delayed_iputs(fs_info);
1690 
1691 		again = btrfs_clean_one_deleted_snapshot(root);
1692 		mutex_unlock(&fs_info->cleaner_mutex);
1693 
1694 		/*
1695 		 * The defragger has already dealt with the R/O remount and
1696 		 * umount cases, so we needn't do anything special here.
1697 		 */
1698 		btrfs_run_defrag_inodes(fs_info);
1699 
1700 		/*
1701 		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1702 		 * with relocation (btrfs_relocate_chunk) and relocation
1703 		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1704 		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1705 		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1706 		 * unused block groups.
1707 		 */
1708 		btrfs_delete_unused_bgs(fs_info);
1709 sleep:
1710 		clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1711 		if (kthread_should_park())
1712 			kthread_parkme();
1713 		if (kthread_should_stop())
1714 			return 0;
1715 		if (!again) {
1716 			set_current_state(TASK_INTERRUPTIBLE);
1717 			schedule();
1718 			__set_current_state(TASK_RUNNING);
1719 		}
1720 	}
1721 }
1722 
1723 static int transaction_kthread(void *arg)
1724 {
1725 	struct btrfs_root *root = arg;
1726 	struct btrfs_fs_info *fs_info = root->fs_info;
1727 	struct btrfs_trans_handle *trans;
1728 	struct btrfs_transaction *cur;
1729 	u64 transid;
1730 	time64_t now;
1731 	unsigned long delay;
1732 	bool cannot_commit;
1733 
1734 	do {
1735 		cannot_commit = false;
1736 		delay = HZ * fs_info->commit_interval;
1737 		mutex_lock(&fs_info->transaction_kthread_mutex);
1738 
1739 		spin_lock(&fs_info->trans_lock);
1740 		cur = fs_info->running_transaction;
1741 		if (!cur) {
1742 			spin_unlock(&fs_info->trans_lock);
1743 			goto sleep;
1744 		}
1745 
1746 		now = ktime_get_seconds();
1747 		if (cur->state < TRANS_STATE_COMMIT_START &&
1748 		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
1749 		    (now < cur->start_time ||
1750 		     now - cur->start_time < fs_info->commit_interval)) {
1751 			spin_unlock(&fs_info->trans_lock);
1752 			delay = HZ * 5;
1753 			goto sleep;
1754 		}
1755 		transid = cur->transid;
1756 		spin_unlock(&fs_info->trans_lock);
1757 
1758 		/* If the file system is aborted, this will always fail. */
1759 		trans = btrfs_attach_transaction(root);
1760 		if (IS_ERR(trans)) {
1761 			if (PTR_ERR(trans) != -ENOENT)
1762 				cannot_commit = true;
1763 			goto sleep;
1764 		}
1765 		if (transid == trans->transid) {
1766 			btrfs_commit_transaction(trans);
1767 		} else {
1768 			btrfs_end_transaction(trans);
1769 		}
1770 sleep:
1771 		wake_up_process(fs_info->cleaner_kthread);
1772 		mutex_unlock(&fs_info->transaction_kthread_mutex);
1773 
1774 		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1775 				      &fs_info->fs_state)))
1776 			btrfs_cleanup_transaction(fs_info);
1777 		if (!kthread_should_stop() &&
1778 				(!btrfs_transaction_blocked(fs_info) ||
1779 				 cannot_commit))
1780 			schedule_timeout_interruptible(delay);
1781 	} while (!kthread_should_stop());
1782 	return 0;
1783 }
1784 
1785 /*
1786  * This will find the newest generation in the array of root backups.  The
1787  * index of that backup is returned, or -EINVAL if we can't find
1788  * anything.
1789  *
1790  * We check to make sure the array is valid by comparing the
1791  * generation of the newest root in the array with the generation
1792  * in the super block.  If they don't match we pitch it.
1793  */
1794 static int find_newest_super_backup(struct btrfs_fs_info *info)
1795 {
1796 	const u64 newest_gen = btrfs_super_generation(info->super_copy);
1797 	u64 cur;
1798 	struct btrfs_root_backup *root_backup;
1799 	int i;
1800 
1801 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1802 		root_backup = info->super_copy->super_roots + i;
1803 		cur = btrfs_backup_tree_root_gen(root_backup);
1804 		if (cur == newest_gen)
1805 			return i;
1806 	}
1807 
1808 	return -EINVAL;
1809 }
1810 
1811 /*
1812  * Copy all the root pointers into the super backup array.  This will
1813  * advance the backup slot pointer (backup_root_index) by one when it
1814  * is done.
1815  */
1816 static void backup_super_roots(struct btrfs_fs_info *info)
1817 {
1818 	const int next_backup = info->backup_root_index;
1819 	struct btrfs_root_backup *root_backup;
1820 
1821 	root_backup = info->super_for_commit->super_roots + next_backup;
1822 
1823 	/*
1824 	 * make sure all of our padding and empty slots get zero filled
1825 	 * regardless of which ones we use today
1826 	 */
1827 	memset(root_backup, 0, sizeof(*root_backup));
1828 
1829 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1830 
1831 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1832 	btrfs_set_backup_tree_root_gen(root_backup,
1833 			       btrfs_header_generation(info->tree_root->node));
1834 
1835 	btrfs_set_backup_tree_root_level(root_backup,
1836 			       btrfs_header_level(info->tree_root->node));
1837 
1838 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1839 	btrfs_set_backup_chunk_root_gen(root_backup,
1840 			       btrfs_header_generation(info->chunk_root->node));
1841 	btrfs_set_backup_chunk_root_level(root_backup,
1842 			       btrfs_header_level(info->chunk_root->node));
1843 
1844 	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1845 	btrfs_set_backup_extent_root_gen(root_backup,
1846 			       btrfs_header_generation(info->extent_root->node));
1847 	btrfs_set_backup_extent_root_level(root_backup,
1848 			       btrfs_header_level(info->extent_root->node));
1849 
1850 	/*
1851 	 * we might commit during log recovery, which happens before we set
1852 	 * the fs_root.  Make sure it is valid before we fill it in.
1853 	 */
1854 	if (info->fs_root && info->fs_root->node) {
1855 		btrfs_set_backup_fs_root(root_backup,
1856 					 info->fs_root->node->start);
1857 		btrfs_set_backup_fs_root_gen(root_backup,
1858 			       btrfs_header_generation(info->fs_root->node));
1859 		btrfs_set_backup_fs_root_level(root_backup,
1860 			       btrfs_header_level(info->fs_root->node));
1861 	}
1862 
1863 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1864 	btrfs_set_backup_dev_root_gen(root_backup,
1865 			       btrfs_header_generation(info->dev_root->node));
1866 	btrfs_set_backup_dev_root_level(root_backup,
1867 				       btrfs_header_level(info->dev_root->node));
1868 
1869 	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1870 	btrfs_set_backup_csum_root_gen(root_backup,
1871 			       btrfs_header_generation(info->csum_root->node));
1872 	btrfs_set_backup_csum_root_level(root_backup,
1873 			       btrfs_header_level(info->csum_root->node));
1874 
1875 	btrfs_set_backup_total_bytes(root_backup,
1876 			     btrfs_super_total_bytes(info->super_copy));
1877 	btrfs_set_backup_bytes_used(root_backup,
1878 			     btrfs_super_bytes_used(info->super_copy));
1879 	btrfs_set_backup_num_devices(root_backup,
1880 			     btrfs_super_num_devices(info->super_copy));
1881 
1882 	/*
1883 	 * if we don't copy this out to the super_copy, it won't get remembered
1884 	 * for the next commit
1885 	 */
1886 	memcpy(&info->super_copy->super_roots,
1887 	       &info->super_for_commit->super_roots,
1888 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1889 }
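
/*
 * The backup slots form a small ring.  A minimal sketch of the rotation
 * used above, given that BTRFS_NUM_BACKUP_ROOTS is 4:
 *
 *	slot = info->backup_root_index;                 // e.g. 3
 *	info->backup_root_index = (slot + 1) % 4;       // wraps to 0
 *
 * Four consecutive commits therefore overwrite slots 3, 0, 1, 2 in turn,
 * always keeping the four most recent root sets on disk.
 */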
1890 
1891 /*
1892  * read_backup_root - Reads a backup root based on the passed priority. Prio 0
1893  * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
1894  *
1895  * fs_info - filesystem whose backup roots need to be read
1896  * priority - priority of backup root required
1897  *
1898  * Returns backup root index on success and -EINVAL otherwise.
1899  */
1900 static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
1901 {
1902 	int backup_index = find_newest_super_backup(fs_info);
1903 	struct btrfs_super_block *super = fs_info->super_copy;
1904 	struct btrfs_root_backup *root_backup;
1905 
1906 	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
1907 		if (priority == 0)
1908 			return backup_index;
1909 
1910 		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
1911 		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
1912 	} else {
1913 		return -EINVAL;
1914 	}
1915 
1916 	root_backup = super->super_roots + backup_index;
1917 
1918 	btrfs_set_super_generation(super,
1919 				   btrfs_backup_tree_root_gen(root_backup));
1920 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1921 	btrfs_set_super_root_level(super,
1922 				   btrfs_backup_tree_root_level(root_backup));
1923 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1924 
1925 	/*
1926 	 * FIXME: the total bytes and num_devices need to match, otherwise
1927 	 * a fsck is needed
1928 	 */
1929 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1930 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1931 
1932 	return backup_index;
1933 }
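
/*
 * Worked example for the priority -> slot mapping above, with
 * BTRFS_NUM_BACKUP_ROOTS == 4 and the newest backup in slot 2:
 *
 *	priority 0: slot 2 (returned directly)
 *	priority 1: (2 + 4 - 1) % 4 == 1 (second newest)
 *	priority 3: (2 + 4 - 3) % 4 == 3 (oldest)
 *
 * Adding BTRFS_NUM_BACKUP_ROOTS before the modulo keeps the intermediate
 * value from going negative.
 */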
1934 
1935 /* helper to cleanup workers */
1936 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1937 {
1938 	btrfs_destroy_workqueue(fs_info->fixup_workers);
1939 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1940 	btrfs_destroy_workqueue(fs_info->workers);
1941 	btrfs_destroy_workqueue(fs_info->endio_workers);
1942 	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
1943 	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
1944 	btrfs_destroy_workqueue(fs_info->rmw_workers);
1945 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
1946 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1947 	btrfs_destroy_workqueue(fs_info->delayed_workers);
1948 	btrfs_destroy_workqueue(fs_info->caching_workers);
1949 	btrfs_destroy_workqueue(fs_info->readahead_workers);
1950 	btrfs_destroy_workqueue(fs_info->flush_workers);
1951 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
1952 	if (fs_info->discard_ctl.discard_workers)
1953 		destroy_workqueue(fs_info->discard_ctl.discard_workers);
1954 	/*
1955 	 * Now that all other work queues are destroyed, we can safely destroy
1956 	 * the queues used for metadata I/O, since tasks from those other work
1957 	 * queues can do metadata I/O operations.
1958 	 */
1959 	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
1960 	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
1961 }
1962 
1963 static void free_root_extent_buffers(struct btrfs_root *root)
1964 {
1965 	if (root) {
1966 		free_extent_buffer(root->node);
1967 		free_extent_buffer(root->commit_root);
1968 		root->node = NULL;
1969 		root->commit_root = NULL;
1970 	}
1971 }
1972 
1973 /* helper to cleanup tree roots */
1974 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
1975 {
1976 	free_root_extent_buffers(info->tree_root);
1977 
1978 	free_root_extent_buffers(info->dev_root);
1979 	free_root_extent_buffers(info->extent_root);
1980 	free_root_extent_buffers(info->csum_root);
1981 	free_root_extent_buffers(info->quota_root);
1982 	free_root_extent_buffers(info->uuid_root);
1983 	free_root_extent_buffers(info->fs_root);
1984 	if (free_chunk_root)
1985 		free_root_extent_buffers(info->chunk_root);
1986 	free_root_extent_buffers(info->free_space_root);
1987 }
1988 
1989 void btrfs_put_root(struct btrfs_root *root)
1990 {
1991 	if (!root)
1992 		return;
1993 
1994 	if (refcount_dec_and_test(&root->refs)) {
1995 		WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
1996 		if (root->anon_dev)
1997 			free_anon_bdev(root->anon_dev);
1998 		btrfs_drew_lock_destroy(&root->snapshot_lock);
1999 		free_extent_buffer(root->node);
2000 		free_extent_buffer(root->commit_root);
2001 		kfree(root->free_ino_ctl);
2002 		kfree(root->free_ino_pinned);
2003 #ifdef CONFIG_BTRFS_DEBUG
2004 		spin_lock(&root->fs_info->fs_roots_radix_lock);
2005 		list_del_init(&root->leak_list);
2006 		spin_unlock(&root->fs_info->fs_roots_radix_lock);
2007 #endif
2008 		kfree(root);
2009 	}
2010 }
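
/*
 * Typical pairing for the refcounting above (a sketch, not a quote of any
 * particular caller): every successful btrfs_grab_root() must be matched
 * by a btrfs_put_root() once the caller is done:
 *
 *	root = btrfs_grab_root(fs_info->tree_root);
 *	if (root) {
 *		... use root ...
 *		btrfs_put_root(root);
 *	}
 *
 * The final put frees the structure, so no access may follow it.
 */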
2011 
2012 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2013 {
2014 	int ret;
2015 	struct btrfs_root *gang[8];
2016 	int i;
2017 
2018 	while (!list_empty(&fs_info->dead_roots)) {
2019 		gang[0] = list_entry(fs_info->dead_roots.next,
2020 				     struct btrfs_root, root_list);
2021 		list_del(&gang[0]->root_list);
2022 
2023 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
2024 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2025 		btrfs_put_root(gang[0]);
2026 	}
2027 
2028 	while (1) {
2029 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2030 					     (void **)gang, 0,
2031 					     ARRAY_SIZE(gang));
2032 		if (!ret)
2033 			break;
2034 		for (i = 0; i < ret; i++)
2035 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2036 	}
2037 }
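
/*
 * The second loop above is the standard idiom for draining a radix tree
 * in batches; generically:
 *
 *	while ((n = radix_tree_gang_lookup(tree, (void **)results, 0,
 *					   ARRAY_SIZE(results))))
 *		for (i = 0; i < n; i++)
 *			process(results[i]);
 *
 * The start index can stay 0 here because btrfs_drop_and_free_fs_root()
 * removes each root from fs_roots_radix, so every pass makes progress.
 */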
2038 
2039 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2040 {
2041 	mutex_init(&fs_info->scrub_lock);
2042 	atomic_set(&fs_info->scrubs_running, 0);
2043 	atomic_set(&fs_info->scrub_pause_req, 0);
2044 	atomic_set(&fs_info->scrubs_paused, 0);
2045 	atomic_set(&fs_info->scrub_cancel_req, 0);
2046 	init_waitqueue_head(&fs_info->scrub_pause_wait);
2047 	refcount_set(&fs_info->scrub_workers_refcnt, 0);
2048 }
2049 
2050 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2051 {
2052 	spin_lock_init(&fs_info->balance_lock);
2053 	mutex_init(&fs_info->balance_mutex);
2054 	atomic_set(&fs_info->balance_pause_req, 0);
2055 	atomic_set(&fs_info->balance_cancel_req, 0);
2056 	fs_info->balance_ctl = NULL;
2057 	init_waitqueue_head(&fs_info->balance_wait_q);
2058 }
2059 
2060 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2061 {
2062 	struct inode *inode = fs_info->btree_inode;
2063 
2064 	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2065 	set_nlink(inode, 1);
2066 	/*
2067 	 * We set the i_size on the btree inode to the max possible offset
2068 	 * (OFFSET_MAX).  The real end of the address space is determined
2069 	 * by all of the devices in the system.
2070 	 */
2071 	inode->i_size = OFFSET_MAX;
2072 	inode->i_mapping->a_ops = &btree_aops;
2073 
2074 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2075 	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
2076 			    IO_TREE_INODE_IO, inode);
2077 	BTRFS_I(inode)->io_tree.track_uptodate = false;
2078 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2079 
2080 	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2081 
2082 	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
2083 	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2084 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2085 	btrfs_insert_inode_hash(inode);
2086 }
2087 
2088 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2089 {
2090 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2091 	init_rwsem(&fs_info->dev_replace.rwsem);
2092 	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
2093 }
2094 
2095 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2096 {
2097 	spin_lock_init(&fs_info->qgroup_lock);
2098 	mutex_init(&fs_info->qgroup_ioctl_lock);
2099 	fs_info->qgroup_tree = RB_ROOT;
2100 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2101 	fs_info->qgroup_seq = 1;
2102 	fs_info->qgroup_ulist = NULL;
2103 	fs_info->qgroup_rescan_running = false;
2104 	mutex_init(&fs_info->qgroup_rescan_lock);
2105 }
2106 
2107 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2108 		struct btrfs_fs_devices *fs_devices)
2109 {
2110 	u32 max_active = fs_info->thread_pool_size;
2111 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2112 
2113 	fs_info->workers =
2114 		btrfs_alloc_workqueue(fs_info, "worker",
2115 				      flags | WQ_HIGHPRI, max_active, 16);
2116 
2117 	fs_info->delalloc_workers =
2118 		btrfs_alloc_workqueue(fs_info, "delalloc",
2119 				      flags, max_active, 2);
2120 
2121 	fs_info->flush_workers =
2122 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2123 				      flags, max_active, 0);
2124 
2125 	fs_info->caching_workers =
2126 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2127 
2128 	fs_info->fixup_workers =
2129 		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2130 
2131 	/*
2132 	 * End I/O completions are largely parallel and should have a very
2133 	 * low idle threshold
2134 	 */
2135 	fs_info->endio_workers =
2136 		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2137 	fs_info->endio_meta_workers =
2138 		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2139 				      max_active, 4);
2140 	fs_info->endio_meta_write_workers =
2141 		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2142 				      max_active, 2);
2143 	fs_info->endio_raid56_workers =
2144 		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2145 				      max_active, 4);
2146 	fs_info->endio_repair_workers =
2147 		btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2148 	fs_info->rmw_workers =
2149 		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2150 	fs_info->endio_write_workers =
2151 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2152 				      max_active, 2);
2153 	fs_info->endio_freespace_worker =
2154 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2155 				      max_active, 0);
2156 	fs_info->delayed_workers =
2157 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2158 				      max_active, 0);
2159 	fs_info->readahead_workers =
2160 		btrfs_alloc_workqueue(fs_info, "readahead", flags,
2161 				      max_active, 2);
2162 	fs_info->qgroup_rescan_workers =
2163 		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2164 	fs_info->discard_ctl.discard_workers =
2165 		alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1);
2166 
2167 	if (!(fs_info->workers && fs_info->delalloc_workers &&
2168 	      fs_info->flush_workers &&
2169 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2170 	      fs_info->endio_meta_write_workers &&
2171 	      fs_info->endio_repair_workers &&
2172 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2173 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2174 	      fs_info->caching_workers && fs_info->readahead_workers &&
2175 	      fs_info->fixup_workers && fs_info->delayed_workers &&
2176 	      fs_info->qgroup_rescan_workers &&
2177 	      fs_info->discard_ctl.discard_workers)) {
2178 		return -ENOMEM;
2179 	}
2180 
2181 	return 0;
2182 }
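
/*
 * For reference, the btrfs_alloc_workqueue() arguments above are
 * (fs_info, name, workqueue flags, limit on active works, threshold);
 * a hypothetical queue would be created as:
 *
 *	wq = btrfs_alloc_workqueue(fs_info, "example", flags, max_active, 4);
 *	if (!wq)
 *		return -ENOMEM;
 *
 * The last argument feeds the heuristic in async-thread.c that scales how
 * many works may run concurrently; the endio queues pick small values
 * because, as noted above, completions are highly parallel.
 */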
2183 
2184 static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2185 {
2186 	struct crypto_shash *csum_shash;
2187 	const char *csum_driver = btrfs_super_csum_driver(csum_type);
2188 
2189 	csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
2190 
2191 	if (IS_ERR(csum_shash)) {
2192 		btrfs_err(fs_info, "error allocating %s hash for checksum",
2193 			  csum_driver);
2194 		return PTR_ERR(csum_shash);
2195 	}
2196 
2197 	fs_info->csum_shash = csum_shash;
2198 
2199 	return 0;
2200 }
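
/*
 * Once fs_info->csum_shash is set, checksums go through the regular shash
 * API, as write_dev_supers() below does for the super block.  A minimal
 * sketch of the one-shot form:
 *
 *	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 *	shash->tfm = fs_info->csum_shash;
 *	crypto_shash_digest(shash, data, len, result);
 *
 * which is equivalent to the init/update/final sequence used later in
 * this file.
 */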
2201 
2202 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2203 			    struct btrfs_fs_devices *fs_devices)
2204 {
2205 	int ret;
2206 	struct btrfs_root *log_tree_root;
2207 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2208 	u64 bytenr = btrfs_super_log_root(disk_super);
2209 	int level = btrfs_super_log_root_level(disk_super);
2210 
2211 	if (fs_devices->rw_devices == 0) {
2212 		btrfs_warn(fs_info, "log replay required on RO media");
2213 		return -EIO;
2214 	}
2215 
2216 	log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2217 					 GFP_KERNEL);
2218 	if (!log_tree_root)
2219 		return -ENOMEM;
2220 
2221 	log_tree_root->node = read_tree_block(fs_info, bytenr,
2222 					      fs_info->generation + 1,
2223 					      level, NULL);
2224 	if (IS_ERR(log_tree_root->node)) {
2225 		btrfs_warn(fs_info, "failed to read log tree");
2226 		ret = PTR_ERR(log_tree_root->node);
2227 		log_tree_root->node = NULL;
2228 		btrfs_put_root(log_tree_root);
2229 		return ret;
2230 	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
2231 		btrfs_err(fs_info, "failed to read log tree");
2232 		btrfs_put_root(log_tree_root);
2233 		return -EIO;
2234 	}
2235 	/* returns with log_tree_root freed on success */
2236 	ret = btrfs_recover_log_trees(log_tree_root);
2237 	if (ret) {
2238 		btrfs_handle_fs_error(fs_info, ret,
2239 				      "Failed to recover log tree");
2240 		btrfs_put_root(log_tree_root);
2241 		return ret;
2242 	}
2243 
2244 	if (sb_rdonly(fs_info->sb)) {
2245 		ret = btrfs_commit_super(fs_info);
2246 		if (ret)
2247 			return ret;
2248 	}
2249 
2250 	return 0;
2251 }
2252 
2253 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2254 {
2255 	struct btrfs_root *tree_root = fs_info->tree_root;
2256 	struct btrfs_root *root;
2257 	struct btrfs_key location;
2258 	int ret;
2259 
2260 	BUG_ON(!fs_info->tree_root);
2261 
2262 	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2263 	location.type = BTRFS_ROOT_ITEM_KEY;
2264 	location.offset = 0;
2265 
2266 	root = btrfs_read_tree_root(tree_root, &location);
2267 	if (IS_ERR(root)) {
2268 		ret = PTR_ERR(root);
2269 		goto out;
2270 	}
2271 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2272 	fs_info->extent_root = root;
2273 
2274 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2275 	root = btrfs_read_tree_root(tree_root, &location);
2276 	if (IS_ERR(root)) {
2277 		ret = PTR_ERR(root);
2278 		goto out;
2279 	}
2280 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2281 	fs_info->dev_root = root;
2282 	btrfs_init_devices_late(fs_info);
2283 
2284 	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2285 	root = btrfs_read_tree_root(tree_root, &location);
2286 	if (IS_ERR(root)) {
2287 		ret = PTR_ERR(root);
2288 		goto out;
2289 	}
2290 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2291 	fs_info->csum_root = root;
2292 
2293 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2294 	root = btrfs_read_tree_root(tree_root, &location);
2295 	if (!IS_ERR(root)) {
2296 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2297 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2298 		fs_info->quota_root = root;
2299 	}
2300 
2301 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2302 	root = btrfs_read_tree_root(tree_root, &location);
2303 	if (IS_ERR(root)) {
2304 		ret = PTR_ERR(root);
2305 		if (ret != -ENOENT)
2306 			goto out;
2307 	} else {
2308 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2309 		fs_info->uuid_root = root;
2310 	}
2311 
2312 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2313 		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2314 		root = btrfs_read_tree_root(tree_root, &location);
2315 		if (IS_ERR(root)) {
2316 			ret = PTR_ERR(root);
2317 			goto out;
2318 		}
2319 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2320 		fs_info->free_space_root = root;
2321 	}
2322 
2323 	return 0;
2324 out:
2325 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2326 		   location.objectid, ret);
2327 	return ret;
2328 }
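
/*
 * Every lookup above uses the generic (objectid, type, offset) key
 * triple; for tree roots the pattern is always:
 *
 *	struct btrfs_key location = {
 *		.objectid = BTRFS_EXTENT_TREE_OBJECTID,	// which tree
 *		.type = BTRFS_ROOT_ITEM_KEY,		// a root item
 *		.offset = 0,				// unused here
 *	};
 *
 * Only the objectid changes from one tree to the next.
 */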
2329 
2330 /*
2331  * Real super block validation
2332  * NOTE: super csum type and incompat features will not be checked here.
2333  *
2334  * @sb:		super block to check
2335  * @mirror_num:	the super block copy number whose bytenr is verified:
2336  * 		0	the primary (1st) sb
2337  * 		1, 2	the 2nd and 3rd backup copies
2338  * 	       -1	skip bytenr check
2339  */
2340 static int validate_super(struct btrfs_fs_info *fs_info,
2341 			    struct btrfs_super_block *sb, int mirror_num)
2342 {
2343 	u64 nodesize = btrfs_super_nodesize(sb);
2344 	u64 sectorsize = btrfs_super_sectorsize(sb);
2345 	int ret = 0;
2346 
2347 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2348 		btrfs_err(fs_info, "no valid FS found");
2349 		ret = -EINVAL;
2350 	}
2351 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2352 		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2353 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2354 		ret = -EINVAL;
2355 	}
2356 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2357 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2358 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2359 		ret = -EINVAL;
2360 	}
2361 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2362 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2363 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2364 		ret = -EINVAL;
2365 	}
2366 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2367 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2368 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2369 		ret = -EINVAL;
2370 	}
2371 
2372 	/*
2373 	 * Check sectorsize and nodesize first, other checks will need them.
2374 	 * Check all possible sector sizes (4K, 8K, 16K, 32K, 64K) here.
2375 	 */
2376 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2377 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2378 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2379 		ret = -EINVAL;
2380 	}
2381 	/* Only sectorsize == PAGE_SIZE is supported for now */
2382 	if (sectorsize != PAGE_SIZE) {
2383 		btrfs_err(fs_info,
2384 			"sectorsize %llu not supported yet, only support %lu",
2385 			sectorsize, PAGE_SIZE);
2386 		ret = -EINVAL;
2387 	}
2388 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2389 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2390 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2391 		ret = -EINVAL;
2392 	}
2393 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2394 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2395 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2396 		ret = -EINVAL;
2397 	}
2398 
2399 	/* Root alignment check */
2400 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2401 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2402 			   btrfs_super_root(sb));
2403 		ret = -EINVAL;
2404 	}
2405 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2406 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2407 			   btrfs_super_chunk_root(sb));
2408 		ret = -EINVAL;
2409 	}
2410 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2411 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2412 			   btrfs_super_log_root(sb));
2413 		ret = -EINVAL;
2414 	}
2415 
2416 	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2417 		   BTRFS_FSID_SIZE) != 0) {
2418 		btrfs_err(fs_info,
2419 			"dev_item UUID does not match metadata fsid: %pU != %pU",
2420 			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2421 		ret = -EINVAL;
2422 	}
2423 
2424 	/*
2425 	 * Hint to catch really bogus numbers, bitflips and the like; more
2426 	 * exact checks are done later
2427 	 */
2428 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2429 		btrfs_err(fs_info, "bytes_used is too small %llu",
2430 			  btrfs_super_bytes_used(sb));
2431 		ret = -EINVAL;
2432 	}
2433 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2434 		btrfs_err(fs_info, "invalid stripesize %u",
2435 			  btrfs_super_stripesize(sb));
2436 		ret = -EINVAL;
2437 	}
2438 	if (btrfs_super_num_devices(sb) > (1UL << 31))
2439 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2440 			   btrfs_super_num_devices(sb));
2441 	if (btrfs_super_num_devices(sb) == 0) {
2442 		btrfs_err(fs_info, "number of devices is 0");
2443 		ret = -EINVAL;
2444 	}
2445 
2446 	if (mirror_num >= 0 &&
2447 	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2448 		btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2449 			  btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
2450 		ret = -EINVAL;
2451 	}
2452 
2453 	/*
2454 	 * Obvious sys_chunk_array corruptions, it must hold at least one key
2455 	 * and one chunk
2456 	 */
2457 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2458 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2459 			  btrfs_super_sys_array_size(sb),
2460 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2461 		ret = -EINVAL;
2462 	}
2463 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2464 			+ sizeof(struct btrfs_chunk)) {
2465 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2466 			  btrfs_super_sys_array_size(sb),
2467 			  sizeof(struct btrfs_disk_key)
2468 			  + sizeof(struct btrfs_chunk));
2469 		ret = -EINVAL;
2470 	}
2471 
2472 	/*
2473 	 * The generation is a global counter, we'll trust it more than the others
2474 	 * but it's still possible that it's the one that's wrong.
2475 	 */
2476 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2477 		btrfs_warn(fs_info,
2478 			"suspicious: generation < chunk_root_generation: %llu < %llu",
2479 			btrfs_super_generation(sb),
2480 			btrfs_super_chunk_root_generation(sb));
2481 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2482 	    && btrfs_super_cache_generation(sb) != (u64)-1)
2483 		btrfs_warn(fs_info,
2484 			"suspicious: generation < cache_generation: %llu < %llu",
2485 			btrfs_super_generation(sb),
2486 			btrfs_super_cache_generation(sb));
2487 
2488 	return ret;
2489 }
2490 
2491 /*
2492  * Validation of super block at mount time.
2493  * Checks already done early at mount time, like csum type and incompat
2494  * flags, are skipped here.
2495  */
2496 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2497 {
2498 	return validate_super(fs_info, fs_info->super_copy, 0);
2499 }
2500 
2501 /*
2502  * Validation of super block at write time.
2503  * Some checks like bytenr check will be skipped as their values will be
2504  * overwritten soon.
2505  * Extra checks like csum type and incompat flags will be done here.
2506  */
2507 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2508 				      struct btrfs_super_block *sb)
2509 {
2510 	int ret;
2511 
2512 	ret = validate_super(fs_info, sb, -1);
2513 	if (ret < 0)
2514 		goto out;
2515 	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2516 		ret = -EUCLEAN;
2517 		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2518 			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2519 		goto out;
2520 	}
2521 	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2522 		ret = -EUCLEAN;
2523 		btrfs_err(fs_info,
2524 		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2525 			  btrfs_super_incompat_flags(sb),
2526 			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2527 		goto out;
2528 	}
2529 out:
2530 	if (ret < 0)
2531 		btrfs_err(fs_info,
2532 		"super block corruption detected before writing it to disk");
2533 	return ret;
2534 }
2535 
2536 static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2537 {
2538 	int backup_index = find_newest_super_backup(fs_info);
2539 	struct btrfs_super_block *sb = fs_info->super_copy;
2540 	struct btrfs_root *tree_root = fs_info->tree_root;
2541 	bool handle_error = false;
2542 	int ret = 0;
2543 	int i;
2544 
2545 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2546 		u64 generation;
2547 		int level;
2548 
2549 		if (handle_error) {
2550 			if (!IS_ERR(tree_root->node))
2551 				free_extent_buffer(tree_root->node);
2552 			tree_root->node = NULL;
2553 
2554 			if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2555 				break;
2556 
2557 			free_root_pointers(fs_info, false);
2558 
2559 			/*
2560 			 * Don't use the log in recovery mode, it won't be
2561 			 * valid
2562 			 */
2563 			btrfs_set_super_log_root(sb, 0);
2564 
2565 			/* We can't trust the free space cache either */
2566 			btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2567 
2568 			ret = read_backup_root(fs_info, i);
2569 			backup_index = ret;
2570 			if (ret < 0)
2571 				return ret;
2572 		}
2573 		generation = btrfs_super_generation(sb);
2574 		level = btrfs_super_root_level(sb);
2575 		tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb),
2576 						  generation, level, NULL);
2577 		if (IS_ERR(tree_root->node) ||
2578 		    !extent_buffer_uptodate(tree_root->node)) {
2579 			handle_error = true;
2580 
2581 			if (IS_ERR(tree_root->node))
2582 				ret = PTR_ERR(tree_root->node);
2583 			else if (!extent_buffer_uptodate(tree_root->node))
2584 				ret = -EUCLEAN;
2585 
2586 			btrfs_warn(fs_info, "failed to read tree root");
2587 			continue;
2588 		}
2589 
2590 		btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2591 		tree_root->commit_root = btrfs_root_node(tree_root);
2592 		btrfs_set_root_refs(&tree_root->root_item, 1);
2593 
2594 		/*
2595 		 * No need to hold btrfs_root::objectid_mutex since the fs
2596 		 * hasn't been fully initialised and we are the only user
2597 		 */
2598 		ret = btrfs_find_highest_objectid(tree_root,
2599 						&tree_root->highest_objectid);
2600 		if (ret < 0) {
2601 			handle_error = true;
2602 			continue;
2603 		}
2604 
2605 		ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2606 
2607 		ret = btrfs_read_roots(fs_info);
2608 		if (ret < 0) {
2609 			handle_error = true;
2610 			continue;
2611 		}
2612 
2613 		/* All successful */
2614 		fs_info->generation = generation;
2615 		fs_info->last_trans_committed = generation;
2616 
2617 		/* Always begin writing backup roots after the one being used */
2618 		if (backup_index < 0) {
2619 			fs_info->backup_root_index = 0;
2620 		} else {
2621 			fs_info->backup_root_index = backup_index + 1;
2622 			fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2623 		}
2624 		break;
2625 	}
2626 
2627 	return ret;
2628 }
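
/*
 * The retry order of the loop above, in plain terms: iteration 0 reads
 * the tree root recorded in the primary super block; if that fails and
 * the filesystem was mounted with -o usebackuproot, iterations 1..3 call
 * read_backup_root() with priority 1..3, i.e. progressively older backup
 * slots (the priority-0 slot carries the same generation as the super
 * block that just failed, so retrying it would gain nothing).  Each retry
 * also zaps the log root and forces CLEAR_CACHE, since neither the log
 * nor the free space cache can be trusted against an older tree root.
 */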
2629 
2630 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2631 {
2632 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2633 	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2634 	INIT_LIST_HEAD(&fs_info->trans_list);
2635 	INIT_LIST_HEAD(&fs_info->dead_roots);
2636 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2637 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2638 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2639 	spin_lock_init(&fs_info->delalloc_root_lock);
2640 	spin_lock_init(&fs_info->trans_lock);
2641 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2642 	spin_lock_init(&fs_info->delayed_iput_lock);
2643 	spin_lock_init(&fs_info->defrag_inodes_lock);
2644 	spin_lock_init(&fs_info->super_lock);
2645 	spin_lock_init(&fs_info->buffer_lock);
2646 	spin_lock_init(&fs_info->unused_bgs_lock);
2647 	rwlock_init(&fs_info->tree_mod_log_lock);
2648 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2649 	mutex_init(&fs_info->delete_unused_bgs_mutex);
2650 	mutex_init(&fs_info->reloc_mutex);
2651 	mutex_init(&fs_info->delalloc_root_mutex);
2652 	seqlock_init(&fs_info->profiles_lock);
2653 
2654 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2655 	INIT_LIST_HEAD(&fs_info->space_info);
2656 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2657 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2658 #ifdef CONFIG_BTRFS_DEBUG
2659 	INIT_LIST_HEAD(&fs_info->allocated_roots);
2660 	INIT_LIST_HEAD(&fs_info->allocated_ebs);
2661 	spin_lock_init(&fs_info->eb_leak_lock);
2662 #endif
2663 	extent_map_tree_init(&fs_info->mapping_tree);
2664 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2665 			     BTRFS_BLOCK_RSV_GLOBAL);
2666 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2667 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2668 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2669 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2670 			     BTRFS_BLOCK_RSV_DELOPS);
2671 	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2672 			     BTRFS_BLOCK_RSV_DELREFS);
2673 
2674 	atomic_set(&fs_info->async_delalloc_pages, 0);
2675 	atomic_set(&fs_info->defrag_running, 0);
2676 	atomic_set(&fs_info->reada_works_cnt, 0);
2677 	atomic_set(&fs_info->nr_delayed_iputs, 0);
2678 	atomic64_set(&fs_info->tree_mod_seq, 0);
2679 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2680 	fs_info->metadata_ratio = 0;
2681 	fs_info->defrag_inodes = RB_ROOT;
2682 	atomic64_set(&fs_info->free_chunk_space, 0);
2683 	fs_info->tree_mod_log = RB_ROOT;
2684 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2685 	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2686 	/* readahead state */
2687 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2688 	spin_lock_init(&fs_info->reada_lock);
2689 	btrfs_init_ref_verify(fs_info);
2690 
2691 	fs_info->thread_pool_size = min_t(unsigned long,
2692 					  num_online_cpus() + 2, 8);
2693 
2694 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2695 	spin_lock_init(&fs_info->ordered_root_lock);
2696 
2697 	btrfs_init_scrub(fs_info);
2698 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2699 	fs_info->check_integrity_print_mask = 0;
2700 #endif
2701 	btrfs_init_balance(fs_info);
2702 	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2703 
2704 	spin_lock_init(&fs_info->block_group_cache_lock);
2705 	fs_info->block_group_cache_tree = RB_ROOT;
2706 	fs_info->first_logical_byte = (u64)-1;
2707 
2708 	extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2709 			    IO_TREE_FS_EXCLUDED_EXTENTS, NULL);
2710 	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2711 
2712 	mutex_init(&fs_info->ordered_operations_mutex);
2713 	mutex_init(&fs_info->tree_log_mutex);
2714 	mutex_init(&fs_info->chunk_mutex);
2715 	mutex_init(&fs_info->transaction_kthread_mutex);
2716 	mutex_init(&fs_info->cleaner_mutex);
2717 	mutex_init(&fs_info->ro_block_group_mutex);
2718 	init_rwsem(&fs_info->commit_root_sem);
2719 	init_rwsem(&fs_info->cleanup_work_sem);
2720 	init_rwsem(&fs_info->subvol_sem);
2721 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2722 
2723 	btrfs_init_dev_replace_locks(fs_info);
2724 	btrfs_init_qgroup(fs_info);
2725 	btrfs_discard_init(fs_info);
2726 
2727 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2728 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2729 
2730 	init_waitqueue_head(&fs_info->transaction_throttle);
2731 	init_waitqueue_head(&fs_info->transaction_wait);
2732 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2733 	init_waitqueue_head(&fs_info->async_submit_wait);
2734 	init_waitqueue_head(&fs_info->delayed_iputs_wait);
2735 
2736 	/* Usable values until the real ones are cached from the superblock */
2737 	fs_info->nodesize = 4096;
2738 	fs_info->sectorsize = 4096;
2739 	fs_info->stripesize = 4096;
2740 
2741 	spin_lock_init(&fs_info->swapfile_pins_lock);
2742 	fs_info->swapfile_pins = RB_ROOT;
2743 
2744 	fs_info->send_in_progress = 0;
2745 }
2746 
2747 static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2748 {
2749 	int ret;
2750 
2751 	fs_info->sb = sb;
2752 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2753 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2754 
2755 	ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL);
2756 	if (ret)
2757 		return ret;
2758 
2759 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2760 	if (ret)
2761 		return ret;
2762 
2763 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2764 					(1 + ilog2(nr_cpu_ids));
2765 
2766 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2767 	if (ret)
2768 		return ret;
2769 
2770 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2771 			GFP_KERNEL);
2772 	if (ret)
2773 		return ret;
2774 
2775 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2776 					GFP_KERNEL);
2777 	if (!fs_info->delayed_root)
2778 		return -ENOMEM;
2779 	btrfs_init_delayed_root(fs_info->delayed_root);
2780 
2781 	return btrfs_alloc_stripe_hash_table(fs_info);
2782 }
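
/*
 * The percpu counters initialized above trade exactness for scalability;
 * a sketch of the usage pattern (illustrative, not a specific call site):
 *
 *	percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
 *				 fs_info->delalloc_batch);
 *	...
 *	total = percpu_counter_sum(&fs_info->delalloc_bytes);
 *
 * Updates stay CPU-local until they exceed the batch, which is why the
 * batch sizes above are scaled by ilog2(nr_cpu_ids).
 */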
2783 
2784 static int btrfs_uuid_rescan_kthread(void *data)
2785 {
2786 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
2787 	int ret;
2788 
2789 	/*
2790 	 * 1st step is to iterate through the existing UUID tree and
2791 	 * to delete all entries that contain outdated data.
2792 	 * 2nd step is to add all missing entries to the UUID tree.
2793 	 */
2794 	ret = btrfs_uuid_tree_iterate(fs_info);
2795 	if (ret < 0) {
2796 		if (ret != -EINTR)
2797 			btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2798 				   ret);
2799 		up(&fs_info->uuid_tree_rescan_sem);
2800 		return ret;
2801 	}
2802 	return btrfs_uuid_scan_kthread(data);
2803 }
2804 
2805 static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2806 {
2807 	struct task_struct *task;
2808 
2809 	down(&fs_info->uuid_tree_rescan_sem);
2810 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2811 	if (IS_ERR(task)) {
2812 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2813 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
2814 		up(&fs_info->uuid_tree_rescan_sem);
2815 		return PTR_ERR(task);
2816 	}
2817 
2818 	return 0;
2819 }
2820 
2821 int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
2822 		      char *options)
2823 {
2824 	u32 sectorsize;
2825 	u32 nodesize;
2826 	u32 stripesize;
2827 	u64 generation;
2828 	u64 features;
2829 	u16 csum_type;
2830 	struct btrfs_key location;
2831 	struct btrfs_super_block *disk_super;
2832 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2833 	struct btrfs_root *tree_root;
2834 	struct btrfs_root *chunk_root;
2835 	int ret;
2836 	int err = -EINVAL;
2837 	int clear_free_space_tree = 0;
2838 	int level;
2839 
2840 	ret = init_mount_fs_info(fs_info, sb);
2841 	if (ret) {
2842 		err = ret;
2843 		goto fail;
2844 	}
2845 
2846 	/* These need to be init'ed before we start creating inodes and such. */
2847 	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
2848 				     GFP_KERNEL);
2849 	fs_info->tree_root = tree_root;
2850 	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
2851 				      GFP_KERNEL);
2852 	fs_info->chunk_root = chunk_root;
2853 	if (!tree_root || !chunk_root) {
2854 		err = -ENOMEM;
2855 		goto fail;
2856 	}
2857 
2858 	fs_info->btree_inode = new_inode(sb);
2859 	if (!fs_info->btree_inode) {
2860 		err = -ENOMEM;
2861 		goto fail;
2862 	}
2863 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2864 	btrfs_init_btree_inode(fs_info);
2865 
2866 	invalidate_bdev(fs_devices->latest_bdev);
2867 
2868 	/*
2869 	 * Read super block and check the signature bytes only
2870 	 */
2871 	disk_super = btrfs_read_dev_super(fs_devices->latest_bdev);
2872 	if (IS_ERR(disk_super)) {
2873 		err = PTR_ERR(disk_super);
2874 		goto fail_alloc;
2875 	}
2876 
2877 	/*
2878 	 * Verify the type first, if that or the checksum value is
2879 	 * corrupted, we'll find out
2880 	 */
2881 	csum_type = btrfs_super_csum_type(disk_super);
2882 	if (!btrfs_supported_super_csum(csum_type)) {
2883 		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
2884 			  csum_type);
2885 		err = -EINVAL;
2886 		btrfs_release_disk_super(disk_super);
2887 		goto fail_alloc;
2888 	}
2889 
2890 	ret = btrfs_init_csum_hash(fs_info, csum_type);
2891 	if (ret) {
2892 		err = ret;
2893 		btrfs_release_disk_super(disk_super);
2894 		goto fail_alloc;
2895 	}
2896 
2897 	/*
2898 	 * We want to check the superblock checksum; the type is stored inside.
2899 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2900 	 */
2901 	if (btrfs_check_super_csum(fs_info, (u8 *)disk_super)) {
2902 		btrfs_err(fs_info, "superblock checksum mismatch");
2903 		err = -EINVAL;
2904 		btrfs_release_disk_super(disk_super);
2905 		goto fail_alloc;
2906 	}
2907 
2908 	/*
2909 	 * super_copy is zeroed at allocation time and we never touch the
2910 	 * following bytes up to INFO_SIZE; the checksum is calculated from
2911 	 * the whole block of INFO_SIZE
2912 	 */
2913 	memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
2914 	btrfs_release_disk_super(disk_super);
2915 
2916 	disk_super = fs_info->super_copy;
2917 
2918 	ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
2919 		       BTRFS_FSID_SIZE));
2920 
2921 	if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
2922 		ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
2923 				fs_info->super_copy->metadata_uuid,
2924 				BTRFS_FSID_SIZE));
2925 	}
2926 
2927 	features = btrfs_super_flags(disk_super);
2928 	if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
2929 		features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
2930 		btrfs_set_super_flags(disk_super, features);
2931 		btrfs_info(fs_info,
2932 			"found metadata UUID change in progress flag, clearing");
2933 	}
2934 
2935 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2936 	       sizeof(*fs_info->super_for_commit));
2937 
2938 	ret = btrfs_validate_mount_super(fs_info);
2939 	if (ret) {
2940 		btrfs_err(fs_info, "superblock contains fatal errors");
2941 		err = -EINVAL;
2942 		goto fail_alloc;
2943 	}
2944 
2945 	if (!btrfs_super_root(disk_super))
2946 		goto fail_alloc;
2947 
2948 	/* check FS state, whether FS is broken. */
2949 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2950 		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2951 
2952 	/*
2953 	 * In the long term, we'll store the compression type in the super
2954 	 * block, and it'll be used for per file compression control.
2955 	 */
2956 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2957 
2958 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2959 	if (ret) {
2960 		err = ret;
2961 		goto fail_alloc;
2962 	}
2963 
2964 	features = btrfs_super_incompat_flags(disk_super) &
2965 		~BTRFS_FEATURE_INCOMPAT_SUPP;
2966 	if (features) {
2967 		btrfs_err(fs_info,
2968 		    "cannot mount because of unsupported optional features (%llx)",
2969 		    features);
2970 		err = -EINVAL;
2971 		goto fail_alloc;
2972 	}
2973 
2974 	features = btrfs_super_incompat_flags(disk_super);
2975 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2976 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2977 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2978 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2979 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2980 
2981 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2982 		btrfs_info(fs_info, "has skinny extents");
2983 
2984 	/*
2985 	 * flag our filesystem as having big metadata blocks if
2986 	 * they are bigger than the page size
2987 	 */
2988 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2989 		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2990 			btrfs_info(fs_info,
2991 				"flagging fs with big metadata feature");
2992 		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2993 	}
2994 
2995 	nodesize = btrfs_super_nodesize(disk_super);
2996 	sectorsize = btrfs_super_sectorsize(disk_super);
2997 	stripesize = sectorsize;
2998 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2999 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
3000 
3001 	/* Cache block sizes */
3002 	fs_info->nodesize = nodesize;
3003 	fs_info->sectorsize = sectorsize;
3004 	fs_info->stripesize = stripesize;
3005 
3006 	/*
3007 	 * mixed block groups end up with duplicate but slightly offset
3008 	 * extent buffers for the same range.  This leads to corruption.
3009 	 */
3010 	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3011 	    (sectorsize != nodesize)) {
3012 		btrfs_err(fs_info,
3013 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3014 			nodesize, sectorsize);
3015 		goto fail_alloc;
3016 	}
3017 
3018 	/*
3019 	 * No need to take the lock because there is no other task that can
3020 	 * update the flag at this point.
3021 	 */
3022 	btrfs_set_super_incompat_flags(disk_super, features);
3023 
3024 	features = btrfs_super_compat_ro_flags(disk_super) &
3025 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
3026 	if (!sb_rdonly(sb) && features) {
3027 		btrfs_err(fs_info,
3028 	"cannot mount read-write because of unsupported optional features (%llx)",
3029 		       features);
3030 		err = -EINVAL;
3031 		goto fail_alloc;
3032 	}
3033 
3034 	ret = btrfs_init_workqueues(fs_info, fs_devices);
3035 	if (ret) {
3036 		err = ret;
3037 		goto fail_sb_buffer;
3038 	}
3039 
3040 	sb->s_bdi->congested_fn = btrfs_congested_fn;
3041 	sb->s_bdi->congested_data = fs_info;
3042 	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
3043 	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
3044 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3045 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
3046 
3047 	sb->s_blocksize = sectorsize;
3048 	sb->s_blocksize_bits = blksize_bits(sectorsize);
3049 	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3050 
3051 	mutex_lock(&fs_info->chunk_mutex);
3052 	ret = btrfs_read_sys_array(fs_info);
3053 	mutex_unlock(&fs_info->chunk_mutex);
3054 	if (ret) {
3055 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
3056 		goto fail_sb_buffer;
3057 	}
3058 
3059 	generation = btrfs_super_chunk_root_generation(disk_super);
3060 	level = btrfs_super_chunk_root_level(disk_super);
3061 
3062 	chunk_root->node = read_tree_block(fs_info,
3063 					   btrfs_super_chunk_root(disk_super),
3064 					   generation, level, NULL);
3065 	if (IS_ERR(chunk_root->node) ||
3066 	    !extent_buffer_uptodate(chunk_root->node)) {
3067 		btrfs_err(fs_info, "failed to read chunk root");
3068 		if (!IS_ERR(chunk_root->node))
3069 			free_extent_buffer(chunk_root->node);
3070 		chunk_root->node = NULL;
3071 		goto fail_tree_roots;
3072 	}
3073 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
3074 	chunk_root->commit_root = btrfs_root_node(chunk_root);
3075 
3076 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3077 			   offsetof(struct btrfs_header, chunk_tree_uuid),
3078 			   BTRFS_UUID_SIZE);
3079 
3080 	ret = btrfs_read_chunk_tree(fs_info);
3081 	if (ret) {
3082 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3083 		goto fail_tree_roots;
3084 	}
3085 
3086 	/*
3087 	 * Keep the devid that is marked to be the target device for the
3088 	 * device replace procedure
3089 	 */
3090 	btrfs_free_extra_devids(fs_devices, 0);
3091 
3092 	if (!fs_devices->latest_bdev) {
3093 		btrfs_err(fs_info, "failed to read devices");
3094 		goto fail_tree_roots;
3095 	}
3096 
3097 	ret = init_tree_roots(fs_info);
3098 	if (ret)
3099 		goto fail_tree_roots;
3100 
3101 	/*
3102 	 * If we have a uuid root and we're not being told to rescan we need to
3103 	 * check the generation here so we can set the
3104 	 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit.  Otherwise we could commit the
3105 	 * transaction during a balance or the log replay without updating the
3106 	 * uuid generation, and then if we crash we would rescan the uuid tree,
3107 	 * even though it was perfectly fine.
3108 	 */
3109 	if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3110 	    fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3111 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3112 
3113 	ret = btrfs_verify_dev_extents(fs_info);
3114 	if (ret) {
3115 		btrfs_err(fs_info,
3116 			  "failed to verify dev extents against chunks: %d",
3117 			  ret);
3118 		goto fail_block_groups;
3119 	}
3120 	ret = btrfs_recover_balance(fs_info);
3121 	if (ret) {
3122 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3123 		goto fail_block_groups;
3124 	}
3125 
3126 	ret = btrfs_init_dev_stats(fs_info);
3127 	if (ret) {
3128 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3129 		goto fail_block_groups;
3130 	}
3131 
3132 	ret = btrfs_init_dev_replace(fs_info);
3133 	if (ret) {
3134 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3135 		goto fail_block_groups;
3136 	}
3137 
3138 	btrfs_free_extra_devids(fs_devices, 1);
3139 
3140 	ret = btrfs_sysfs_add_fsid(fs_devices);
3141 	if (ret) {
3142 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3143 				ret);
3144 		goto fail_block_groups;
3145 	}
3146 
3147 	ret = btrfs_sysfs_add_mounted(fs_info);
3148 	if (ret) {
3149 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3150 		goto fail_fsdev_sysfs;
3151 	}
3152 
3153 	ret = btrfs_init_space_info(fs_info);
3154 	if (ret) {
3155 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3156 		goto fail_sysfs;
3157 	}
3158 
3159 	ret = btrfs_read_block_groups(fs_info);
3160 	if (ret) {
3161 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3162 		goto fail_sysfs;
3163 	}
3164 
3165 	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
3166 		btrfs_warn(fs_info,
3167 		"writable mount is not allowed due to too many missing devices");
3168 		goto fail_sysfs;
3169 	}
3170 
3171 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3172 					       "btrfs-cleaner");
3173 	if (IS_ERR(fs_info->cleaner_kthread))
3174 		goto fail_sysfs;
3175 
3176 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3177 						   tree_root,
3178 						   "btrfs-transaction");
3179 	if (IS_ERR(fs_info->transaction_kthread))
3180 		goto fail_cleaner;
3181 
3182 	if (!btrfs_test_opt(fs_info, NOSSD) &&
3183 	    !fs_info->fs_devices->rotating) {
3184 		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3185 	}
3186 
3187 	/*
3188 	 * Mount does not set all options immediately; we can do it now and do
3189 	 * not have to wait for a transaction commit
3190 	 */
3191 	btrfs_apply_pending_changes(fs_info);
3192 
3193 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3194 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3195 		ret = btrfsic_mount(fs_info, fs_devices,
3196 				    btrfs_test_opt(fs_info,
3197 					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3198 				    1 : 0,
3199 				    fs_info->check_integrity_print_mask);
3200 		if (ret)
3201 			btrfs_warn(fs_info,
3202 				"failed to initialize integrity check module: %d",
3203 				ret);
3204 	}
3205 #endif
3206 	ret = btrfs_read_qgroup_config(fs_info);
3207 	if (ret)
3208 		goto fail_trans_kthread;
3209 
3210 	if (btrfs_build_ref_tree(fs_info))
3211 		btrfs_err(fs_info, "couldn't build ref tree");
3212 
3213 	/* Do not make disk changes in a broken FS or when nologreplay is given */
3214 	if (btrfs_super_log_root(disk_super) != 0 &&
3215 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3216 		btrfs_info(fs_info, "start tree-log replay");
3217 		ret = btrfs_replay_log(fs_info, fs_devices);
3218 		if (ret) {
3219 			err = ret;
3220 			goto fail_qgroup;
3221 		}
3222 	}
3223 
3224 	ret = btrfs_find_orphan_roots(fs_info);
3225 	if (ret)
3226 		goto fail_qgroup;
3227 
3228 	if (!sb_rdonly(sb)) {
3229 		ret = btrfs_cleanup_fs_roots(fs_info);
3230 		if (ret)
3231 			goto fail_qgroup;
3232 
3233 		mutex_lock(&fs_info->cleaner_mutex);
3234 		ret = btrfs_recover_relocation(tree_root);
3235 		mutex_unlock(&fs_info->cleaner_mutex);
3236 		if (ret < 0) {
3237 			btrfs_warn(fs_info, "failed to recover relocation: %d",
3238 					ret);
3239 			err = -EINVAL;
3240 			goto fail_qgroup;
3241 		}
3242 	}
3243 
3244 	location.objectid = BTRFS_FS_TREE_OBJECTID;
3245 	location.type = BTRFS_ROOT_ITEM_KEY;
3246 	location.offset = 0;
3247 
3248 	fs_info->fs_root = btrfs_get_fs_root(fs_info, &location, true);
3249 	if (IS_ERR(fs_info->fs_root)) {
3250 		err = PTR_ERR(fs_info->fs_root);
3251 		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3252 		fs_info->fs_root = NULL;
3253 		goto fail_qgroup;
3254 	}
3255 
3256 	if (sb_rdonly(sb))
3257 		return 0;
3258 
3259 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3260 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3261 		clear_free_space_tree = 1;
3262 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3263 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3264 		btrfs_warn(fs_info, "free space tree is invalid");
3265 		clear_free_space_tree = 1;
3266 	}
3267 
3268 	if (clear_free_space_tree) {
3269 		btrfs_info(fs_info, "clearing free space tree");
3270 		ret = btrfs_clear_free_space_tree(fs_info);
3271 		if (ret) {
3272 			btrfs_warn(fs_info,
3273 				   "failed to clear free space tree: %d", ret);
3274 			close_ctree(fs_info);
3275 			return ret;
3276 		}
3277 	}
3278 
3279 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3280 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3281 		btrfs_info(fs_info, "creating free space tree");
3282 		ret = btrfs_create_free_space_tree(fs_info);
3283 		if (ret) {
3284 			btrfs_warn(fs_info,
3285 				"failed to create free space tree: %d", ret);
3286 			close_ctree(fs_info);
3287 			return ret;
3288 		}
3289 	}
3290 
3291 	down_read(&fs_info->cleanup_work_sem);
3292 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3293 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3294 		up_read(&fs_info->cleanup_work_sem);
3295 		close_ctree(fs_info);
3296 		return ret;
3297 	}
3298 	up_read(&fs_info->cleanup_work_sem);
3299 
3300 	ret = btrfs_resume_balance_async(fs_info);
3301 	if (ret) {
3302 		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3303 		close_ctree(fs_info);
3304 		return ret;
3305 	}
3306 
3307 	ret = btrfs_resume_dev_replace_async(fs_info);
3308 	if (ret) {
3309 		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3310 		close_ctree(fs_info);
3311 		return ret;
3312 	}
3313 
3314 	btrfs_qgroup_rescan_resume(fs_info);
3315 	btrfs_discard_resume(fs_info);
3316 
3317 	if (!fs_info->uuid_root) {
3318 		btrfs_info(fs_info, "creating UUID tree");
3319 		ret = btrfs_create_uuid_tree(fs_info);
3320 		if (ret) {
3321 			btrfs_warn(fs_info,
3322 				"failed to create the UUID tree: %d", ret);
3323 			close_ctree(fs_info);
3324 			return ret;
3325 		}
3326 	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3327 		   fs_info->generation !=
3328 				btrfs_super_uuid_tree_generation(disk_super)) {
3329 		btrfs_info(fs_info, "checking UUID tree");
3330 		ret = btrfs_check_uuid_tree(fs_info);
3331 		if (ret) {
3332 			btrfs_warn(fs_info,
3333 				"failed to check the UUID tree: %d", ret);
3334 			close_ctree(fs_info);
3335 			return ret;
3336 		}
3337 	}
3338 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3339 
3340 	/*
3341 	 * backuproot only affects mount behavior, and if open_ctree succeeded,
3342 	 * there is no need to keep the flag
3343 	 */
3344 	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3345 
3346 	return 0;
3347 
3348 fail_qgroup:
3349 	btrfs_free_qgroup_config(fs_info);
3350 fail_trans_kthread:
3351 	kthread_stop(fs_info->transaction_kthread);
3352 	btrfs_cleanup_transaction(fs_info);
3353 	btrfs_free_fs_roots(fs_info);
3354 fail_cleaner:
3355 	kthread_stop(fs_info->cleaner_kthread);
3356 
3357 	/*
3358 	 * make sure we're done with the btree inode before we stop our
3359 	 * kthreads
3360 	 */
3361 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3362 
3363 fail_sysfs:
3364 	btrfs_sysfs_remove_mounted(fs_info);
3365 
3366 fail_fsdev_sysfs:
3367 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3368 
3369 fail_block_groups:
3370 	btrfs_put_block_group_cache(fs_info);
3371 
3372 fail_tree_roots:
3373 	free_root_pointers(fs_info, true);
3374 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3375 
3376 fail_sb_buffer:
3377 	btrfs_stop_all_workers(fs_info);
3378 	btrfs_free_block_groups(fs_info);
3379 fail_alloc:
3380 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3381 
3382 	iput(fs_info->btree_inode);
3383 fail:
3384 	btrfs_close_devices(fs_info->fs_devices);
3385 	return err;
3386 }
3387 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
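
/*
 * The annotation above makes open_ctree() a target for the kernel's
 * function error injection framework.  With CONFIG_FUNCTION_ERROR_INJECTION
 * and CONFIG_FAIL_FUNCTION enabled, mount failures can be simulated from
 * debugfs, roughly:
 *
 *	echo open_ctree > /sys/kernel/debug/fail_function/inject
 *	printf '%#llx' -12 > /sys/kernel/debug/fail_function/open_ctree/retval
 *
 * which exercises the error paths above without needing real I/O failures.
 */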
3388 
3389 static void btrfs_end_super_write(struct bio *bio)
3390 {
3391 	struct btrfs_device *device = bio->bi_private;
3392 	struct bio_vec *bvec;
3393 	struct bvec_iter_all iter_all;
3394 	struct page *page;
3395 
3396 	bio_for_each_segment_all(bvec, bio, iter_all) {
3397 		page = bvec->bv_page;
3398 
3399 		if (bio->bi_status) {
3400 			btrfs_warn_rl_in_rcu(device->fs_info,
3401 				"lost page write due to IO error on %s (%d)",
3402 				rcu_str_deref(device->name),
3403 				blk_status_to_errno(bio->bi_status));
3404 			ClearPageUptodate(page);
3405 			SetPageError(page);
3406 			btrfs_dev_stat_inc_and_print(device,
3407 						     BTRFS_DEV_STAT_WRITE_ERRS);
3408 		} else {
3409 			SetPageUptodate(page);
3410 		}
3411 
3412 		put_page(page);
3413 		unlock_page(page);
3414 	}
3415 
3416 	bio_put(bio);
3417 }
3418 
3419 struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
3420 						   int copy_num)
3421 {
3422 	struct btrfs_super_block *super;
3423 	struct page *page;
3424 	u64 bytenr;
3425 	struct address_space *mapping = bdev->bd_inode->i_mapping;
3426 
3427 	bytenr = btrfs_sb_offset(copy_num);
3428 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3429 		return ERR_PTR(-EINVAL);
3430 
3431 	page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
3432 	if (IS_ERR(page))
3433 		return ERR_CAST(page);
3434 
3435 	super = page_address(page);
3436 	if (btrfs_super_bytenr(super) != bytenr ||
3437 		    btrfs_super_magic(super) != BTRFS_MAGIC) {
3438 		btrfs_release_disk_super(super);
3439 		return ERR_PTR(-EINVAL);
3440 	}
3441 
3442 	return super;
3443 }
3444 
3445 
3446 struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
3447 {
3448 	struct btrfs_super_block *super, *latest = NULL;
3449 	int i;
3450 	u64 transid = 0;
3451 
3452 	/* We would like to check all the supers, but that would make a
3453 	 * btrfs mount succeed after a mkfs from a different FS, so only
3454 	 * the primary copy is read here.  Scanning the later supers (up
3455 	 * to BTRFS_SUPER_MIRROR_MAX) would need a special mount option.
3456 	 */
3457 	for (i = 0; i < 1; i++) {
3458 		super = btrfs_read_dev_one_super(bdev, i);
3459 		if (IS_ERR(super))
3460 			continue;
3461 
3462 		if (!latest || btrfs_super_generation(super) > transid) {
3463 			if (latest)
3464 				btrfs_release_disk_super(super);
3465 
3466 			latest = super;
3467 			transid = btrfs_super_generation(super);
3468 		}
3469 	}
3470 
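	/*
	 * With the single loop iteration above, super is either the ERR_PTR
	 * of a failed read or the same pointer as latest, so returning it
	 * covers both the error and the success case.
	 */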
3471 	return super;
3472 }
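
/*
 * A minimal usage sketch (guarded out, not built) of the read/release
 * pairing expected by the helpers above; the bdev variable is a stand-in
 * for a block device the caller already holds open.
 */
#if 0
	struct btrfs_super_block *sb = btrfs_read_dev_super(bdev);

	if (IS_ERR(sb))
		return PTR_ERR(sb);
	/* ... inspect btrfs_super_generation(sb), fsid, etc. ... */
	btrfs_release_disk_super(sb);
#endif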
3473 
3474 /*
3475  * Write superblock @sb to the @device. Do not wait for completion; all the
3476  * pages we use for writing are locked.
3477  *
3478  * Write @max_mirrors copies of the superblock, where 0 means the default of
3479  * all copies that fit the expected device size at commit time. Note that
3480  * max_mirrors must be the same for the write and wait phases.
3481  *
3482  * Return 0 if at least one copy was submitted, -1 if all of them failed.
3483  */
3484 static int write_dev_supers(struct btrfs_device *device,
3485 			    struct btrfs_super_block *sb, int max_mirrors)
3486 {
3487 	struct btrfs_fs_info *fs_info = device->fs_info;
3488 	struct address_space *mapping = device->bdev->bd_inode->i_mapping;
3489 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3490 	int i;
3491 	int errors = 0;
3492 	u64 bytenr;
3493 
3494 	if (max_mirrors == 0)
3495 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3496 
3497 	shash->tfm = fs_info->csum_shash;
3498 
3499 	for (i = 0; i < max_mirrors; i++) {
3500 		struct page *page;
3501 		struct bio *bio;
3502 		struct btrfs_super_block *disk_super;
3503 
3504 		bytenr = btrfs_sb_offset(i);
3505 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3506 		    device->commit_total_bytes)
3507 			break;
3508 
3509 		btrfs_set_super_bytenr(sb, bytenr);
3510 
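		/*
		 * The superblock checksum covers everything after the csum
		 * field itself; fs_info->csum_shash is the algorithm chosen
		 * at mkfs time (crc32c by default).
		 */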
3511 		crypto_shash_init(shash);
3512 		crypto_shash_update(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3513 				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
3514 		crypto_shash_final(shash, sb->csum);
3515 
3516 		page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT,
3517 					   GFP_NOFS);
3518 		if (!page) {
3519 			btrfs_err(device->fs_info,
3520 			    "couldn't get super block page for bytenr %llu",
3521 			    bytenr);
3522 			errors++;
3523 			continue;
3524 		}
3525 
3526 		/* Bump the refcount for wait_dev_supers() */
3527 		get_page(page);
3528 
3529 		disk_super = page_address(page);
3530 		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3531 
3532 		/*
3533 		 * Directly use bios here instead of relying on the page cache
3534 		 * to do I/O, so we don't lose the ability to do integrity
3535 		 * checking.
3536 		 */
3537 		bio = bio_alloc(GFP_NOFS, 1);
3538 		bio_set_dev(bio, device->bdev);
3539 		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3540 		bio->bi_private = device;
3541 		bio->bi_end_io = btrfs_end_super_write;
3542 		__bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
3543 			       offset_in_page(bytenr));
3544 
3545 		/*
3546 		 * We FUA only the first super block.  The others we allow to
3547 		 * go down lazy and there's a short window where the on-disk
3548 		 * copies might still contain the older version.
3549 		 */
3550 		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO;
3551 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3552 			bio->bi_opf |= REQ_FUA;
3553 
3554 		btrfsic_submit_bio(bio);
3555 	}
3556 	return errors < i ? 0 : -1;
3557 }
3558 
3559 /*
3560  * Wait for write completion of the superblocks submitted by
3561  * write_dev_supers(); @max_mirrors must match the write phase.
3562  *
3563  * Return 0 when at least one copy succeeded, or -1 when the primary copy
3564  * failed or no copy succeeded (page not found or not marked uptodate).
3565  */
3566 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3567 {
3568 	int i;
3569 	int errors = 0;
3570 	bool primary_failed = false;
3571 	u64 bytenr;
3572 
3573 	if (max_mirrors == 0)
3574 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3575 
3576 	for (i = 0; i < max_mirrors; i++) {
3577 		struct page *page;
3578 
3579 		bytenr = btrfs_sb_offset(i);
3580 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3581 		    device->commit_total_bytes)
3582 			break;
3583 
3584 		page = find_get_page(device->bdev->bd_inode->i_mapping,
3585 				     bytenr >> PAGE_SHIFT);
3586 		if (!page) {
3587 			errors++;
3588 			if (i == 0)
3589 				primary_failed = true;
3590 			continue;
3591 		}
3592 		/* Page is submitted locked and unlocked once the IO completes */
3593 		wait_on_page_locked(page);
3594 		if (PageError(page)) {
3595 			errors++;
3596 			if (i == 0)
3597 				primary_failed = true;
3598 		}
3599 
3600 		/* Drop our reference */
3601 		put_page(page);
3602 
3603 		/* Drop the reference from the writing run */
3604 		put_page(page);
3605 	}
3606 
3607 	/* log error, force error return */
3608 	if (primary_failed) {
3609 		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3610 			  device->devid);
3611 		return -1;
3612 	}
3613 
3614 	return errors < i ? 0 : -1;
3615 }
3616 
3617 /*
3618  * Endio for write_dev_flush(); this wakes anyone waiting
3619  * for the barrier when it is done.
3620  */
3621 static void btrfs_end_empty_barrier(struct bio *bio)
3622 {
3623 	complete(bio->bi_private);
3624 }
3625 
3626 /*
3627  * Submit a flush request to the device if it supports it. Error handling is
3628  * done in the waiting counterpart.
3629  */
3630 static void write_dev_flush(struct btrfs_device *device)
3631 {
3632 	struct request_queue *q = bdev_get_queue(device->bdev);
3633 	struct bio *bio = device->flush_bio;
3634 
3635 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3636 		return;
3637 
3638 	bio_reset(bio);
3639 	bio->bi_end_io = btrfs_end_empty_barrier;
3640 	bio_set_dev(bio, device->bdev);
3641 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3642 	init_completion(&device->flush_wait);
3643 	bio->bi_private = &device->flush_wait;
3644 
3645 	btrfsic_submit_bio(bio);
3646 	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3647 }
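
/*
 * The flush bio carries no data: REQ_PREFLUSH on an empty bio asks the
 * device to flush its volatile write cache and nothing else.
 */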
3648 
3649 /*
3650  * If the flush bio has been submitted by write_dev_flush, wait for it.
3651  */
3652 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3653 {
3654 	struct bio *bio = device->flush_bio;
3655 
3656 	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3657 		return BLK_STS_OK;
3658 
3659 	clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3660 	wait_for_completion_io(&device->flush_wait);
3661 
3662 	return bio->bi_status;
3663 }
3664 
3665 static int check_barrier_error(struct btrfs_fs_info *fs_info)
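/*
 * Decide whether the filesystem can stay writable despite flush errors: if
 * the failed devices exceed what the redundancy profiles can tolerate
 * (per btrfs_check_rw_degradable()), return -EIO.
 */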
3666 {
3667 	if (!btrfs_check_rw_degradable(fs_info, NULL))
3668 		return -EIO;
3669 	return 0;
3670 }
3671 
3672 /*
3673  * send an empty flush down to each device in parallel,
3674  * then wait for them
3675  */
3676 static int barrier_all_devices(struct btrfs_fs_info *info)
3677 {
3678 	struct list_head *head;
3679 	struct btrfs_device *dev;
3680 	int errors_wait = 0;
3681 	blk_status_t ret;
3682 
3683 	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3684 	/* send down all the barriers */
3685 	head = &info->fs_devices->devices;
3686 	list_for_each_entry(dev, head, dev_list) {
3687 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3688 			continue;
3689 		if (!dev->bdev)
3690 			continue;
3691 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3692 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3693 			continue;
3694 
3695 		write_dev_flush(dev);
3696 		dev->last_flush_error = BLK_STS_OK;
3697 	}
3698 
3699 	/* wait for all the barriers */
3700 	list_for_each_entry(dev, head, dev_list) {
3701 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3702 			continue;
3703 		if (!dev->bdev) {
3704 			errors_wait++;
3705 			continue;
3706 		}
3707 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3708 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3709 			continue;
3710 
3711 		ret = wait_dev_flush(dev);
3712 		if (ret) {
3713 			dev->last_flush_error = ret;
3714 			btrfs_dev_stat_inc_and_print(dev,
3715 					BTRFS_DEV_STAT_FLUSH_ERRS);
3716 			errors_wait++;
3717 		}
3718 	}
3719 
3720 	if (errors_wait) {
3721 		/*
3722 		 * A single flush failure is not necessarily fatal: the status
3723 		 * of all disks must be combined to judge the volume status,
3724 		 * so the error checking is pushed to a separate helper.
3725 		 */
3726 		return check_barrier_error(info);
3727 	}
3728 	return 0;
3729 }
3730 
3731 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3732 {
3733 	int raid_type;
3734 	int min_tolerated = INT_MAX;
3735 
3736 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3737 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3738 		min_tolerated = min_t(int, min_tolerated,
3739 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3740 				    tolerated_failures);
3741 
3742 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3743 		if (raid_type == BTRFS_RAID_SINGLE)
3744 			continue;
3745 		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3746 			continue;
3747 		min_tolerated = min_t(int, min_tolerated,
3748 				    btrfs_raid_array[raid_type].
3749 				    tolerated_failures);
3750 	}
3751 
3752 	if (min_tolerated == INT_MAX) {
3753 		pr_warn("BTRFS: unknown raid flag: %llu", flags);
3754 		min_tolerated = 0;
3755 	}
3756 
3757 	return min_tolerated;
3758 }
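
/*
 * For example, RAID6 tolerates losing two devices, RAID1/RAID5/RAID10 one,
 * and SINGLE/RAID0/DUP none; when @flags contains several profile bits the
 * minimum across them wins.
 */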
3759 
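/*
 * Write the superblock to all writeable devices in two phases: submit the
 * copies (write_dev_supers()), then wait for completion (wait_dev_supers()),
 * optionally preceded by a flush barrier on every device.  The commit
 * tolerates up to num_devices - 1 individual write failures.
 */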
3760 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3761 {
3762 	struct list_head *head;
3763 	struct btrfs_device *dev;
3764 	struct btrfs_super_block *sb;
3765 	struct btrfs_dev_item *dev_item;
3766 	int ret;
3767 	int do_barriers;
3768 	int max_errors;
3769 	int total_errors = 0;
3770 	u64 flags;
3771 
3772 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3773 
3774 	/*
3775 	 * max_mirrors == 0 indicates we're called from commit_transaction,
3776 	 * not from fsync where the tree roots in fs_info may not be
3777 	 * consistent on disk.
3778 	 */
3779 	if (max_mirrors == 0)
3780 		backup_super_roots(fs_info);
3781 
3782 	sb = fs_info->super_for_commit;
3783 	dev_item = &sb->dev_item;
3784 
3785 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3786 	head = &fs_info->fs_devices->devices;
3787 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3788 
3789 	if (do_barriers) {
3790 		ret = barrier_all_devices(fs_info);
3791 		if (ret) {
3792 			mutex_unlock(
3793 				&fs_info->fs_devices->device_list_mutex);
3794 			btrfs_handle_fs_error(fs_info, ret,
3795 					      "errors while submitting device barriers.");
3796 			return ret;
3797 		}
3798 	}
3799 
3800 	list_for_each_entry(dev, head, dev_list) {
3801 		if (!dev->bdev) {
3802 			total_errors++;
3803 			continue;
3804 		}
3805 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3806 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3807 			continue;
3808 
3809 		btrfs_set_stack_device_generation(dev_item, 0);
3810 		btrfs_set_stack_device_type(dev_item, dev->type);
3811 		btrfs_set_stack_device_id(dev_item, dev->devid);
3812 		btrfs_set_stack_device_total_bytes(dev_item,
3813 						   dev->commit_total_bytes);
3814 		btrfs_set_stack_device_bytes_used(dev_item,
3815 						  dev->commit_bytes_used);
3816 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3817 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3818 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3819 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3820 		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
3821 		       BTRFS_FSID_SIZE);
3822 
3823 		flags = btrfs_super_flags(sb);
3824 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3825 
3826 		ret = btrfs_validate_write_super(fs_info, sb);
3827 		if (ret < 0) {
3828 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3829 			btrfs_handle_fs_error(fs_info, -EUCLEAN,
3830 				"unexpected superblock corruption detected");
3831 			return -EUCLEAN;
3832 		}
3833 
3834 		ret = write_dev_supers(dev, sb, max_mirrors);
3835 		if (ret)
3836 			total_errors++;
3837 	}
3838 	if (total_errors > max_errors) {
3839 		btrfs_err(fs_info, "%d errors while writing supers",
3840 			  total_errors);
3841 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3842 
3843 		/* FUA is masked off if unsupported and can't be the reason */
3844 		btrfs_handle_fs_error(fs_info, -EIO,
3845 				      "%d errors while writing supers",
3846 				      total_errors);
3847 		return -EIO;
3848 	}
3849 
3850 	total_errors = 0;
3851 	list_for_each_entry(dev, head, dev_list) {
3852 		if (!dev->bdev)
3853 			continue;
3854 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3855 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3856 			continue;
3857 
3858 		ret = wait_dev_supers(dev, max_mirrors);
3859 		if (ret)
3860 			total_errors++;
3861 	}
3862 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3863 	if (total_errors > max_errors) {
3864 		btrfs_handle_fs_error(fs_info, -EIO,
3865 				      "%d errors while writing supers",
3866 				      total_errors);
3867 		return -EIO;
3868 	}
3869 	return 0;
3870 }
3871 
3872 /* Drop a fs root from the radix tree and free it. */
3873 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3874 				  struct btrfs_root *root)
3875 {
3876 	bool drop_ref = false;
3877 
3878 	spin_lock(&fs_info->fs_roots_radix_lock);
3879 	radix_tree_delete(&fs_info->fs_roots_radix,
3880 			  (unsigned long)root->root_key.objectid);
3881 	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
3882 		drop_ref = true;
3883 	spin_unlock(&fs_info->fs_roots_radix_lock);
3884 
3885 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3886 		ASSERT(root->log_root == NULL);
3887 		if (root->reloc_root) {
3888 			btrfs_put_root(root->reloc_root);
3889 			root->reloc_root = NULL;
3890 		}
3891 	}
3892 
3893 	if (root->free_ino_pinned)
3894 		__btrfs_remove_free_space_cache(root->free_ino_pinned);
3895 	if (root->free_ino_ctl)
3896 		__btrfs_remove_free_space_cache(root->free_ino_ctl);
3897 	if (root->ino_cache_inode) {
3898 		iput(root->ino_cache_inode);
3899 		root->ino_cache_inode = NULL;
3900 	}
3901 	if (drop_ref)
3902 		btrfs_put_root(root);
3903 }
3904 
3905 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3906 {
3907 	u64 root_objectid = 0;
3908 	struct btrfs_root *gang[8];
3909 	int i = 0;
3910 	int err = 0;
3911 	unsigned int ret = 0;
3912 
3913 	while (1) {
3914 		spin_lock(&fs_info->fs_roots_radix_lock);
3915 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3916 					     (void **)gang, root_objectid,
3917 					     ARRAY_SIZE(gang));
3918 		if (!ret) {
3919 			spin_unlock(&fs_info->fs_roots_radix_lock);
3920 			break;
3921 		}
3922 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
3923 
3924 		for (i = 0; i < ret; i++) {
3925 			/* Avoid grabbing roots in dead_roots */
3926 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3927 				gang[i] = NULL;
3928 				continue;
3929 			}
3930 			/* Grab all the search results for later use */
3931 			gang[i] = btrfs_grab_root(gang[i]);
3932 		}
3933 		spin_unlock(&fs_info->fs_roots_radix_lock);
3934 
3935 		for (i = 0; i < ret; i++) {
3936 			if (!gang[i])
3937 				continue;
3938 			root_objectid = gang[i]->root_key.objectid;
3939 			err = btrfs_orphan_cleanup(gang[i]);
3940 			if (err)
3941 				break;
3942 			btrfs_put_root(gang[i]);
3943 		}
3944 		root_objectid++;
3945 	}
3946 
3947 	/* release the uncleaned roots due to error */
3948 	for (; i < ret; i++) {
3949 		if (gang[i])
3950 			btrfs_put_root(gang[i]);
3951 	}
3952 	return err;
3953 }
3954 
3955 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3956 {
3957 	struct btrfs_root *root = fs_info->tree_root;
3958 	struct btrfs_trans_handle *trans;
3959 
3960 	mutex_lock(&fs_info->cleaner_mutex);
3961 	btrfs_run_delayed_iputs(fs_info);
3962 	mutex_unlock(&fs_info->cleaner_mutex);
3963 	wake_up_process(fs_info->cleaner_kthread);
3964 
3965 	/* wait until ongoing cleanup work is done */
3966 	down_write(&fs_info->cleanup_work_sem);
3967 	up_write(&fs_info->cleanup_work_sem);
3968 
3969 	trans = btrfs_join_transaction(root);
3970 	if (IS_ERR(trans))
3971 		return PTR_ERR(trans);
3972 	return btrfs_commit_transaction(trans);
3973 }
3974 
3975 void __cold close_ctree(struct btrfs_fs_info *fs_info)
3976 {
3977 	int ret;
3978 
3979 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3980 	/*
3981 	 * We don't want the cleaner to start new transactions, add more delayed
3982 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
3983 	 * because that frees the task_struct, and the transaction kthread might
3984 	 * still try to wake up the cleaner.
3985 	 */
3986 	kthread_park(fs_info->cleaner_kthread);
3987 
3988 	/* wait for the qgroup rescan worker to stop */
3989 	btrfs_qgroup_wait_for_completion(fs_info, false);
3990 
3991 	/* wait for the uuid_scan task to finish */
3992 	down(&fs_info->uuid_tree_rescan_sem);
3993 	/* avoid complaints from lockdep et al., set sem back to initial state */
3994 	up(&fs_info->uuid_tree_rescan_sem);
3995 
3996 	/* pause restriper - we want to resume on mount */
3997 	btrfs_pause_balance(fs_info);
3998 
3999 	btrfs_dev_replace_suspend_for_unmount(fs_info);
4000 
4001 	btrfs_scrub_cancel(fs_info);
4002 
4003 	/* wait for any defraggers to finish */
4004 	wait_event(fs_info->transaction_wait,
4005 		   (atomic_read(&fs_info->defrag_running) == 0));
4006 
4007 	/* clear out the rbtree of defraggable inodes */
4008 	btrfs_cleanup_defrag_inodes(fs_info);
4009 
4010 	cancel_work_sync(&fs_info->async_reclaim_work);
4011 
4012 	/* Cancel or finish ongoing discard work */
4013 	btrfs_discard_cleanup(fs_info);
4014 
4015 	if (!sb_rdonly(fs_info->sb)) {
4016 		/*
4017 		 * The cleaner kthread is stopped, so do one final pass over
4018 		 * unused block groups.
4019 		 */
4020 		btrfs_delete_unused_bgs(fs_info);
4021 
4022 		/*
4023 		 * There might be existing delayed inode workers still running
4024 		 * and holding an empty delayed inode item. We must wait for
4025 		 * them to complete first because they can create a transaction.
4026 		 * This happens when someone calls btrfs_balance_delayed_items()
4027 		 * and then a transaction commit runs the same delayed nodes
4028 		 * before any delayed worker has done something with the nodes.
4029 		 * We must wait for any worker here and not at transaction
4030 		 * commit time since that could cause a deadlock.
4031 		 * This is a very rare case.
4032 		 */
4033 		btrfs_flush_workqueue(fs_info->delayed_workers);
4034 
4035 		ret = btrfs_commit_super(fs_info);
4036 		if (ret)
4037 			btrfs_err(fs_info, "commit super ret %d", ret);
4038 	}
4039 
4040 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
4041 	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
4042 		btrfs_error_commit_super(fs_info);
4043 
4044 	kthread_stop(fs_info->transaction_kthread);
4045 	kthread_stop(fs_info->cleaner_kthread);
4046 
4047 	ASSERT(list_empty(&fs_info->delayed_iputs));
4048 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4049 
4050 	btrfs_free_qgroup_config(fs_info);
4051 	ASSERT(list_empty(&fs_info->delalloc_roots));
4052 
4053 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4054 		btrfs_info(fs_info, "at unmount delalloc count %lld",
4055 		       percpu_counter_sum(&fs_info->delalloc_bytes));
4056 	}
4057 
4058 	if (percpu_counter_sum(&fs_info->dio_bytes))
4059 		btrfs_info(fs_info, "at unmount dio bytes count %lld",
4060 			   percpu_counter_sum(&fs_info->dio_bytes));
4061 
4062 	btrfs_sysfs_remove_mounted(fs_info);
4063 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4064 
4065 	btrfs_put_block_group_cache(fs_info);
4066 
4067 	/*
4068 	 * We must make sure no read requests are submitted
4069 	 * after we stop all the workers.
4070 	 */
4071 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4072 	btrfs_stop_all_workers(fs_info);
4073 
4074 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4075 	free_root_pointers(fs_info, true);
4076 	btrfs_free_fs_roots(fs_info);
4077 
4078 	/*
4079 	 * We must free the block groups after dropping the fs_roots as we could
4080 	 * have had an IO error and have left over tree log blocks that aren't
4081 	 * cleaned up until the fs roots are freed.  This makes the block group
4082 	 * accounting appear to be wrong because there's pending reserved bytes,
4083 	 * so make sure we do the block group cleanup afterwards.
4084 	 */
4085 	btrfs_free_block_groups(fs_info);
4086 
4087 	iput(fs_info->btree_inode);
4088 
4089 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4090 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
4091 		btrfsic_unmount(fs_info->fs_devices);
4092 #endif
4093 
4094 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
4095 	btrfs_close_devices(fs_info->fs_devices);
4096 }
4097 
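/*
 * Return 1 if the buffer is uptodate and was written by the expected
 * transaction, 0 if not, or -EAGAIN when @atomic is set and the answer
 * could not be determined without blocking.
 */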
4098 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4099 			  int atomic)
4100 {
4101 	int ret;
4102 	struct inode *btree_inode = buf->pages[0]->mapping->host;
4103 
4104 	ret = extent_buffer_uptodate(buf);
4105 	if (!ret)
4106 		return ret;
4107 
4108 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4109 				    parent_transid, atomic);
4110 	if (ret == -EAGAIN)
4111 		return ret;
4112 	return !ret;
4113 }
4114 
4115 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4116 {
4117 	struct btrfs_fs_info *fs_info;
4118 	struct btrfs_root *root;
4119 	u64 transid = btrfs_header_generation(buf);
4120 	int was_dirty;
4121 
4122 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4123 	/*
4124 	 * This is a fast path so only do this check if we have sanity tests
4125 	 * enabled.  Nothing should be marking unmapped buffers dirty outside
4126 	 * of the sanity tests.
4127 	 */
4128 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4129 		return;
4130 #endif
4131 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4132 	fs_info = root->fs_info;
4133 	btrfs_assert_tree_locked(buf);
4134 	if (transid != fs_info->generation)
4135 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4136 			buf->start, transid, fs_info->generation);
4137 	was_dirty = set_extent_buffer_dirty(buf);
4138 	if (!was_dirty)
4139 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4140 					 buf->len,
4141 					 fs_info->dirty_metadata_batch);
4142 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4143 	/*
4144 	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
4145 	 * but the item data not yet updated, so only check the item
4146 	 * pointers here, not the item data.
4147 	 */
4148 	if (btrfs_header_level(buf) == 0 &&
4149 	    btrfs_check_leaf_relaxed(buf)) {
4150 		btrfs_print_leaf(buf);
4151 		ASSERT(0);
4152 	}
4153 #endif
4154 }
4155 
4156 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4157 					int flush_delayed)
4158 {
4159 	/*
4160 	 * It looks as though older kernels can get into trouble with this
4161 	 * code; they end up stuck in balance_dirty_pages() forever.
4162 	 */
4163 	int ret;
4164 
4165 	if (current->flags & PF_MEMALLOC)
4166 		return;
4167 
4168 	if (flush_delayed)
4169 		btrfs_balance_delayed_items(fs_info);
4170 
4171 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4172 				     BTRFS_DIRTY_METADATA_THRESH,
4173 				     fs_info->dirty_metadata_batch);
4174 	if (ret > 0)
4175 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4177 }
4178 
4179 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4180 {
4181 	__btrfs_btree_balance_dirty(fs_info, 1);
4182 }
4183 
4184 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4185 {
4186 	__btrfs_btree_balance_dirty(fs_info, 0);
4187 }
4188 
4189 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
4190 		      struct btrfs_key *first_key)
4191 {
4192 	return btree_read_extent_buffer_pages(buf, parent_transid,
4193 					      level, first_key);
4194 }
4195 
4196 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4197 {
4198 	/* cleanup FS via transaction */
4199 	btrfs_cleanup_transaction(fs_info);
4200 
4201 	mutex_lock(&fs_info->cleaner_mutex);
4202 	btrfs_run_delayed_iputs(fs_info);
4203 	mutex_unlock(&fs_info->cleaner_mutex);
4204 
4205 	down_write(&fs_info->cleanup_work_sem);
4206 	up_write(&fs_info->cleanup_work_sem);
4207 }
4208 
4209 static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4210 {
4211 	struct btrfs_root *gang[8];
4212 	u64 root_objectid = 0;
4213 	int ret;
4214 
4215 	spin_lock(&fs_info->fs_roots_radix_lock);
4216 	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4217 					     (void **)gang, root_objectid,
4218 					     ARRAY_SIZE(gang))) != 0) {
4219 		int i;
4220 
4221 		for (i = 0; i < ret; i++)
4222 			gang[i] = btrfs_grab_root(gang[i]);
4223 		spin_unlock(&fs_info->fs_roots_radix_lock);
4224 
4225 		for (i = 0; i < ret; i++) {
4226 			if (!gang[i])
4227 				continue;
4228 			root_objectid = gang[i]->root_key.objectid;
4229 			btrfs_free_log(NULL, gang[i]);
4230 			btrfs_put_root(gang[i]);
4231 		}
4232 		root_objectid++;
4233 		spin_lock(&fs_info->fs_roots_radix_lock);
4234 	}
4235 	spin_unlock(&fs_info->fs_roots_radix_lock);
4236 	btrfs_free_log_root_tree(NULL, fs_info);
4237 }
4238 
4239 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4240 {
4241 	struct btrfs_ordered_extent *ordered;
4242 
4243 	spin_lock(&root->ordered_extent_lock);
4244 	/*
4245 	 * This just short circuits the ordered completion path, which
4246 	 * ensures the ordered extent gets properly cleaned up.
4247 	 */
4248 	list_for_each_entry(ordered, &root->ordered_extents,
4249 			    root_extent_list)
4250 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4251 	spin_unlock(&root->ordered_extent_lock);
4252 }
4253 
4254 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4255 {
4256 	struct btrfs_root *root;
4257 	struct list_head splice;
4258 
4259 	INIT_LIST_HEAD(&splice);
4260 
4261 	spin_lock(&fs_info->ordered_root_lock);
4262 	list_splice_init(&fs_info->ordered_roots, &splice);
4263 	while (!list_empty(&splice)) {
4264 		root = list_first_entry(&splice, struct btrfs_root,
4265 					ordered_root);
4266 		list_move_tail(&root->ordered_root,
4267 			       &fs_info->ordered_roots);
4268 
4269 		spin_unlock(&fs_info->ordered_root_lock);
4270 		btrfs_destroy_ordered_extents(root);
4271 
4272 		cond_resched();
4273 		spin_lock(&fs_info->ordered_root_lock);
4274 	}
4275 	spin_unlock(&fs_info->ordered_root_lock);
4276 
4277 	/*
4278 	 * We need this here because if we've been flipped read-only we won't
4279 	 * get sync() from the umount, so we need to make sure any ordered
4280 	 * extents whose dirty pages haven't started writeback yet actually
4281 	 * get run and error out properly.
4282 	 */
4283 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4284 }
4285 
4286 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4287 				      struct btrfs_fs_info *fs_info)
4288 {
4289 	struct rb_node *node;
4290 	struct btrfs_delayed_ref_root *delayed_refs;
4291 	struct btrfs_delayed_ref_node *ref;
4292 	int ret = 0;
4293 
4294 	delayed_refs = &trans->delayed_refs;
4295 
4296 	spin_lock(&delayed_refs->lock);
4297 	if (atomic_read(&delayed_refs->num_entries) == 0) {
4298 		spin_unlock(&delayed_refs->lock);
4299 		btrfs_debug(fs_info, "delayed_refs has NO entry");
4300 		return ret;
4301 	}
4302 
4303 	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4304 		struct btrfs_delayed_ref_head *head;
4305 		struct rb_node *n;
4306 		bool pin_bytes = false;
4307 
4308 		head = rb_entry(node, struct btrfs_delayed_ref_head,
4309 				href_node);
4310 		if (btrfs_delayed_ref_lock(delayed_refs, head))
4311 			continue;
4312 
4313 		spin_lock(&head->lock);
4314 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4315 			ref = rb_entry(n, struct btrfs_delayed_ref_node,
4316 				       ref_node);
4317 			ref->in_tree = 0;
4318 			rb_erase_cached(&ref->ref_node, &head->ref_tree);
4319 			RB_CLEAR_NODE(&ref->ref_node);
4320 			if (!list_empty(&ref->add_list))
4321 				list_del(&ref->add_list);
4322 			atomic_dec(&delayed_refs->num_entries);
4323 			btrfs_put_delayed_ref(ref);
4324 		}
4325 		if (head->must_insert_reserved)
4326 			pin_bytes = true;
4327 		btrfs_free_delayed_extent_op(head->extent_op);
4328 		btrfs_delete_ref_head(delayed_refs, head);
4329 		spin_unlock(&head->lock);
4330 		spin_unlock(&delayed_refs->lock);
4331 		mutex_unlock(&head->mutex);
4332 
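		/*
		 * must_insert_reserved means the extent was allocated and
		 * reserved but its backref was never inserted; keep the space
		 * accounting consistent by moving the bytes from reserved to
		 * pinned before unpinning the range below.
		 */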
4333 		if (pin_bytes) {
4334 			struct btrfs_block_group *cache;
4335 
4336 			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
4337 			BUG_ON(!cache);
4338 
4339 			spin_lock(&cache->space_info->lock);
4340 			spin_lock(&cache->lock);
4341 			cache->pinned += head->num_bytes;
4342 			btrfs_space_info_update_bytes_pinned(fs_info,
4343 				cache->space_info, head->num_bytes);
4344 			cache->reserved -= head->num_bytes;
4345 			cache->space_info->bytes_reserved -= head->num_bytes;
4346 			spin_unlock(&cache->lock);
4347 			spin_unlock(&cache->space_info->lock);
4348 			percpu_counter_add_batch(
4349 				&cache->space_info->total_bytes_pinned,
4350 				head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
4351 
4352 			btrfs_put_block_group(cache);
4353 
4354 			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
4355 				head->bytenr + head->num_bytes - 1);
4356 		}
4357 		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4358 		btrfs_put_delayed_ref_head(head);
4359 		cond_resched();
4360 		spin_lock(&delayed_refs->lock);
4361 	}
4362 	btrfs_qgroup_destroy_extent_records(trans);
4363 
4364 	spin_unlock(&delayed_refs->lock);
4365 
4366 	return ret;
4367 }
4368 
4369 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4370 {
4371 	struct btrfs_inode *btrfs_inode;
4372 	struct list_head splice;
4373 
4374 	INIT_LIST_HEAD(&splice);
4375 
4376 	spin_lock(&root->delalloc_lock);
4377 	list_splice_init(&root->delalloc_inodes, &splice);
4378 
4379 	while (!list_empty(&splice)) {
4380 		struct inode *inode = NULL;
4381 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4382 					       delalloc_inodes);
4383 		__btrfs_del_delalloc_inode(root, btrfs_inode);
4384 		spin_unlock(&root->delalloc_lock);
4385 
4386 		/*
4387 		 * Make sure we get a live inode and that it won't disappear
4388 		 * in the meantime.
4389 		 */
4390 		inode = igrab(&btrfs_inode->vfs_inode);
4391 		if (inode) {
4392 			invalidate_inode_pages2(inode->i_mapping);
4393 			iput(inode);
4394 		}
4395 		spin_lock(&root->delalloc_lock);
4396 	}
4397 	spin_unlock(&root->delalloc_lock);
4398 }
4399 
4400 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4401 {
4402 	struct btrfs_root *root;
4403 	struct list_head splice;
4404 
4405 	INIT_LIST_HEAD(&splice);
4406 
4407 	spin_lock(&fs_info->delalloc_root_lock);
4408 	list_splice_init(&fs_info->delalloc_roots, &splice);
4409 	while (!list_empty(&splice)) {
4410 		root = list_first_entry(&splice, struct btrfs_root,
4411 					 delalloc_root);
4412 		root = btrfs_grab_root(root);
4413 		BUG_ON(!root);
4414 		spin_unlock(&fs_info->delalloc_root_lock);
4415 
4416 		btrfs_destroy_delalloc_inodes(root);
4417 		btrfs_put_root(root);
4418 
4419 		spin_lock(&fs_info->delalloc_root_lock);
4420 	}
4421 	spin_unlock(&fs_info->delalloc_root_lock);
4422 }
4423 
4424 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4425 					struct extent_io_tree *dirty_pages,
4426 					int mark)
4427 {
4428 	int ret;
4429 	struct extent_buffer *eb;
4430 	u64 start = 0;
4431 	u64 end;
4432 
4433 	while (1) {
4434 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4435 					    mark, NULL);
4436 		if (ret)
4437 			break;
4438 
4439 		clear_extent_bits(dirty_pages, start, end, mark);
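		/*
		 * Tree blocks are nodesize-aligned, so walking the dirty
		 * range in nodesize steps visits every extent buffer that
		 * may exist in it.
		 */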
4440 		while (start <= end) {
4441 			eb = find_extent_buffer(fs_info, start);
4442 			start += fs_info->nodesize;
4443 			if (!eb)
4444 				continue;
4445 			wait_on_extent_buffer_writeback(eb);
4446 
4447 			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4448 					       &eb->bflags))
4449 				clear_extent_buffer_dirty(eb);
4450 			free_extent_buffer_stale(eb);
4451 		}
4452 	}
4453 
4454 	return ret;
4455 }
4456 
4457 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4458 				       struct extent_io_tree *unpin)
4459 {
4460 	u64 start;
4461 	u64 end;
4462 	int ret;
4463 
4464 	while (1) {
4465 		struct extent_state *cached_state = NULL;
4466 
4467 		/*
4468 		 * btrfs_finish_extent_commit() may get the same range as
4469 		 * ours between find_first_extent_bit and clear_extent_dirty.
4470 		 * Hence, hold the unused_bg_unpin_mutex to avoid double
4471 		 * unpinning the same extent range.
4472 		 */
4473 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4474 		ret = find_first_extent_bit(unpin, 0, &start, &end,
4475 					    EXTENT_DIRTY, &cached_state);
4476 		if (ret) {
4477 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4478 			break;
4479 		}
4480 
4481 		clear_extent_dirty(unpin, start, end, &cached_state);
4482 		free_extent_state(cached_state);
4483 		btrfs_error_unpin_extent_range(fs_info, start, end);
4484 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4485 		cond_resched();
4486 	}
4487 
4488 	return 0;
4489 }
4490 
4491 static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4492 {
4493 	struct inode *inode;
4494 
4495 	inode = cache->io_ctl.inode;
4496 	if (inode) {
4497 		invalidate_inode_pages2(inode->i_mapping);
4498 		BTRFS_I(inode)->generation = 0;
4499 		cache->io_ctl.inode = NULL;
4500 		iput(inode);
4501 	}
4502 	btrfs_put_block_group(cache);
4503 }
4504 
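/*
 * Clean up block groups left dirty or with in-flight free space cache IO by
 * an aborted transaction; marking the cache state BTRFS_DC_ERROR makes sure
 * the stale free space cache is not trusted later.
 */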
4505 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4506 			     struct btrfs_fs_info *fs_info)
4507 {
4508 	struct btrfs_block_group *cache;
4509 
4510 	spin_lock(&cur_trans->dirty_bgs_lock);
4511 	while (!list_empty(&cur_trans->dirty_bgs)) {
4512 		cache = list_first_entry(&cur_trans->dirty_bgs,
4513 					 struct btrfs_block_group,
4514 					 dirty_list);
4515 
4516 		if (!list_empty(&cache->io_list)) {
4517 			spin_unlock(&cur_trans->dirty_bgs_lock);
4518 			list_del_init(&cache->io_list);
4519 			btrfs_cleanup_bg_io(cache);
4520 			spin_lock(&cur_trans->dirty_bgs_lock);
4521 		}
4522 
4523 		list_del_init(&cache->dirty_list);
4524 		spin_lock(&cache->lock);
4525 		cache->disk_cache_state = BTRFS_DC_ERROR;
4526 		spin_unlock(&cache->lock);
4527 
4528 		spin_unlock(&cur_trans->dirty_bgs_lock);
4529 		btrfs_put_block_group(cache);
4530 		btrfs_delayed_refs_rsv_release(fs_info, 1);
4531 		spin_lock(&cur_trans->dirty_bgs_lock);
4532 	}
4533 	spin_unlock(&cur_trans->dirty_bgs_lock);
4534 
4535 	/*
4536 	 * Refer to the definition of the io_bgs member for details on why
4537 	 * it's safe to use it without any locking.
4538 	 */
4539 	while (!list_empty(&cur_trans->io_bgs)) {
4540 		cache = list_first_entry(&cur_trans->io_bgs,
4541 					 struct btrfs_block_group,
4542 					 io_list);
4543 
4544 		list_del_init(&cache->io_list);
4545 		spin_lock(&cache->lock);
4546 		cache->disk_cache_state = BTRFS_DC_ERROR;
4547 		spin_unlock(&cache->lock);
4548 		btrfs_cleanup_bg_io(cache);
4549 	}
4550 }
4551 
4552 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4553 				   struct btrfs_fs_info *fs_info)
4554 {
4555 	struct btrfs_device *dev, *tmp;
4556 
4557 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4558 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4559 	ASSERT(list_empty(&cur_trans->io_bgs));
4560 
4561 	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4562 				 post_commit_list) {
4563 		list_del_init(&dev->post_commit_list);
4564 	}
4565 
4566 	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4567 
4568 	cur_trans->state = TRANS_STATE_COMMIT_START;
4569 	wake_up(&fs_info->transaction_blocked_wait);
4570 
4571 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4572 	wake_up(&fs_info->transaction_wait);
4573 
4574 	btrfs_destroy_delayed_inodes(fs_info);
4575 
4576 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4577 				     EXTENT_DIRTY);
4578 	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
4579 
4580 	cur_trans->state = TRANS_STATE_COMPLETED;
4581 	wake_up(&cur_trans->commit_wait);
4582 }
4583 
4584 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4585 {
4586 	struct btrfs_transaction *t;
4587 
4588 	mutex_lock(&fs_info->transaction_kthread_mutex);
4589 
4590 	spin_lock(&fs_info->trans_lock);
4591 	while (!list_empty(&fs_info->trans_list)) {
4592 		t = list_first_entry(&fs_info->trans_list,
4593 				     struct btrfs_transaction, list);
4594 		if (t->state >= TRANS_STATE_COMMIT_START) {
4595 			refcount_inc(&t->use_count);
4596 			spin_unlock(&fs_info->trans_lock);
4597 			btrfs_wait_for_commit(fs_info, t->transid);
4598 			btrfs_put_transaction(t);
4599 			spin_lock(&fs_info->trans_lock);
4600 			continue;
4601 		}
4602 		if (t == fs_info->running_transaction) {
4603 			t->state = TRANS_STATE_COMMIT_DOING;
4604 			spin_unlock(&fs_info->trans_lock);
4605 			/*
4606 			 * We wait for num_writers to drop to 0 since we don't
4607 			 * currently hold a trans handle open for this transaction.
4608 			 */
4609 			wait_event(t->writer_wait,
4610 				   atomic_read(&t->num_writers) == 0);
4611 		} else {
4612 			spin_unlock(&fs_info->trans_lock);
4613 		}
4614 		btrfs_cleanup_one_transaction(t, fs_info);
4615 
4616 		spin_lock(&fs_info->trans_lock);
4617 		if (t == fs_info->running_transaction)
4618 			fs_info->running_transaction = NULL;
4619 		list_del_init(&t->list);
4620 		spin_unlock(&fs_info->trans_lock);
4621 
4622 		btrfs_put_transaction(t);
4623 		trace_btrfs_transaction_commit(fs_info->tree_root);
4624 		spin_lock(&fs_info->trans_lock);
4625 	}
4626 	spin_unlock(&fs_info->trans_lock);
4627 	btrfs_destroy_all_ordered_extents(fs_info);
4628 	btrfs_destroy_delayed_inodes(fs_info);
4629 	btrfs_assert_delayed_root_empty(fs_info);
4630 	btrfs_destroy_all_delalloc_inodes(fs_info);
4631 	btrfs_drop_all_logs(fs_info);
4632 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4633 
4634 	return 0;
4635 }
4636 
4637 static const struct extent_io_ops btree_extent_io_ops = {
4638 	/* mandatory callbacks */
4639 	.submit_bio_hook = btree_submit_bio_hook,
4640 	.readpage_end_io_hook = btree_readpage_end_io_hook,
4641 };
4642