xref: /openbmc/linux/fs/btrfs/disk-io.c (revision 0a671dc5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/blkdev.h>
8 #include <linux/radix-tree.h>
9 #include <linux/writeback.h>
10 #include <linux/buffer_head.h>
11 #include <linux/workqueue.h>
12 #include <linux/kthread.h>
13 #include <linux/slab.h>
14 #include <linux/migrate.h>
15 #include <linux/ratelimit.h>
16 #include <linux/uuid.h>
17 #include <linux/semaphore.h>
18 #include <linux/error-injection.h>
19 #include <linux/crc32c.h>
20 #include <linux/sched/mm.h>
21 #include <asm/unaligned.h>
22 #include <crypto/hash.h>
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "transaction.h"
26 #include "btrfs_inode.h"
27 #include "volumes.h"
28 #include "print-tree.h"
29 #include "locking.h"
30 #include "tree-log.h"
31 #include "free-space-cache.h"
32 #include "free-space-tree.h"
33 #include "inode-map.h"
34 #include "check-integrity.h"
35 #include "rcu-string.h"
36 #include "dev-replace.h"
37 #include "raid56.h"
38 #include "sysfs.h"
39 #include "qgroup.h"
40 #include "compression.h"
41 #include "tree-checker.h"
42 #include "ref-verify.h"
43 #include "block-group.h"
44 
45 #define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
46 				 BTRFS_HEADER_FLAG_RELOC |\
47 				 BTRFS_SUPER_FLAG_ERROR |\
48 				 BTRFS_SUPER_FLAG_SEEDING |\
49 				 BTRFS_SUPER_FLAG_METADUMP |\
50 				 BTRFS_SUPER_FLAG_METADUMP_V2)
51 
52 static const struct extent_io_ops btree_extent_io_ops;
53 static void end_workqueue_fn(struct btrfs_work *work);
54 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
55 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
56 				      struct btrfs_fs_info *fs_info);
57 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
58 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
59 					struct extent_io_tree *dirty_pages,
60 					int mark);
61 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
62 				       struct extent_io_tree *pinned_extents);
63 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
64 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
65 
66 /*
67  * btrfs_end_io_wq structs are used to do processing in task context when an IO
68  * is complete.  This is used during reads to verify checksums, and it is used
69  * by writes to insert metadata for new file extents after IO is complete.
70  */
71 struct btrfs_end_io_wq {
72 	struct bio *bio;
73 	bio_end_io_t *end_io;
74 	void *private;
75 	struct btrfs_fs_info *info;
76 	blk_status_t status;
77 	enum btrfs_wq_endio_type metadata;
78 	struct btrfs_work work;
79 };
80 
81 static struct kmem_cache *btrfs_end_io_wq_cache;
82 
83 int __init btrfs_end_io_wq_init(void)
84 {
85 	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
86 					sizeof(struct btrfs_end_io_wq),
87 					0,
88 					SLAB_MEM_SPREAD,
89 					NULL);
90 	if (!btrfs_end_io_wq_cache)
91 		return -ENOMEM;
92 	return 0;
93 }
94 
95 void __cold btrfs_end_io_wq_exit(void)
96 {
97 	kmem_cache_destroy(btrfs_end_io_wq_cache);
98 }
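
/*
 * Illustrative sketch (the real call sites live outside this file): the
 * cache above is created once at module init and destroyed at module
 * exit, roughly:
 *
 *	ret = btrfs_end_io_wq_init();
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_end_io_wq_exit();
 */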
99 
100 /*
101  * async submit bios are used to offload expensive checksumming
102  * onto the worker threads.  They checksum file and metadata bios
103  * just before they are sent down the IO stack.
104  */
105 struct async_submit_bio {
106 	void *private_data;
107 	struct bio *bio;
108 	extent_submit_bio_start_t *submit_bio_start;
109 	int mirror_num;
110 	/*
111 	 * bio_offset is optional, can be used if the pages in the bio
112 	 * can't tell us where in the file the bio should go
113 	 */
114 	u64 bio_offset;
115 	struct btrfs_work work;
116 	blk_status_t status;
117 };
118 
119 /*
120  * Lockdep class keys for extent_buffer->lock in this root.  For a given
121  * eb, the lockdep key is determined by the btrfs_root it belongs to and
122  * the level the eb occupies in the tree.
123  *
124  * Different roots are used for different purposes and may nest inside each
125  * other and they require separate keysets.  As lockdep keys should be
126  * static, assign keysets according to the purpose of the root as indicated
127  * by btrfs_root->root_key.objectid.  This ensures that all special purpose
128  * roots have separate keysets.
129  *
130  * Lock-nesting across peer nodes is always done with the immediate parent
131  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
132  * subclass to avoid triggering lockdep warning in such cases.
133  *
134  * The key is set by the readpage_end_io_hook after the buffer has passed
135  * csum validation but before the pages are unlocked.  It is also set by
136  * btrfs_init_new_buffer on freshly allocated blocks.
137  *
138  * We also add a check to make sure the highest level of the tree is the
139  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
140  * needs update as well.
141  */
142 #ifdef CONFIG_DEBUG_LOCK_ALLOC
143 # if BTRFS_MAX_LEVEL != 8
144 #  error "BTRFS_MAX_LEVEL != 8; update btrfs_lockdep_keysets accordingly"
145 # endif
146 
147 static struct btrfs_lockdep_keyset {
148 	u64			id;		/* root objectid */
149 	const char		*name_stem;	/* lock name stem */
150 	char			names[BTRFS_MAX_LEVEL + 1][20];
151 	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
152 } btrfs_lockdep_keysets[] = {
153 	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
154 	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
155 	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
156 	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
157 	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
158 	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
159 	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
160 	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
161 	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
162 	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
163 	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
164 	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
165 	{ .id = 0,				.name_stem = "tree"	},
166 };
167 
168 void __init btrfs_init_lockdep(void)
169 {
170 	int i, j;
171 
172 	/* initialize lockdep class names */
173 	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
174 		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
175 
176 		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
177 			snprintf(ks->names[j], sizeof(ks->names[j]),
178 				 "btrfs-%s-%02d", ks->name_stem, j);
179 	}
180 }
181 
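/*
 * Set the lockdep class of @eb->lock according to the tree the buffer
 * belongs to (@objectid) and its @level, using the keysets initialized
 * above.  Unknown object ids fall back to the catch-all "tree" entry.
 */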
182 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
183 				    int level)
184 {
185 	struct btrfs_lockdep_keyset *ks;
186 
187 	BUG_ON(level >= ARRAY_SIZE(ks->keys));
188 
189 	/* find the matching keyset, id 0 is the default entry */
190 	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
191 		if (ks->id == objectid)
192 			break;
193 
194 	lockdep_set_class_and_name(&eb->lock,
195 				   &ks->keys[level], ks->names[level]);
196 }
197 
198 #endif
199 
200 /*
201  * extents on the btree inode are pretty simple, there's one extent
202  * that covers the entire device
203  */
204 struct extent_map *btree_get_extent(struct btrfs_inode *inode,
205 		struct page *page, size_t pg_offset, u64 start, u64 len,
206 		int create)
207 {
208 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
209 	struct extent_map_tree *em_tree = &inode->extent_tree;
210 	struct extent_map *em;
211 	int ret;
212 
213 	read_lock(&em_tree->lock);
214 	em = lookup_extent_mapping(em_tree, start, len);
215 	if (em) {
216 		em->bdev = fs_info->fs_devices->latest_bdev;
217 		read_unlock(&em_tree->lock);
218 		goto out;
219 	}
220 	read_unlock(&em_tree->lock);
221 
222 	em = alloc_extent_map();
223 	if (!em) {
224 		em = ERR_PTR(-ENOMEM);
225 		goto out;
226 	}
227 	em->start = 0;
228 	em->len = (u64)-1;
229 	em->block_len = (u64)-1;
230 	em->block_start = 0;
231 	em->bdev = fs_info->fs_devices->latest_bdev;
232 
233 	write_lock(&em_tree->lock);
234 	ret = add_extent_mapping(em_tree, em, 0);
235 	if (ret == -EEXIST) {
236 		free_extent_map(em);
237 		em = lookup_extent_mapping(em_tree, start, len);
238 		if (!em)
239 			em = ERR_PTR(-EIO);
240 	} else if (ret) {
241 		free_extent_map(em);
242 		em = ERR_PTR(ret);
243 	}
244 	write_unlock(&em_tree->lock);
245 
246 out:
247 	return em;
248 }
249 
250 /*
251  * Compute the csum of a btree block and store the result in the provided buffer.
252  *
253  * Returns error if the extent buffer cannot be mapped.
254  */
255 static int csum_tree_block(struct extent_buffer *buf, u8 *result)
256 {
257 	struct btrfs_fs_info *fs_info = buf->fs_info;
258 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
259 	unsigned long len;
260 	unsigned long cur_len;
261 	unsigned long offset = BTRFS_CSUM_SIZE;
262 	char *kaddr;
263 	unsigned long map_start;
264 	unsigned long map_len;
265 	int err;
266 
267 	shash->tfm = fs_info->csum_shash;
268 	crypto_shash_init(shash);
269 
270 	len = buf->len - offset;
271 
272 	while (len > 0) {
273 		/*
274 		 * Note: we don't need to check for the err == 1 case here, as
275 		 * with the given combination of 'start = BTRFS_CSUM_SIZE (32)'
276 		 * and 'min_len = 32' and the currently implemented mapping
277 		 * algorithm we cannot cross a page boundary.
278 		 */
279 		err = map_private_extent_buffer(buf, offset, 32,
280 					&kaddr, &map_start, &map_len);
281 		if (WARN_ON(err))
282 			return err;
283 		cur_len = min(len, map_len - (offset - map_start));
284 		crypto_shash_update(shash, kaddr + offset - map_start, cur_len);
285 		len -= cur_len;
286 		offset += cur_len;
287 	}
288 	memset(result, 0, BTRFS_CSUM_SIZE);
289 
290 	crypto_shash_final(shash, result);
291 
292 	return 0;
293 }
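
/*
 * Typical caller pattern (sketch, mirroring csum_dirty_buffer() below):
 *
 *	u8 result[BTRFS_CSUM_SIZE];
 *
 *	if (csum_tree_block(eb, result))
 *		return -EINVAL;
 *	write_extent_buffer(eb, result, 0, csum_size);
 */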
294 
295 /*
296  * we can't consider a given block up to date unless the transid of the
297  * block matches the transid in the parent node's pointer.  This is how we
298  * detect blocks that either didn't get written at all or got written
299  * in the wrong place.
300  */
301 static int verify_parent_transid(struct extent_io_tree *io_tree,
302 				 struct extent_buffer *eb, u64 parent_transid,
303 				 int atomic)
304 {
305 	struct extent_state *cached_state = NULL;
306 	int ret;
307 	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);
308 
309 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
310 		return 0;
311 
312 	if (atomic)
313 		return -EAGAIN;
314 
315 	if (need_lock) {
316 		btrfs_tree_read_lock(eb);
317 		btrfs_set_lock_blocking_read(eb);
318 	}
319 
320 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
321 			 &cached_state);
322 	if (extent_buffer_uptodate(eb) &&
323 	    btrfs_header_generation(eb) == parent_transid) {
324 		ret = 0;
325 		goto out;
326 	}
327 	btrfs_err_rl(eb->fs_info,
328 		"parent transid verify failed on %llu wanted %llu found %llu",
329 			eb->start,
330 			parent_transid, btrfs_header_generation(eb));
331 	ret = 1;
332 
333 	/*
334 	 * Things reading via commit roots that don't have normal protection,
335 	 * like send, can have a really old block in cache that may point at a
336 	 * block that has been freed and re-allocated.  So don't clear uptodate
337 	 * if we find an eb that is under IO (dirty/writeback) because we could
338 	 * end up reading in the stale data and then writing it back out and
339 	 * making everybody very sad.
340 	 */
341 	if (!extent_buffer_under_io(eb))
342 		clear_extent_buffer_uptodate(eb);
343 out:
344 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
345 			     &cached_state);
346 	if (need_lock)
347 		btrfs_tree_read_unlock_blocking(eb);
348 	return ret;
349 }
350 
351 static bool btrfs_supported_super_csum(u16 csum_type)
352 {
353 	switch (csum_type) {
354 	case BTRFS_CSUM_TYPE_CRC32:
355 		return true;
356 	default:
357 		return false;
358 	}
359 }
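
/*
 * Only BTRFS_CSUM_TYPE_CRC32 is supported at this revision.  Supporting
 * another algorithm (e.g. a hypothetical BTRFS_CSUM_TYPE_XXHASH) would
 * mean adding its case above and providing the matching crypto_shash
 * backend for fs_info->csum_shash at mount time.
 */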
360 
361 /*
362  * Return 0 if the superblock checksum type matches the checksum value of that
363  * algorithm. Pass the raw disk superblock data.
364  */
365 static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
366 				  char *raw_disk_sb)
367 {
368 	struct btrfs_super_block *disk_sb =
369 		(struct btrfs_super_block *)raw_disk_sb;
370 	char result[BTRFS_CSUM_SIZE];
371 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
372 
373 	shash->tfm = fs_info->csum_shash;
374 	crypto_shash_init(shash);
375 
376 	/*
377 	 * The super_block structure does not span the whole
378 	 * BTRFS_SUPER_INFO_SIZE range; we expect that the unused space is
379 	 * filled with zeros and is included in the checksum.
380 	 */
381 	crypto_shash_update(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
382 			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
383 	crypto_shash_final(shash, result);
384 
385 	if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb)))
386 		return 1;
387 
388 	return 0;
389 }
390 
391 int btrfs_verify_level_key(struct extent_buffer *eb, int level,
392 			   struct btrfs_key *first_key, u64 parent_transid)
393 {
394 	struct btrfs_fs_info *fs_info = eb->fs_info;
395 	int found_level;
396 	struct btrfs_key found_key;
397 	int ret;
398 
399 	found_level = btrfs_header_level(eb);
400 	if (found_level != level) {
401 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
402 		     KERN_ERR "BTRFS: tree level check failed\n");
403 		btrfs_err(fs_info,
404 "tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
405 			  eb->start, level, found_level);
406 		return -EIO;
407 	}
408 
409 	if (!first_key)
410 		return 0;
411 
412 	/*
413 	 * For live tree blocks (new tree blocks in the current transaction),
414 	 * we need proper lock context to avoid races, which is impossible here.
415 	 * So we only check tree blocks that were read from disk, whose
416 	 * generation <= fs_info->last_trans_committed.
417 	 */
418 	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
419 		return 0;
420 
421 	/* We have @first_key, so this @eb must have at least one item */
422 	if (btrfs_header_nritems(eb) == 0) {
423 		btrfs_err(fs_info,
424 		"invalid tree nritems, bytenr=%llu nritems=0 expect >0",
425 			  eb->start);
426 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
427 		return -EUCLEAN;
428 	}
429 
430 	if (found_level)
431 		btrfs_node_key_to_cpu(eb, &found_key, 0);
432 	else
433 		btrfs_item_key_to_cpu(eb, &found_key, 0);
434 	ret = btrfs_comp_cpu_keys(first_key, &found_key);
435 
436 	if (ret) {
437 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
438 		     KERN_ERR "BTRFS: tree first key check failed\n");
439 		btrfs_err(fs_info,
440 "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
441 			  eb->start, parent_transid, first_key->objectid,
442 			  first_key->type, first_key->offset,
443 			  found_key.objectid, found_key.type,
444 			  found_key.offset);
445 	}
446 	return ret;
447 }
448 
449 /*
450  * helper to read a given tree block, doing retries as required when
451  * the checksums don't match and we have alternate mirrors to try.
452  *
453  * @parent_transid:	expected transid, skip check if 0
454  * @level:		expected level, mandatory check
455  * @first_key:		expected key of first slot, skip check if NULL
456  */
457 static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
458 					  u64 parent_transid, int level,
459 					  struct btrfs_key *first_key)
460 {
461 	struct btrfs_fs_info *fs_info = eb->fs_info;
462 	struct extent_io_tree *io_tree;
463 	int failed = 0;
464 	int ret;
465 	int num_copies = 0;
466 	int mirror_num = 0;
467 	int failed_mirror = 0;
468 
469 	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
470 	while (1) {
471 		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
472 		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
473 		if (!ret) {
474 			if (verify_parent_transid(io_tree, eb,
475 						   parent_transid, 0))
476 				ret = -EIO;
477 			else if (btrfs_verify_level_key(eb, level,
478 						first_key, parent_transid))
479 				ret = -EUCLEAN;
480 			else
481 				break;
482 		}
483 
484 		num_copies = btrfs_num_copies(fs_info,
485 					      eb->start, eb->len);
486 		if (num_copies == 1)
487 			break;
488 
489 		if (!failed_mirror) {
490 			failed = 1;
491 			failed_mirror = eb->read_mirror;
492 		}
493 
494 		mirror_num++;
495 		if (mirror_num == failed_mirror)
496 			mirror_num++;
497 
498 		if (mirror_num > num_copies)
499 			break;
500 	}
501 
502 	if (failed && !ret && failed_mirror)
503 		btrfs_repair_eb_io_failure(eb, failed_mirror);
504 
505 	return ret;
506 }
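
/*
 * Worked example: with num_copies == 2, the first pass reads with
 * mirror_num == 0 (any mirror).  If that fails on mirror 1,
 * failed_mirror becomes 1, mirror_num is bumped past it to 2 and the
 * read is retried once; a successful retry then rewrites the bad copy
 * via btrfs_repair_eb_io_failure().
 */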
507 
508 /*
509  * checksum a dirty tree block before IO.  This has extra checks to make sure
510  * we only fill in the checksum field in the first page of a multi-page block
511  */
512 
513 static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
514 {
515 	u64 start = page_offset(page);
516 	u64 found_start;
517 	u8 result[BTRFS_CSUM_SIZE];
518 	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
519 	struct extent_buffer *eb;
520 	int ret;
521 
522 	eb = (struct extent_buffer *)page->private;
523 	if (page != eb->pages[0])
524 		return 0;
525 
526 	found_start = btrfs_header_bytenr(eb);
527 	/*
528 	 * Please do not consolidate these warnings into a single if.
529 	 * It is useful to know what went wrong.
530 	 */
531 	if (WARN_ON(found_start != start))
532 		return -EUCLEAN;
533 	if (WARN_ON(!PageUptodate(page)))
534 		return -EUCLEAN;
535 
536 	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
537 			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
538 
539 	if (csum_tree_block(eb, result))
540 		return -EINVAL;
541 
542 	if (btrfs_header_level(eb))
543 		ret = btrfs_check_node(eb);
544 	else
545 		ret = btrfs_check_leaf_full(eb);
546 
547 	if (ret < 0) {
548 		btrfs_err(fs_info,
549 		"block=%llu write time tree block corruption detected",
550 			  eb->start);
551 		return ret;
552 	}
553 	write_extent_buffer(eb, result, 0, csum_size);
554 
555 	return 0;
556 }
557 
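/*
 * Return 0 if the fsid in @eb's header matches this filesystem or one
 * of its seed devices, 1 otherwise.
 */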
558 static int check_tree_block_fsid(struct extent_buffer *eb)
559 {
560 	struct btrfs_fs_info *fs_info = eb->fs_info;
561 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
562 	u8 fsid[BTRFS_FSID_SIZE];
563 	int ret = 1;
564 
565 	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
566 	while (fs_devices) {
567 		u8 *metadata_uuid;
568 
569 		/*
570 		 * Checking the incompat flag is only valid for the current
571 		 * fs. For seed devices it's forbidden to have their uuid
572 		 * changed so reading ->fsid in this case is fine
573 		 * changed, so reading ->fsid in this case is fine
574 		if (fs_devices == fs_info->fs_devices &&
575 		    btrfs_fs_incompat(fs_info, METADATA_UUID))
576 			metadata_uuid = fs_devices->metadata_uuid;
577 		else
578 			metadata_uuid = fs_devices->fsid;
579 
580 		if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
581 			ret = 0;
582 			break;
583 		}
584 		fs_devices = fs_devices->seed;
585 	}
586 	return ret;
587 }
588 
589 static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
590 				      u64 phy_offset, struct page *page,
591 				      u64 start, u64 end, int mirror)
592 {
593 	u64 found_start;
594 	int found_level;
595 	struct extent_buffer *eb;
596 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
597 	struct btrfs_fs_info *fs_info = root->fs_info;
598 	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
599 	int ret = 0;
600 	u8 result[BTRFS_CSUM_SIZE];
601 	int reads_done;
602 
603 	if (!page->private)
604 		goto out;
605 
606 	eb = (struct extent_buffer *)page->private;
607 
608 	/* the pending IO might have been the only thing that kept this buffer
609 	 * in memory.  Make sure we have a ref for all the other checks
610 	 */
611 	extent_buffer_get(eb);
612 
613 	reads_done = atomic_dec_and_test(&eb->io_pages);
614 	if (!reads_done)
615 		goto err;
616 
617 	eb->read_mirror = mirror;
618 	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
619 		ret = -EIO;
620 		goto err;
621 	}
622 
623 	found_start = btrfs_header_bytenr(eb);
624 	if (found_start != eb->start) {
625 		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
626 			     eb->start, found_start);
627 		ret = -EIO;
628 		goto err;
629 	}
630 	if (check_tree_block_fsid(eb)) {
631 		btrfs_err_rl(fs_info, "bad fsid on block %llu",
632 			     eb->start);
633 		ret = -EIO;
634 		goto err;
635 	}
636 	found_level = btrfs_header_level(eb);
637 	if (found_level >= BTRFS_MAX_LEVEL) {
638 		btrfs_err(fs_info, "bad tree block level %d on %llu",
639 			  (int)btrfs_header_level(eb), eb->start);
640 		ret = -EIO;
641 		goto err;
642 	}
643 
644 	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
645 				       eb, found_level);
646 
647 	ret = csum_tree_block(eb, result);
648 	if (ret)
649 		goto err;
650 
651 	if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
652 		u32 val;
653 		u32 found = 0;
654 
655 		memcpy(&found, result, csum_size);
656 
657 		read_extent_buffer(eb, &val, 0, csum_size);
658 		btrfs_warn_rl(fs_info,
659 		"%s checksum verify failed on %llu wanted %x found %x level %d",
660 			      fs_info->sb->s_id, eb->start,
661 			      val, found, btrfs_header_level(eb));
662 		ret = -EUCLEAN;
663 		goto err;
664 	}
665 
666 	/*
667 	 * If this is a leaf block and it is corrupt, set the corrupt bit so
668 	 * that we don't try to read the other copies of this block, just
669 	 * return -EIO.
670 	 */
671 	if (found_level == 0 && btrfs_check_leaf_full(eb)) {
672 		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
673 		ret = -EIO;
674 	}
675 
676 	if (found_level > 0 && btrfs_check_node(eb))
677 		ret = -EIO;
678 
679 	if (!ret)
680 		set_extent_buffer_uptodate(eb);
681 	else
682 		btrfs_err(fs_info,
683 			  "block=%llu read time tree block corruption detected",
684 			  eb->start);
685 err:
686 	if (reads_done &&
687 	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
688 		btree_readahead_hook(eb, ret);
689 
690 	if (ret) {
691 		/*
692 		 * Our IO error hook is going to decrement io_pages
693 		 * again, so we have to make sure it has something
694 		 * to decrement.
695 		 */
696 		atomic_inc(&eb->io_pages);
697 		clear_extent_buffer_uptodate(eb);
698 	}
699 	free_extent_buffer(eb);
700 out:
701 	return ret;
702 }
703 
704 static void end_workqueue_bio(struct bio *bio)
705 {
706 	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
707 	struct btrfs_fs_info *fs_info;
708 	struct btrfs_workqueue *wq;
709 	btrfs_work_func_t func;
710 
711 	fs_info = end_io_wq->info;
712 	end_io_wq->status = bio->bi_status;
713 
714 	if (bio_op(bio) == REQ_OP_WRITE) {
715 		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
716 			wq = fs_info->endio_meta_write_workers;
717 			func = btrfs_endio_meta_write_helper;
718 		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
719 			wq = fs_info->endio_freespace_worker;
720 			func = btrfs_freespace_write_helper;
721 		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
722 			wq = fs_info->endio_raid56_workers;
723 			func = btrfs_endio_raid56_helper;
724 		} else {
725 			wq = fs_info->endio_write_workers;
726 			func = btrfs_endio_write_helper;
727 		}
728 	} else {
729 		if (unlikely(end_io_wq->metadata ==
730 			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
731 			wq = fs_info->endio_repair_workers;
732 			func = btrfs_endio_repair_helper;
733 		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
734 			wq = fs_info->endio_raid56_workers;
735 			func = btrfs_endio_raid56_helper;
736 		} else if (end_io_wq->metadata) {
737 			wq = fs_info->endio_meta_workers;
738 			func = btrfs_endio_meta_helper;
739 		} else {
740 			wq = fs_info->endio_workers;
741 			func = btrfs_endio_helper;
742 		}
743 	}
744 
745 	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
746 	btrfs_queue_work(wq, &end_io_wq->work);
747 }
748 
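/*
 * Defer @bio's completion to task context: the original bi_end_io and
 * bi_private are stashed in a btrfs_end_io_wq, end_workqueue_bio()
 * queues the work on the queue matching @metadata, and
 * end_workqueue_fn() finally restores and calls the saved end_io.
 */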
749 blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
750 			enum btrfs_wq_endio_type metadata)
751 {
752 	struct btrfs_end_io_wq *end_io_wq;
753 
754 	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
755 	if (!end_io_wq)
756 		return BLK_STS_RESOURCE;
757 
758 	end_io_wq->private = bio->bi_private;
759 	end_io_wq->end_io = bio->bi_end_io;
760 	end_io_wq->info = info;
761 	end_io_wq->status = 0;
762 	end_io_wq->bio = bio;
763 	end_io_wq->metadata = metadata;
764 
765 	bio->bi_private = end_io_wq;
766 	bio->bi_end_io = end_workqueue_bio;
767 	return 0;
768 }
769 
770 static void run_one_async_start(struct btrfs_work *work)
771 {
772 	struct async_submit_bio *async;
773 	blk_status_t ret;
774 
775 	async = container_of(work, struct  async_submit_bio, work);
776 	ret = async->submit_bio_start(async->private_data, async->bio,
777 				      async->bio_offset);
778 	if (ret)
779 		async->status = ret;
780 }
781 
782 /*
783  * In order to insert checksums into the metadata in large chunks, we wait
784  * until bio submission time.  All the pages in the bio are checksummed and
785  * sums are attached to the ordered extent record.
786  *
787  * At IO completion time the csums attached on the ordered extent record are
788  * inserted into the tree.
789  */
790 static void run_one_async_done(struct btrfs_work *work)
791 {
792 	struct async_submit_bio *async;
793 	struct inode *inode;
794 	blk_status_t ret;
795 
796 	async = container_of(work, struct  async_submit_bio, work);
797 	inode = async->private_data;
798 
799 	/* If an error occurred we just want to clean up the bio and move on */
800 	if (async->status) {
801 		async->bio->bi_status = async->status;
802 		bio_endio(async->bio);
803 		return;
804 	}
805 
806 	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
807 			async->mirror_num, 1);
808 	if (ret) {
809 		async->bio->bi_status = ret;
810 		bio_endio(async->bio);
811 	}
812 }
813 
814 static void run_one_async_free(struct btrfs_work *work)
815 {
816 	struct async_submit_bio *async;
817 
818 	async = container_of(work, struct  async_submit_bio, work);
819 	kfree(async);
820 }
821 
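/*
 * Queue a bio for async submission.  The work runs in three ordered
 * phases: run_one_async_start() calls @submit_bio_start (the expensive
 * checksumming), run_one_async_done() maps and submits the bio, and
 * run_one_async_free() releases the async_submit_bio.
 */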
822 blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
823 				 int mirror_num, unsigned long bio_flags,
824 				 u64 bio_offset, void *private_data,
825 				 extent_submit_bio_start_t *submit_bio_start)
826 {
827 	struct async_submit_bio *async;
828 
829 	async = kmalloc(sizeof(*async), GFP_NOFS);
830 	if (!async)
831 		return BLK_STS_RESOURCE;
832 
833 	async->private_data = private_data;
834 	async->bio = bio;
835 	async->mirror_num = mirror_num;
836 	async->submit_bio_start = submit_bio_start;
837 
838 	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
839 			run_one_async_done, run_one_async_free);
840 
841 	async->bio_offset = bio_offset;
842 
843 	async->status = 0;
844 
845 	if (op_is_sync(bio->bi_opf))
846 		btrfs_set_work_high_priority(&async->work);
847 
848 	btrfs_queue_work(fs_info->workers, &async->work);
849 	return 0;
850 }
851 
852 static blk_status_t btree_csum_one_bio(struct bio *bio)
853 {
854 	struct bio_vec *bvec;
855 	struct btrfs_root *root;
856 	int ret = 0;
857 	struct bvec_iter_all iter_all;
858 
859 	ASSERT(!bio_flagged(bio, BIO_CLONED));
860 	bio_for_each_segment_all(bvec, bio, iter_all) {
861 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
862 		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
863 		if (ret)
864 			break;
865 	}
866 
867 	return errno_to_blk_status(ret);
868 }
869 
870 static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
871 					     u64 bio_offset)
872 {
873 	/*
874 	 * When we're called for a write, we're already in the async
875 	 * context.  Just checksum here; run_one_async_done() maps and submits.
876 	 */
877 	return btree_csum_one_bio(bio);
878 }
879 
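/*
 * Decide whether a metadata write should take the async checksumming
 * path: return 0 (checksum inline) if the inode already has synchronous
 * writers or the checksum implementation is fast, 1 (go async)
 * otherwise.
 */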
880 static int check_async_write(struct btrfs_fs_info *fs_info,
881 			     struct btrfs_inode *bi)
882 {
883 	if (atomic_read(&bi->sync_writers))
884 		return 0;
885 	if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
886 		return 0;
887 	return 1;
888 }
889 
890 static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
891 					  int mirror_num,
892 					  unsigned long bio_flags)
893 {
894 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
895 	int async = check_async_write(fs_info, BTRFS_I(inode));
896 	blk_status_t ret;
897 
898 	if (bio_op(bio) != REQ_OP_WRITE) {
899 		/*
900 		 * called for a read, do the setup so that checksum validation
901 		 * can happen in the async kernel threads
902 		 */
903 		ret = btrfs_bio_wq_end_io(fs_info, bio,
904 					  BTRFS_WQ_ENDIO_METADATA);
905 		if (ret)
906 			goto out_w_error;
907 		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
908 	} else if (!async) {
909 		ret = btree_csum_one_bio(bio);
910 		if (ret)
911 			goto out_w_error;
912 		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
913 	} else {
914 		/*
915 		 * kthread helpers are used to submit writes so that
916 		 * checksumming can happen in parallel across all CPUs
917 		 */
918 		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
919 					  0, inode, btree_submit_bio_start);
920 	}
921 
922 	if (ret)
923 		goto out_w_error;
924 	return 0;
925 
926 out_w_error:
927 	bio->bi_status = ret;
928 	bio_endio(bio);
929 	return ret;
930 }
931 
932 #ifdef CONFIG_MIGRATION
933 static int btree_migratepage(struct address_space *mapping,
934 			struct page *newpage, struct page *page,
935 			enum migrate_mode mode)
936 {
937 	/*
938 	 * we can't safely write a btree page from here,
939 	 * because we haven't done the locking hook
940 	 */
941 	if (PageDirty(page))
942 		return -EAGAIN;
943 	/*
944 	 * Buffers may be managed in a filesystem specific way.
945 	 * We must have no buffers or drop them.
946 	 */
947 	if (page_has_private(page) &&
948 	    !try_to_release_page(page, GFP_KERNEL))
949 		return -EAGAIN;
950 	return migrate_page(mapping, newpage, page, mode);
951 }
952 #endif
953 
955 static int btree_writepages(struct address_space *mapping,
956 			    struct writeback_control *wbc)
957 {
958 	struct btrfs_fs_info *fs_info;
959 	int ret;
960 
961 	if (wbc->sync_mode == WB_SYNC_NONE) {
963 		if (wbc->for_kupdate)
964 			return 0;
965 
966 		fs_info = BTRFS_I(mapping->host)->root->fs_info;
967 		/* this is a bit racy, but that's ok */
968 		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
969 					     BTRFS_DIRTY_METADATA_THRESH,
970 					     fs_info->dirty_metadata_batch);
971 		if (ret < 0)
972 			return 0;
973 	}
974 	return btree_write_cache_pages(mapping, wbc);
975 }
976 
977 static int btree_readpage(struct file *file, struct page *page)
978 {
979 	struct extent_io_tree *tree;
980 	tree = &BTRFS_I(page->mapping->host)->io_tree;
981 	return extent_read_full_page(tree, page, btree_get_extent, 0);
982 }
983 
984 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
985 {
986 	if (PageWriteback(page) || PageDirty(page))
987 		return 0;
988 
989 	return try_release_extent_buffer(page);
990 }
991 
992 static void btree_invalidatepage(struct page *page, unsigned int offset,
993 				 unsigned int length)
994 {
995 	struct extent_io_tree *tree;
996 	tree = &BTRFS_I(page->mapping->host)->io_tree;
997 	extent_invalidatepage(tree, page, offset);
998 	btree_releasepage(page, GFP_NOFS);
999 	if (PagePrivate(page)) {
1000 		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
1001 			   "page private not zero on page %llu",
1002 			   (unsigned long long)page_offset(page));
1003 		ClearPagePrivate(page);
1004 		set_page_private(page, 0);
1005 		put_page(page);
1006 	}
1007 }
1008 
1009 static int btree_set_page_dirty(struct page *page)
1010 {
1011 #ifdef DEBUG
1012 	struct extent_buffer *eb;
1013 
1014 	BUG_ON(!PagePrivate(page));
1015 	eb = (struct extent_buffer *)page->private;
1016 	BUG_ON(!eb);
1017 	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
1018 	BUG_ON(!atomic_read(&eb->refs));
1019 	btrfs_assert_tree_locked(eb);
1020 #endif
1021 	return __set_page_dirty_nobuffers(page);
1022 }
1023 
1024 static const struct address_space_operations btree_aops = {
1025 	.readpage	= btree_readpage,
1026 	.writepages	= btree_writepages,
1027 	.releasepage	= btree_releasepage,
1028 	.invalidatepage = btree_invalidatepage,
1029 #ifdef CONFIG_MIGRATION
1030 	.migratepage	= btree_migratepage,
1031 #endif
1032 	.set_page_dirty = btree_set_page_dirty,
1033 };
1034 
1035 void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
1036 {
1037 	struct extent_buffer *buf = NULL;
1038 	int ret;
1039 
1040 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
1041 	if (IS_ERR(buf))
1042 		return;
1043 
1044 	ret = read_extent_buffer_pages(buf, WAIT_NONE, 0);
1045 	if (ret < 0)
1046 		free_extent_buffer_stale(buf);
1047 	else
1048 		free_extent_buffer(buf);
1049 }
1050 
1051 struct extent_buffer *btrfs_find_create_tree_block(
1052 						struct btrfs_fs_info *fs_info,
1053 						u64 bytenr)
1054 {
1055 	if (btrfs_is_testing(fs_info))
1056 		return alloc_test_extent_buffer(fs_info, bytenr);
1057 	return alloc_extent_buffer(fs_info, bytenr);
1058 }
1059 
1060 /*
1061  * Read tree block at logical address @bytenr and do basic but critical
1062  * verification.
1063  *
1064  * @parent_transid:	expected transid of this tree block, skip check if 0
1065  * @level:		expected level, mandatory check
1066  * @first_key:		expected key in slot 0, skip check if NULL
1067  */
1068 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
1069 				      u64 parent_transid, int level,
1070 				      struct btrfs_key *first_key)
1071 {
1072 	struct extent_buffer *buf = NULL;
1073 	int ret;
1074 
1075 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
1076 	if (IS_ERR(buf))
1077 		return buf;
1078 
1079 	ret = btree_read_extent_buffer_pages(buf, parent_transid,
1080 					     level, first_key);
1081 	if (ret) {
1082 		free_extent_buffer_stale(buf);
1083 		return ERR_PTR(ret);
1084 	}
1085 	return buf;
1087 }
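
/*
 * Example caller (sketch, mirroring btrfs_read_tree_root() below):
 *
 *	level = btrfs_root_level(&root->root_item);
 *	root->node = read_tree_block(fs_info,
 *				     btrfs_root_bytenr(&root->root_item),
 *				     generation, level, NULL);
 *	if (IS_ERR(root->node))
 *		ret = PTR_ERR(root->node);
 */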
1088 
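/*
 * Clear the dirty bit of a tree block that was created in the currently
 * running transaction, adjusting the dirty-metadata accounting to
 * match.
 */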
1089 void btrfs_clean_tree_block(struct extent_buffer *buf)
1090 {
1091 	struct btrfs_fs_info *fs_info = buf->fs_info;
1092 	if (btrfs_header_generation(buf) ==
1093 	    fs_info->running_transaction->transid) {
1094 		btrfs_assert_tree_locked(buf);
1095 
1096 		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1097 			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1098 						 -buf->len,
1099 						 fs_info->dirty_metadata_batch);
1100 			/* ugh, clear_extent_buffer_dirty needs to lock the page */
1101 			btrfs_set_lock_blocking_write(buf);
1102 			clear_extent_buffer_dirty(buf);
1103 		}
1104 	}
1105 }
1106 
1107 static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
1108 {
1109 	struct btrfs_subvolume_writers *writers;
1110 	int ret;
1111 
1112 	writers = kmalloc(sizeof(*writers), GFP_NOFS);
1113 	if (!writers)
1114 		return ERR_PTR(-ENOMEM);
1115 
1116 	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
1117 	if (ret < 0) {
1118 		kfree(writers);
1119 		return ERR_PTR(ret);
1120 	}
1121 
1122 	init_waitqueue_head(&writers->wait);
1123 	return writers;
1124 }
1125 
1126 static void
1127 btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
1128 {
1129 	percpu_counter_destroy(&writers->counter);
1130 	kfree(writers);
1131 }
1132 
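/*
 * Initialize the in-memory fields of a freshly allocated btrfs_root for
 * the tree identified by @objectid.  No disk IO is done here.
 */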
1133 static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1134 			 u64 objectid)
1135 {
1136 	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
1137 	root->node = NULL;
1138 	root->commit_root = NULL;
1139 	root->state = 0;
1140 	root->orphan_cleanup_state = 0;
1141 
1142 	root->last_trans = 0;
1143 	root->highest_objectid = 0;
1144 	root->nr_delalloc_inodes = 0;
1145 	root->nr_ordered_extents = 0;
1146 	root->inode_tree = RB_ROOT;
1147 	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1148 	root->block_rsv = NULL;
1149 
1150 	INIT_LIST_HEAD(&root->dirty_list);
1151 	INIT_LIST_HEAD(&root->root_list);
1152 	INIT_LIST_HEAD(&root->delalloc_inodes);
1153 	INIT_LIST_HEAD(&root->delalloc_root);
1154 	INIT_LIST_HEAD(&root->ordered_extents);
1155 	INIT_LIST_HEAD(&root->ordered_root);
1156 	INIT_LIST_HEAD(&root->reloc_dirty_list);
1157 	INIT_LIST_HEAD(&root->logged_list[0]);
1158 	INIT_LIST_HEAD(&root->logged_list[1]);
1159 	spin_lock_init(&root->inode_lock);
1160 	spin_lock_init(&root->delalloc_lock);
1161 	spin_lock_init(&root->ordered_extent_lock);
1162 	spin_lock_init(&root->accounting_lock);
1163 	spin_lock_init(&root->log_extents_lock[0]);
1164 	spin_lock_init(&root->log_extents_lock[1]);
1165 	spin_lock_init(&root->qgroup_meta_rsv_lock);
1166 	mutex_init(&root->objectid_mutex);
1167 	mutex_init(&root->log_mutex);
1168 	mutex_init(&root->ordered_extent_mutex);
1169 	mutex_init(&root->delalloc_mutex);
1170 	init_waitqueue_head(&root->log_writer_wait);
1171 	init_waitqueue_head(&root->log_commit_wait[0]);
1172 	init_waitqueue_head(&root->log_commit_wait[1]);
1173 	INIT_LIST_HEAD(&root->log_ctxs[0]);
1174 	INIT_LIST_HEAD(&root->log_ctxs[1]);
1175 	atomic_set(&root->log_commit[0], 0);
1176 	atomic_set(&root->log_commit[1], 0);
1177 	atomic_set(&root->log_writers, 0);
1178 	atomic_set(&root->log_batch, 0);
1179 	refcount_set(&root->refs, 1);
1180 	atomic_set(&root->will_be_snapshotted, 0);
1181 	atomic_set(&root->snapshot_force_cow, 0);
1182 	atomic_set(&root->nr_swapfiles, 0);
1183 	root->log_transid = 0;
1184 	root->log_transid_committed = -1;
1185 	root->last_log_commit = 0;
1186 	if (!dummy)
1187 		extent_io_tree_init(fs_info, &root->dirty_log_pages,
1188 				    IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);
1189 
1190 	memset(&root->root_key, 0, sizeof(root->root_key));
1191 	memset(&root->root_item, 0, sizeof(root->root_item));
1192 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1193 	if (!dummy)
1194 		root->defrag_trans_start = fs_info->generation;
1195 	else
1196 		root->defrag_trans_start = 0;
1197 	root->root_key.objectid = objectid;
1198 	root->anon_dev = 0;
1199 
1200 	spin_lock_init(&root->root_item_lock);
1201 	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
1202 }
1203 
1204 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
1205 		gfp_t flags)
1206 {
1207 	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
1208 	if (root)
1209 		root->fs_info = fs_info;
1210 	return root;
1211 }
1212 
1213 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1214 /* Should only be used by the testing infrastructure */
1215 struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
1216 {
1217 	struct btrfs_root *root;
1218 
1219 	if (!fs_info)
1220 		return ERR_PTR(-EINVAL);
1221 
1222 	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1223 	if (!root)
1224 		return ERR_PTR(-ENOMEM);
1225 
1226 	/* We don't use the stripesize in selftest, set it as sectorsize */
1227 	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
1228 	root->alloc_bytenr = 0;
1229 
1230 	return root;
1231 }
1232 #endif
1233 
1234 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1235 				     u64 objectid)
1236 {
1237 	struct btrfs_fs_info *fs_info = trans->fs_info;
1238 	struct extent_buffer *leaf;
1239 	struct btrfs_root *tree_root = fs_info->tree_root;
1240 	struct btrfs_root *root;
1241 	struct btrfs_key key;
1242 	unsigned int nofs_flag;
1243 	int ret = 0;
1244 	uuid_le uuid = NULL_UUID_LE;
1245 
1246 	/*
1247 	 * We're holding a transaction handle, so use a NOFS memory allocation
1248 	 * context to avoid deadlock if reclaim happens.
1249 	 */
1250 	nofs_flag = memalloc_nofs_save();
1251 	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1252 	memalloc_nofs_restore(nofs_flag);
1253 	if (!root)
1254 		return ERR_PTR(-ENOMEM);
1255 
1256 	__setup_root(root, fs_info, objectid);
1257 	root->root_key.objectid = objectid;
1258 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1259 	root->root_key.offset = 0;
1260 
1261 	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
1262 	if (IS_ERR(leaf)) {
1263 		ret = PTR_ERR(leaf);
1264 		leaf = NULL;
1265 		goto fail;
1266 	}
1267 
1268 	root->node = leaf;
1269 	btrfs_mark_buffer_dirty(leaf);
1270 
1271 	root->commit_root = btrfs_root_node(root);
1272 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
1273 
1274 	root->root_item.flags = 0;
1275 	root->root_item.byte_limit = 0;
1276 	btrfs_set_root_bytenr(&root->root_item, leaf->start);
1277 	btrfs_set_root_generation(&root->root_item, trans->transid);
1278 	btrfs_set_root_level(&root->root_item, 0);
1279 	btrfs_set_root_refs(&root->root_item, 1);
1280 	btrfs_set_root_used(&root->root_item, leaf->len);
1281 	btrfs_set_root_last_snapshot(&root->root_item, 0);
1282 	btrfs_set_root_dirid(&root->root_item, 0);
1283 	if (is_fstree(objectid))
1284 		uuid_le_gen(&uuid);
1285 	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1286 	root->root_item.drop_level = 0;
1287 
1288 	key.objectid = objectid;
1289 	key.type = BTRFS_ROOT_ITEM_KEY;
1290 	key.offset = 0;
1291 	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1292 	if (ret)
1293 		goto fail;
1294 
1295 	btrfs_tree_unlock(leaf);
1296 
1297 	return root;
1298 
1299 fail:
1300 	if (leaf) {
1301 		btrfs_tree_unlock(leaf);
1302 		free_extent_buffer(root->commit_root);
1303 		free_extent_buffer(leaf);
1304 	}
1305 	kfree(root);
1306 
1307 	return ERR_PTR(ret);
1308 }
1309 
1310 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1311 					 struct btrfs_fs_info *fs_info)
1312 {
1313 	struct btrfs_root *root;
1314 	struct extent_buffer *leaf;
1315 
1316 	root = btrfs_alloc_root(fs_info, GFP_NOFS);
1317 	if (!root)
1318 		return ERR_PTR(-ENOMEM);
1319 
1320 	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1321 
1322 	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1323 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1324 	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1325 
1326 	/*
1327 	 * DON'T set REF_COWS for log trees
1328 	 *
1329 	 * log trees do not get reference counted because they go away
1330 	 * before a real commit is actually done.  They do store pointers
1331 	 * to file data extents, and those reference counts still get
1332 	 * updated (along with back refs to the log tree).
1333 	 */
1334 
1335 	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
1336 			NULL, 0, 0, 0);
1337 	if (IS_ERR(leaf)) {
1338 		kfree(root);
1339 		return ERR_CAST(leaf);
1340 	}
1341 
1342 	root->node = leaf;
1343 
1344 	btrfs_mark_buffer_dirty(root->node);
1345 	btrfs_tree_unlock(root->node);
1346 	return root;
1347 }
1348 
1349 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1350 			     struct btrfs_fs_info *fs_info)
1351 {
1352 	struct btrfs_root *log_root;
1353 
1354 	log_root = alloc_log_tree(trans, fs_info);
1355 	if (IS_ERR(log_root))
1356 		return PTR_ERR(log_root);
1357 	WARN_ON(fs_info->log_root_tree);
1358 	fs_info->log_root_tree = log_root;
1359 	return 0;
1360 }
1361 
1362 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1363 		       struct btrfs_root *root)
1364 {
1365 	struct btrfs_fs_info *fs_info = root->fs_info;
1366 	struct btrfs_root *log_root;
1367 	struct btrfs_inode_item *inode_item;
1368 
1369 	log_root = alloc_log_tree(trans, fs_info);
1370 	if (IS_ERR(log_root))
1371 		return PTR_ERR(log_root);
1372 
1373 	log_root->last_trans = trans->transid;
1374 	log_root->root_key.offset = root->root_key.objectid;
1375 
1376 	inode_item = &log_root->root_item.inode;
1377 	btrfs_set_stack_inode_generation(inode_item, 1);
1378 	btrfs_set_stack_inode_size(inode_item, 3);
1379 	btrfs_set_stack_inode_nlink(inode_item, 1);
1380 	btrfs_set_stack_inode_nbytes(inode_item,
1381 				     fs_info->nodesize);
1382 	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1383 
1384 	btrfs_set_root_node(&log_root->root_item, log_root->node);
1385 
1386 	WARN_ON(root->log_root);
1387 	root->log_root = log_root;
1388 	root->log_transid = 0;
1389 	root->log_transid_committed = -1;
1390 	root->last_log_commit = 0;
1391 	return 0;
1392 }
1393 
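/*
 * Read a root from disk: allocate the in-memory btrfs_root, look up its
 * root item in @tree_root and read the tree block it points to,
 * verifying generation and level.
 */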
1394 static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1395 					       struct btrfs_key *key)
1396 {
1397 	struct btrfs_root *root;
1398 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
1399 	struct btrfs_path *path;
1400 	u64 generation;
1401 	int ret;
1402 	int level;
1403 
1404 	path = btrfs_alloc_path();
1405 	if (!path)
1406 		return ERR_PTR(-ENOMEM);
1407 
1408 	root = btrfs_alloc_root(fs_info, GFP_NOFS);
1409 	if (!root) {
1410 		ret = -ENOMEM;
1411 		goto alloc_fail;
1412 	}
1413 
1414 	__setup_root(root, fs_info, key->objectid);
1415 
1416 	ret = btrfs_find_root(tree_root, key, path,
1417 			      &root->root_item, &root->root_key);
1418 	if (ret) {
1419 		if (ret > 0)
1420 			ret = -ENOENT;
1421 		goto find_fail;
1422 	}
1423 
1424 	generation = btrfs_root_generation(&root->root_item);
1425 	level = btrfs_root_level(&root->root_item);
1426 	root->node = read_tree_block(fs_info,
1427 				     btrfs_root_bytenr(&root->root_item),
1428 				     generation, level, NULL);
1429 	if (IS_ERR(root->node)) {
1430 		ret = PTR_ERR(root->node);
1431 		goto find_fail;
1432 	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1433 		ret = -EIO;
1434 		free_extent_buffer(root->node);
1435 		goto find_fail;
1436 	}
1437 	root->commit_root = btrfs_root_node(root);
1438 out:
1439 	btrfs_free_path(path);
1440 	return root;
1441 
1442 find_fail:
1443 	kfree(root);
1444 alloc_fail:
1445 	root = ERR_PTR(ret);
1446 	goto out;
1447 }
1448 
1449 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1450 				      struct btrfs_key *location)
1451 {
1452 	struct btrfs_root *root;
1453 
1454 	root = btrfs_read_tree_root(tree_root, location);
1455 	if (IS_ERR(root))
1456 		return root;
1457 
1458 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1459 		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
1460 		btrfs_check_and_init_root_item(&root->root_item);
1461 	}
1462 
1463 	return root;
1464 }
1465 
1466 int btrfs_init_fs_root(struct btrfs_root *root)
1467 {
1468 	int ret;
1469 	struct btrfs_subvolume_writers *writers;
1470 
1471 	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1472 	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1473 					GFP_NOFS);
1474 	if (!root->free_ino_pinned || !root->free_ino_ctl) {
1475 		ret = -ENOMEM;
1476 		goto fail;
1477 	}
1478 
1479 	writers = btrfs_alloc_subvolume_writers();
1480 	if (IS_ERR(writers)) {
1481 		ret = PTR_ERR(writers);
1482 		goto fail;
1483 	}
1484 	root->subv_writers = writers;
1485 
1486 	btrfs_init_free_ino_ctl(root);
1487 	spin_lock_init(&root->ino_cache_lock);
1488 	init_waitqueue_head(&root->ino_cache_wait);
1489 
1490 	ret = get_anon_bdev(&root->anon_dev);
1491 	if (ret)
1492 		goto fail;
1493 
1494 	mutex_lock(&root->objectid_mutex);
1495 	ret = btrfs_find_highest_objectid(root,
1496 					&root->highest_objectid);
1497 	if (ret) {
1498 		mutex_unlock(&root->objectid_mutex);
1499 		goto fail;
1500 	}
1501 
1502 	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
1503 
1504 	mutex_unlock(&root->objectid_mutex);
1505 
1506 	return 0;
1507 fail:
1508 	/* The caller is responsible for calling btrfs_free_fs_root */
1509 	return ret;
1510 }
1511 
1512 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1513 					u64 root_id)
1514 {
1515 	struct btrfs_root *root;
1516 
1517 	spin_lock(&fs_info->fs_roots_radix_lock);
1518 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1519 				 (unsigned long)root_id);
1520 	spin_unlock(&fs_info->fs_roots_radix_lock);
1521 	return root;
1522 }
1523 
1524 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1525 			 struct btrfs_root *root)
1526 {
1527 	int ret;
1528 
1529 	ret = radix_tree_preload(GFP_NOFS);
1530 	if (ret)
1531 		return ret;
1532 
1533 	spin_lock(&fs_info->fs_roots_radix_lock);
1534 	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1535 				(unsigned long)root->root_key.objectid,
1536 				root);
1537 	if (ret == 0)
1538 		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1539 	spin_unlock(&fs_info->fs_roots_radix_lock);
1540 	radix_tree_preload_end();
1541 
1542 	return ret;
1543 }
1544 
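/*
 * Get a root by @location.  The well-known roots are returned straight
 * from fs_info; anything else is looked up in the fs_roots radix tree
 * and, on a miss, read from disk, initialized and inserted (retrying
 * the lookup if another task won the insertion race).  With @check_ref,
 * roots whose root_item refcount dropped to zero are rejected.
 */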
1545 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1546 				     struct btrfs_key *location,
1547 				     bool check_ref)
1548 {
1549 	struct btrfs_root *root;
1550 	struct btrfs_path *path;
1551 	struct btrfs_key key;
1552 	int ret;
1553 
1554 	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1555 		return fs_info->tree_root;
1556 	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1557 		return fs_info->extent_root;
1558 	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1559 		return fs_info->chunk_root;
1560 	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1561 		return fs_info->dev_root;
1562 	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1563 		return fs_info->csum_root;
1564 	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1565 		return fs_info->quota_root ? fs_info->quota_root :
1566 					     ERR_PTR(-ENOENT);
1567 	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
1568 		return fs_info->uuid_root ? fs_info->uuid_root :
1569 					    ERR_PTR(-ENOENT);
1570 	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
1571 		return fs_info->free_space_root ? fs_info->free_space_root :
1572 						  ERR_PTR(-ENOENT);
1573 again:
1574 	root = btrfs_lookup_fs_root(fs_info, location->objectid);
1575 	if (root) {
1576 		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
1577 			return ERR_PTR(-ENOENT);
1578 		return root;
1579 	}
1580 
1581 	root = btrfs_read_fs_root(fs_info->tree_root, location);
1582 	if (IS_ERR(root))
1583 		return root;
1584 
1585 	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1586 		ret = -ENOENT;
1587 		goto fail;
1588 	}
1589 
1590 	ret = btrfs_init_fs_root(root);
1591 	if (ret)
1592 		goto fail;
1593 
1594 	path = btrfs_alloc_path();
1595 	if (!path) {
1596 		ret = -ENOMEM;
1597 		goto fail;
1598 	}
1599 	key.objectid = BTRFS_ORPHAN_OBJECTID;
1600 	key.type = BTRFS_ORPHAN_ITEM_KEY;
1601 	key.offset = location->objectid;
1602 
1603 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1604 	btrfs_free_path(path);
1605 	if (ret < 0)
1606 		goto fail;
1607 	if (ret == 0)
1608 		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1609 
1610 	ret = btrfs_insert_fs_root(fs_info, root);
1611 	if (ret) {
1612 		if (ret == -EEXIST) {
1613 			btrfs_free_fs_root(root);
1614 			goto again;
1615 		}
1616 		goto fail;
1617 	}
1618 	return root;
1619 fail:
1620 	btrfs_free_fs_root(root);
1621 	return ERR_PTR(ret);
1622 }
1623 
1624 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1625 {
1626 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1627 	int ret = 0;
1628 	struct btrfs_device *device;
1629 	struct backing_dev_info *bdi;
1630 
1631 	rcu_read_lock();
1632 	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1633 		if (!device->bdev)
1634 			continue;
1635 		bdi = device->bdev->bd_bdi;
1636 		if (bdi_congested(bdi, bdi_bits)) {
1637 			ret = 1;
1638 			break;
1639 		}
1640 	}
1641 	rcu_read_unlock();
1642 	return ret;
1643 }
1644 
1645 /*
1646  * called by the kthread helper functions to finally call the bio end_io
1647  * functions.  This is where read checksum verification actually happens
1648  */
1649 static void end_workqueue_fn(struct btrfs_work *work)
1650 {
1651 	struct bio *bio;
1652 	struct btrfs_end_io_wq *end_io_wq;
1653 
1654 	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
1655 	bio = end_io_wq->bio;
1656 
1657 	bio->bi_status = end_io_wq->status;
1658 	bio->bi_private = end_io_wq->private;
1659 	bio->bi_end_io = end_io_wq->end_io;
1660 	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
1661 	bio_endio(bio);
1662 }
1663 
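/*
 * Background cleaner thread: runs delayed iputs, cleans one deleted
 * snapshot per pass, kicks the inode defragger and deletes unused block
 * groups, then sleeps until woken again.
 */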
1664 static int cleaner_kthread(void *arg)
1665 {
1666 	struct btrfs_root *root = arg;
1667 	struct btrfs_fs_info *fs_info = root->fs_info;
1668 	int again;
1669 
1670 	while (1) {
1671 		again = 0;
1672 
1673 		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1674 
1675 		/* Make the cleaner go to sleep early. */
1676 		if (btrfs_need_cleaner_sleep(fs_info))
1677 			goto sleep;
1678 
1679 		/*
1680 		 * Do not do anything if we might cause open_ctree() to block
1681 		 * before we have finished mounting the filesystem.
1682 		 */
1683 		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1684 			goto sleep;
1685 
1686 		if (!mutex_trylock(&fs_info->cleaner_mutex))
1687 			goto sleep;
1688 
1689 		/*
1690 		 * Recheck in case the status of the fs changed between
1691 		 * the check above and the trylock.
1692 		 */
1693 		if (btrfs_need_cleaner_sleep(fs_info)) {
1694 			mutex_unlock(&fs_info->cleaner_mutex);
1695 			goto sleep;
1696 		}
1697 
1698 		btrfs_run_delayed_iputs(fs_info);
1699 
1700 		again = btrfs_clean_one_deleted_snapshot(root);
1701 		mutex_unlock(&fs_info->cleaner_mutex);
1702 
1703 		/*
1704 		 * The defragger has dealt with the R/O remount and umount,
1705 		 * so we needn't do anything special here.
1706 		 */
1707 		btrfs_run_defrag_inodes(fs_info);
1708 
1709 		/*
1710 		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1711 		 * with relocation (btrfs_relocate_chunk) and relocation
1712 		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1713 		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1714 		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1715 		 * unused block groups.
1716 		 */
1717 		btrfs_delete_unused_bgs(fs_info);
1718 sleep:
1719 		clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1720 		if (kthread_should_park())
1721 			kthread_parkme();
1722 		if (kthread_should_stop())
1723 			return 0;
1724 		if (!again) {
1725 			set_current_state(TASK_INTERRUPTIBLE);
1726 			schedule();
1727 			__set_current_state(TASK_RUNNING);
1728 		}
1729 	}
1730 }
1731 
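/*
 * Background commit thread: periodically commits the running
 * transaction once it is blocked or older than
 * fs_info->commit_interval, then wakes the cleaner and sleeps.
 */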
1732 static int transaction_kthread(void *arg)
1733 {
1734 	struct btrfs_root *root = arg;
1735 	struct btrfs_fs_info *fs_info = root->fs_info;
1736 	struct btrfs_trans_handle *trans;
1737 	struct btrfs_transaction *cur;
1738 	u64 transid;
1739 	time64_t now;
1740 	unsigned long delay;
1741 	bool cannot_commit;
1742 
1743 	do {
1744 		cannot_commit = false;
1745 		delay = HZ * fs_info->commit_interval;
1746 		mutex_lock(&fs_info->transaction_kthread_mutex);
1747 
1748 		spin_lock(&fs_info->trans_lock);
1749 		cur = fs_info->running_transaction;
1750 		if (!cur) {
1751 			spin_unlock(&fs_info->trans_lock);
1752 			goto sleep;
1753 		}
1754 
1755 		now = ktime_get_seconds();
1756 		if (cur->state < TRANS_STATE_BLOCKED &&
1757 		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
1758 		    (now < cur->start_time ||
1759 		     now - cur->start_time < fs_info->commit_interval)) {
1760 			spin_unlock(&fs_info->trans_lock);
1761 			delay = HZ * 5;
1762 			goto sleep;
1763 		}
1764 		transid = cur->transid;
1765 		spin_unlock(&fs_info->trans_lock);
1766 
1767 		/* If the file system is aborted, this will always fail. */
1768 		trans = btrfs_attach_transaction(root);
1769 		if (IS_ERR(trans)) {
1770 			if (PTR_ERR(trans) != -ENOENT)
1771 				cannot_commit = true;
1772 			goto sleep;
1773 		}
1774 		if (transid == trans->transid) {
1775 			btrfs_commit_transaction(trans);
1776 		} else {
1777 			btrfs_end_transaction(trans);
1778 		}
1779 sleep:
1780 		wake_up_process(fs_info->cleaner_kthread);
1781 		mutex_unlock(&fs_info->transaction_kthread_mutex);
1782 
1783 		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1784 				      &fs_info->fs_state)))
1785 			btrfs_cleanup_transaction(fs_info);
1786 		if (!kthread_should_stop() &&
1787 				(!btrfs_transaction_blocked(fs_info) ||
1788 				 cannot_commit))
1789 			schedule_timeout_interruptible(delay);
1790 	} while (!kthread_should_stop());
1791 	return 0;
1792 }
1793 
1794 /*
1795  * this will find the highest generation in the array of
1796  * root backups.  The index of the highest array is returned,
1797  * or -1 if we can't find anything.
1798  *
1799  * We check to make sure the array is valid by comparing the
1800  * generation of the latest root in the array with the generation
1801  * in the super block.  If they don't match we pitch it.
1802  */
1803 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1804 {
1805 	u64 cur;
1806 	int newest_index = -1;
1807 	struct btrfs_root_backup *root_backup;
1808 	int i;
1809 
1810 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1811 		root_backup = info->super_copy->super_roots + i;
1812 		cur = btrfs_backup_tree_root_gen(root_backup);
1813 		if (cur == newest_gen)
1814 			newest_index = i;
1815 	}
1816 
1817 	/* check to see if we actually wrapped around */
1818 	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1819 		root_backup = info->super_copy->super_roots;
1820 		cur = btrfs_backup_tree_root_gen(root_backup);
1821 		if (cur == newest_gen)
1822 			newest_index = 0;
1823 	}
1824 	return newest_index;
1825 }
1826 
1828 /*
1829  * find the oldest backup so we know where to store new entries
1830  * in the backup array.  This will set the backup_root_index
1831  * field in the fs_info struct
1832  */
1833 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1834 				     u64 newest_gen)
1835 {
1836 	int newest_index = -1;
1837 
1838 	newest_index = find_newest_super_backup(info, newest_gen);
1839 	/* if there was garbage in there, just move along */
1840 	if (newest_index == -1) {
1841 		info->backup_root_index = 0;
1842 	} else {
1843 		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1844 	}
1845 }
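
/*
 * A quick sketch of the ring arithmetic above, with BTRFS_NUM_BACKUP_ROOTS
 * being 4: if the newest backup sits in slot 2, the next entry to be
 * written goes to slot (2 + 1) % 4 = 3, which is the oldest slot.
 */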
1846 
1847 /*
1848  * copy all the root pointers into the super backup array.
1849  * this will bump the backup pointer by one when it is
1850  * done
1851  */
1852 static void backup_super_roots(struct btrfs_fs_info *info)
1853 {
1854 	int next_backup;
1855 	struct btrfs_root_backup *root_backup;
1856 	int last_backup;
1857 
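	/*
	 * next_backup is the ring slot we are about to fill; last_backup,
	 * one step behind it modulo BTRFS_NUM_BACKUP_ROOTS, is the slot
	 * written by the previous commit.
	 */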
1858 	next_backup = info->backup_root_index;
1859 	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1860 		BTRFS_NUM_BACKUP_ROOTS;
1861 
1862 	/*
1863 	 * Just overwrite the last backup if we're at the same generation;
1864 	 * this happens only at umount.
1865 	 */
1866 	root_backup = info->super_for_commit->super_roots + last_backup;
1867 	if (btrfs_backup_tree_root_gen(root_backup) ==
1868 	    btrfs_header_generation(info->tree_root->node))
1869 		next_backup = last_backup;
1870 
1871 	root_backup = info->super_for_commit->super_roots + next_backup;
1872 
1873 	/*
1874 	 * make sure all of our padding and empty slots get zero filled
1875 	 * regardless of which ones we use today
1876 	 */
1877 	memset(root_backup, 0, sizeof(*root_backup));
1878 
1879 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1880 
1881 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1882 	btrfs_set_backup_tree_root_gen(root_backup,
1883 			       btrfs_header_generation(info->tree_root->node));
1884 
1885 	btrfs_set_backup_tree_root_level(root_backup,
1886 			       btrfs_header_level(info->tree_root->node));
1887 
1888 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1889 	btrfs_set_backup_chunk_root_gen(root_backup,
1890 			       btrfs_header_generation(info->chunk_root->node));
1891 	btrfs_set_backup_chunk_root_level(root_backup,
1892 			       btrfs_header_level(info->chunk_root->node));
1893 
1894 	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1895 	btrfs_set_backup_extent_root_gen(root_backup,
1896 			       btrfs_header_generation(info->extent_root->node));
1897 	btrfs_set_backup_extent_root_level(root_backup,
1898 			       btrfs_header_level(info->extent_root->node));
1899 
1900 	/*
1901 	 * we might commit during log recovery, which happens before we set
1902 	 * the fs_root.  Make sure it is valid before we fill it in.
1903 	 */
1904 	if (info->fs_root && info->fs_root->node) {
1905 		btrfs_set_backup_fs_root(root_backup,
1906 					 info->fs_root->node->start);
1907 		btrfs_set_backup_fs_root_gen(root_backup,
1908 			       btrfs_header_generation(info->fs_root->node));
1909 		btrfs_set_backup_fs_root_level(root_backup,
1910 			       btrfs_header_level(info->fs_root->node));
1911 	}
1912 
1913 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1914 	btrfs_set_backup_dev_root_gen(root_backup,
1915 			       btrfs_header_generation(info->dev_root->node));
1916 	btrfs_set_backup_dev_root_level(root_backup,
1917 				       btrfs_header_level(info->dev_root->node));
1918 
1919 	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1920 	btrfs_set_backup_csum_root_gen(root_backup,
1921 			       btrfs_header_generation(info->csum_root->node));
1922 	btrfs_set_backup_csum_root_level(root_backup,
1923 			       btrfs_header_level(info->csum_root->node));
1924 
1925 	btrfs_set_backup_total_bytes(root_backup,
1926 			     btrfs_super_total_bytes(info->super_copy));
1927 	btrfs_set_backup_bytes_used(root_backup,
1928 			     btrfs_super_bytes_used(info->super_copy));
1929 	btrfs_set_backup_num_devices(root_backup,
1930 			     btrfs_super_num_devices(info->super_copy));
1931 
1932 	/*
1933 	 * if we don't copy this out to the super_copy, it won't get remembered
1934 	 * for the next commit
1935 	 */
1936 	memcpy(&info->super_copy->super_roots,
1937 	       &info->super_for_commit->super_roots,
1938 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1939 }
1940 
1941 /*
1942  * this copies info out of the root backup array and back into
1943  * the in-memory super block.  It is meant to help iterate through
1944  * the array, so you send it the number of backups you've already
1945  * tried and the last backup index you used.
1946  *
1947  * this returns -1 when it has tried all the backups
1948  */
1949 static noinline int next_root_backup(struct btrfs_fs_info *info,
1950 				     struct btrfs_super_block *super,
1951 				     int *num_backups_tried, int *backup_index)
1952 {
1953 	struct btrfs_root_backup *root_backup;
1954 	int newest = *backup_index;
1955 
1956 	if (*num_backups_tried == 0) {
1957 		u64 gen = btrfs_super_generation(super);
1958 
1959 		newest = find_newest_super_backup(info, gen);
1960 		if (newest == -1)
1961 			return -1;
1962 
1963 		*backup_index = newest;
1964 		*num_backups_tried = 1;
1965 	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1966 		/* we've tried all the backups, all done */
1967 		return -1;
1968 	} else {
1969 		/* jump to the next oldest backup */
1970 		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1971 			BTRFS_NUM_BACKUP_ROOTS;
1972 		*backup_index = newest;
1973 		*num_backups_tried += 1;
1974 	}
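	/*
	 * At this point @newest is the slot to restore from: the newest
	 * backup on the first call, then one step older on each retry,
	 * wrapping around the ring.
	 */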
1975 	root_backup = super->super_roots + newest;
1976 
1977 	btrfs_set_super_generation(super,
1978 				   btrfs_backup_tree_root_gen(root_backup));
1979 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1980 	btrfs_set_super_root_level(super,
1981 				   btrfs_backup_tree_root_level(root_backup));
1982 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1983 
1984 	/*
1985 	 * fixme: the total bytes and num_devices need to match, otherwise
1986 	 * we should require a fsck
1987 	 */
1988 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1989 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1990 	return 0;
1991 }
1992 
1993 /* helper to cleanup workers */
1994 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1995 {
1996 	btrfs_destroy_workqueue(fs_info->fixup_workers);
1997 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1998 	btrfs_destroy_workqueue(fs_info->workers);
1999 	btrfs_destroy_workqueue(fs_info->endio_workers);
2000 	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2001 	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2002 	btrfs_destroy_workqueue(fs_info->rmw_workers);
2003 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
2004 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2005 	btrfs_destroy_workqueue(fs_info->submit_workers);
2006 	btrfs_destroy_workqueue(fs_info->delayed_workers);
2007 	btrfs_destroy_workqueue(fs_info->caching_workers);
2008 	btrfs_destroy_workqueue(fs_info->readahead_workers);
2009 	btrfs_destroy_workqueue(fs_info->flush_workers);
2010 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2011 	btrfs_destroy_workqueue(fs_info->extent_workers);
2012 	/*
2013 	 * Now that all other work queues are destroyed, we can safely destroy
2014 	 * the queues used for metadata I/O, since tasks from those other work
2015 	 * queues can do metadata I/O operations.
2016 	 */
2017 	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2018 	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2019 }
2020 
2021 static void free_root_extent_buffers(struct btrfs_root *root)
2022 {
2023 	if (root) {
2024 		free_extent_buffer(root->node);
2025 		free_extent_buffer(root->commit_root);
2026 		root->node = NULL;
2027 		root->commit_root = NULL;
2028 	}
2029 }
2030 
2031 /* helper to cleanup tree roots */
2032 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2033 {
2034 	free_root_extent_buffers(info->tree_root);
2035 
2036 	free_root_extent_buffers(info->dev_root);
2037 	free_root_extent_buffers(info->extent_root);
2038 	free_root_extent_buffers(info->csum_root);
2039 	free_root_extent_buffers(info->quota_root);
2040 	free_root_extent_buffers(info->uuid_root);
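	/*
	 * The chunk root is only freed on request; the backup root recovery
	 * path keeps it, since the chunk tree stays valid while older tree
	 * roots are tried.
	 */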
2041 	if (chunk_root)
2042 		free_root_extent_buffers(info->chunk_root);
2043 	free_root_extent_buffers(info->free_space_root);
2044 }
2045 
2046 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2047 {
2048 	int ret;
2049 	struct btrfs_root *gang[8];
2050 	int i;
2051 
2052 	while (!list_empty(&fs_info->dead_roots)) {
2053 		gang[0] = list_entry(fs_info->dead_roots.next,
2054 				     struct btrfs_root, root_list);
2055 		list_del(&gang[0]->root_list);
2056 
2057 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2058 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2059 		} else {
2060 			free_extent_buffer(gang[0]->node);
2061 			free_extent_buffer(gang[0]->commit_root);
2062 			btrfs_put_fs_root(gang[0]);
2063 		}
2064 	}
2065 
2066 	while (1) {
2067 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2068 					     (void **)gang, 0,
2069 					     ARRAY_SIZE(gang));
2070 		if (!ret)
2071 			break;
2072 		for (i = 0; i < ret; i++)
2073 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2074 	}
2075 
2076 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2077 		btrfs_free_log_root_tree(NULL, fs_info);
2078 		btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2079 	}
2080 }
2081 
2082 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2083 {
2084 	mutex_init(&fs_info->scrub_lock);
2085 	atomic_set(&fs_info->scrubs_running, 0);
2086 	atomic_set(&fs_info->scrub_pause_req, 0);
2087 	atomic_set(&fs_info->scrubs_paused, 0);
2088 	atomic_set(&fs_info->scrub_cancel_req, 0);
2089 	init_waitqueue_head(&fs_info->scrub_pause_wait);
2090 	refcount_set(&fs_info->scrub_workers_refcnt, 0);
2091 }
2092 
2093 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2094 {
2095 	spin_lock_init(&fs_info->balance_lock);
2096 	mutex_init(&fs_info->balance_mutex);
2097 	atomic_set(&fs_info->balance_pause_req, 0);
2098 	atomic_set(&fs_info->balance_cancel_req, 0);
2099 	fs_info->balance_ctl = NULL;
2100 	init_waitqueue_head(&fs_info->balance_wait_q);
2101 }
2102 
2103 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2104 {
2105 	struct inode *inode = fs_info->btree_inode;
2106 
2107 	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2108 	set_nlink(inode, 1);
2109 	/*
2110 	 * we set the i_size on the btree inode to the largest possible
2111 	 * offset.  The real end of the address space is determined by all
2112 	 * of the devices in the system.
2113 	 */
2114 	inode->i_size = OFFSET_MAX;
2115 	inode->i_mapping->a_ops = &btree_aops;
2116 
2117 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2118 	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
2119 			    IO_TREE_INODE_IO, inode);
2120 	BTRFS_I(inode)->io_tree.track_uptodate = false;
2121 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2122 
2123 	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2124 
2125 	BTRFS_I(inode)->root = fs_info->tree_root;
2126 	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2127 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2128 	btrfs_insert_inode_hash(inode);
2129 }
2130 
2131 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2132 {
2133 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2134 	init_rwsem(&fs_info->dev_replace.rwsem);
2135 	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
2136 }
2137 
2138 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2139 {
2140 	spin_lock_init(&fs_info->qgroup_lock);
2141 	mutex_init(&fs_info->qgroup_ioctl_lock);
2142 	fs_info->qgroup_tree = RB_ROOT;
2143 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2144 	fs_info->qgroup_seq = 1;
2145 	fs_info->qgroup_ulist = NULL;
2146 	fs_info->qgroup_rescan_running = false;
2147 	mutex_init(&fs_info->qgroup_rescan_lock);
2148 }
2149 
2150 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2151 		struct btrfs_fs_devices *fs_devices)
2152 {
2153 	u32 max_active = fs_info->thread_pool_size;
2154 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2155 
2156 	fs_info->workers =
2157 		btrfs_alloc_workqueue(fs_info, "worker",
2158 				      flags | WQ_HIGHPRI, max_active, 16);
2159 
2160 	fs_info->delalloc_workers =
2161 		btrfs_alloc_workqueue(fs_info, "delalloc",
2162 				      flags, max_active, 2);
2163 
2164 	fs_info->flush_workers =
2165 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2166 				      flags, max_active, 0);
2167 
2168 	fs_info->caching_workers =
2169 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2170 
2171 	/*
2172 	 * a higher idle thresh on the submit workers makes it much more
2173 	 * likely that bios will be sent down in a sane order to the
2174 	 * devices
2175 	 */
2176 	fs_info->submit_workers =
2177 		btrfs_alloc_workqueue(fs_info, "submit", flags,
2178 				      min_t(u64, fs_devices->num_devices,
2179 					    max_active), 64);
2180 
2181 	fs_info->fixup_workers =
2182 		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2183 
2184 	/*
2185 	 * endios are largely parallel and should have a very
2186 	 * low idle thresh
2187 	 */
2188 	fs_info->endio_workers =
2189 		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2190 	fs_info->endio_meta_workers =
2191 		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2192 				      max_active, 4);
2193 	fs_info->endio_meta_write_workers =
2194 		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2195 				      max_active, 2);
2196 	fs_info->endio_raid56_workers =
2197 		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2198 				      max_active, 4);
2199 	fs_info->endio_repair_workers =
2200 		btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2201 	fs_info->rmw_workers =
2202 		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2203 	fs_info->endio_write_workers =
2204 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2205 				      max_active, 2);
2206 	fs_info->endio_freespace_worker =
2207 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2208 				      max_active, 0);
2209 	fs_info->delayed_workers =
2210 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2211 				      max_active, 0);
2212 	fs_info->readahead_workers =
2213 		btrfs_alloc_workqueue(fs_info, "readahead", flags,
2214 				      max_active, 2);
2215 	fs_info->qgroup_rescan_workers =
2216 		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2217 	fs_info->extent_workers =
2218 		btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2219 				      min_t(u64, fs_devices->num_devices,
2220 					    max_active), 8);
2221 
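	/*
	 * If any of the allocations above failed, return -ENOMEM; the error
	 * path in open_ctree() then releases whichever queues were
	 * successfully allocated via btrfs_stop_all_workers().
	 */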
2222 	if (!(fs_info->workers && fs_info->delalloc_workers &&
2223 	      fs_info->submit_workers && fs_info->flush_workers &&
2224 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2225 	      fs_info->endio_meta_write_workers &&
2226 	      fs_info->endio_repair_workers &&
2227 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2228 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2229 	      fs_info->caching_workers && fs_info->readahead_workers &&
2230 	      fs_info->fixup_workers && fs_info->delayed_workers &&
2231 	      fs_info->extent_workers &&
2232 	      fs_info->qgroup_rescan_workers)) {
2233 		return -ENOMEM;
2234 	}
2235 
2236 	return 0;
2237 }
2238 
2239 static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2240 {
2241 	struct crypto_shash *csum_shash;
2242 	const char *csum_name = btrfs_super_csum_name(csum_type);
2243 
2244 	csum_shash = crypto_alloc_shash(csum_name, 0, 0);
2245 
2246 	if (IS_ERR(csum_shash)) {
2247 		btrfs_err(fs_info, "error allocating %s hash for checksum",
2248 			  csum_name);
2249 		return PTR_ERR(csum_shash);
2250 	}
2251 
2252 	fs_info->csum_shash = csum_shash;
2253 
2254 	return 0;
2255 }
2256 
2257 static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
2258 {
2259 	crypto_free_shash(fs_info->csum_shash);
2260 }
2261 
2262 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2263 			    struct btrfs_fs_devices *fs_devices)
2264 {
2265 	int ret;
2266 	struct btrfs_root *log_tree_root;
2267 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2268 	u64 bytenr = btrfs_super_log_root(disk_super);
2269 	int level = btrfs_super_log_root_level(disk_super);
2270 
2271 	if (fs_devices->rw_devices == 0) {
2272 		btrfs_warn(fs_info, "log replay required on RO media");
2273 		return -EIO;
2274 	}
2275 
2276 	log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2277 	if (!log_tree_root)
2278 		return -ENOMEM;
2279 
2280 	__setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2281 
2282 	log_tree_root->node = read_tree_block(fs_info, bytenr,
2283 					      fs_info->generation + 1,
2284 					      level, NULL);
2285 	if (IS_ERR(log_tree_root->node)) {
2286 		btrfs_warn(fs_info, "failed to read log tree");
2287 		ret = PTR_ERR(log_tree_root->node);
2288 		kfree(log_tree_root);
2289 		return ret;
2290 	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
2291 		btrfs_err(fs_info, "failed to read log tree");
2292 		free_extent_buffer(log_tree_root->node);
2293 		kfree(log_tree_root);
2294 		return -EIO;
2295 	}
2296 	/* returns with log_tree_root freed on success */
2297 	ret = btrfs_recover_log_trees(log_tree_root);
2298 	if (ret) {
2299 		btrfs_handle_fs_error(fs_info, ret,
2300 				      "Failed to recover log tree");
2301 		free_extent_buffer(log_tree_root->node);
2302 		kfree(log_tree_root);
2303 		return ret;
2304 	}
2305 
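	/*
	 * On a read-only mount there will be no later transaction to write
	 * out the result of the replay, so commit it right away.
	 */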
2306 	if (sb_rdonly(fs_info->sb)) {
2307 		ret = btrfs_commit_super(fs_info);
2308 		if (ret)
2309 			return ret;
2310 	}
2311 
2312 	return 0;
2313 }
2314 
2315 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2316 {
2317 	struct btrfs_root *tree_root = fs_info->tree_root;
2318 	struct btrfs_root *root;
2319 	struct btrfs_key location;
2320 	int ret;
2321 
2322 	BUG_ON(!fs_info->tree_root);
2323 
2324 	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2325 	location.type = BTRFS_ROOT_ITEM_KEY;
2326 	location.offset = 0;
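	/* Every persistent tree is found via a ROOT_ITEM in the tree of tree roots */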
2327 
2328 	root = btrfs_read_tree_root(tree_root, &location);
2329 	if (IS_ERR(root)) {
2330 		ret = PTR_ERR(root);
2331 		goto out;
2332 	}
2333 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2334 	fs_info->extent_root = root;
2335 
2336 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2337 	root = btrfs_read_tree_root(tree_root, &location);
2338 	if (IS_ERR(root)) {
2339 		ret = PTR_ERR(root);
2340 		goto out;
2341 	}
2342 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2343 	fs_info->dev_root = root;
2344 	btrfs_init_devices_late(fs_info);
2345 
2346 	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2347 	root = btrfs_read_tree_root(tree_root, &location);
2348 	if (IS_ERR(root)) {
2349 		ret = PTR_ERR(root);
2350 		goto out;
2351 	}
2352 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2353 	fs_info->csum_root = root;
2354 
2355 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2356 	root = btrfs_read_tree_root(tree_root, &location);
2357 	if (!IS_ERR(root)) {
2358 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2359 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2360 		fs_info->quota_root = root;
2361 	}
2362 
2363 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2364 	root = btrfs_read_tree_root(tree_root, &location);
2365 	if (IS_ERR(root)) {
2366 		ret = PTR_ERR(root);
2367 		if (ret != -ENOENT)
2368 			goto out;
2369 	} else {
2370 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2371 		fs_info->uuid_root = root;
2372 	}
2373 
2374 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2375 		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2376 		root = btrfs_read_tree_root(tree_root, &location);
2377 		if (IS_ERR(root)) {
2378 			ret = PTR_ERR(root);
2379 			goto out;
2380 		}
2381 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2382 		fs_info->free_space_root = root;
2383 	}
2384 
2385 	return 0;
2386 out:
2387 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2388 		   location.objectid, ret);
2389 	return ret;
2390 }
2391 
2392 /*
2393  * Real super block validation
2394  * NOTE: super csum type and incompat features will not be checked here.
2395  *
2396  * @sb:		super block to check
2397  * @mirror_num:	which copy of the super block to check the bytenr of:
2398  * 		0	the primary (1st) sb
2399  * 		1, 2	2nd and 3rd backup copy
2400  * 	       -1	skip bytenr check
2401  */
2402 static int validate_super(struct btrfs_fs_info *fs_info,
2403 			    struct btrfs_super_block *sb, int mirror_num)
2404 {
2405 	u64 nodesize = btrfs_super_nodesize(sb);
2406 	u64 sectorsize = btrfs_super_sectorsize(sb);
2407 	int ret = 0;
2408 
2409 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2410 		btrfs_err(fs_info, "no valid FS found");
2411 		ret = -EINVAL;
2412 	}
2413 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2414 		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2415 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2416 		ret = -EINVAL;
2417 	}
2418 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2419 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2420 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2421 		ret = -EINVAL;
2422 	}
2423 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2424 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2425 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2426 		ret = -EINVAL;
2427 	}
2428 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2429 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2430 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2431 		ret = -EINVAL;
2432 	}
2433 
2434 	/*
2435 	 * Check sectorsize and nodesize first, other checks will need them.
2436 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2437 	 */
2438 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2439 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2440 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2441 		ret = -EINVAL;
2442 	}
2443 	/* Only PAGE_SIZE is supported for now */
2444 	if (sectorsize != PAGE_SIZE) {
2445 		btrfs_err(fs_info,
2446 			"sectorsize %llu not supported yet, only support %lu",
2447 			sectorsize, PAGE_SIZE);
2448 		ret = -EINVAL;
2449 	}
2450 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2451 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2452 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2453 		ret = -EINVAL;
2454 	}
2455 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2456 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2457 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2458 		ret = -EINVAL;
2459 	}
2460 
2461 	/* Root alignment check */
2462 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2463 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2464 			   btrfs_super_root(sb));
2465 		ret = -EINVAL;
2466 	}
2467 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2468 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2469 			   btrfs_super_chunk_root(sb));
2470 		ret = -EINVAL;
2471 	}
2472 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2473 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2474 			   btrfs_super_log_root(sb));
2475 		ret = -EINVAL;
2476 	}
2477 
2478 	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2479 		   BTRFS_FSID_SIZE) != 0) {
2480 		btrfs_err(fs_info,
2481 			"dev_item UUID does not match metadata fsid: %pU != %pU",
2482 			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2483 		ret = -EINVAL;
2484 	}
2485 
2486 	/*
2487 	 * Hint to catch really bogus numbers, bit flips and the like; more
2488 	 * exact checks are done later
2489 	 */
2490 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2491 		btrfs_err(fs_info, "bytes_used is too small %llu",
2492 			  btrfs_super_bytes_used(sb));
2493 		ret = -EINVAL;
2494 	}
2495 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2496 		btrfs_err(fs_info, "invalid stripesize %u",
2497 			  btrfs_super_stripesize(sb));
2498 		ret = -EINVAL;
2499 	}
2500 	if (btrfs_super_num_devices(sb) > (1UL << 31))
2501 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2502 			   btrfs_super_num_devices(sb));
2503 	if (btrfs_super_num_devices(sb) == 0) {
2504 		btrfs_err(fs_info, "number of devices is 0");
2505 		ret = -EINVAL;
2506 	}
2507 
2508 	if (mirror_num >= 0 &&
2509 	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2510 		btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2511 			  btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
2512 		ret = -EINVAL;
2513 	}
2514 
2515 	/*
2516 	 * Obvious sys_chunk_array corruption: it must hold at least one key
2517 	 * and one chunk
2518 	 */
2519 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2520 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2521 			  btrfs_super_sys_array_size(sb),
2522 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2523 		ret = -EINVAL;
2524 	}
2525 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2526 			+ sizeof(struct btrfs_chunk)) {
2527 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2528 			  btrfs_super_sys_array_size(sb),
2529 			  sizeof(struct btrfs_disk_key)
2530 			  + sizeof(struct btrfs_chunk));
2531 		ret = -EINVAL;
2532 	}
2533 
2534 	/*
2535 	 * The generation is a global counter, so we'll trust it more than the
2536 	 * other fields, but it's still possible that it's the one that's wrong.
2537 	 */
2538 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2539 		btrfs_warn(fs_info,
2540 			"suspicious: generation < chunk_root_generation: %llu < %llu",
2541 			btrfs_super_generation(sb),
2542 			btrfs_super_chunk_root_generation(sb));
2543 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2544 	    && btrfs_super_cache_generation(sb) != (u64)-1)
2545 		btrfs_warn(fs_info,
2546 			"suspicious: generation < cache_generation: %llu < %llu",
2547 			btrfs_super_generation(sb),
2548 			btrfs_super_cache_generation(sb));
2549 
2550 	return ret;
2551 }
2552 
2553 /*
2554  * Validation of super block at mount time.
2555  * Checks already done early at mount time, like the csum type and incompat
2556  * flags, will be skipped.
2557  */
2558 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2559 {
2560 	return validate_super(fs_info, fs_info->super_copy, 0);
2561 }
2562 
2563 /*
2564  * Validation of super block at write time.
2565  * Some checks, like the bytenr check, will be skipped as their values will
2566  * be overwritten soon.
2567  * Extra checks like csum type and incompat flags will be done here.
2568  */
2569 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2570 				      struct btrfs_super_block *sb)
2571 {
2572 	int ret;
2573 
2574 	ret = validate_super(fs_info, sb, -1);
2575 	if (ret < 0)
2576 		goto out;
2577 	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2578 		ret = -EUCLEAN;
2579 		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2580 			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2581 		goto out;
2582 	}
2583 	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2584 		ret = -EUCLEAN;
2585 		btrfs_err(fs_info,
2586 		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2587 			  btrfs_super_incompat_flags(sb),
2588 			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2589 		goto out;
2590 	}
2591 out:
2592 	if (ret < 0)
2593 		btrfs_err(fs_info,
2594 		"super block corruption detected before writing it to disk");
2595 	return ret;
2596 }
2597 
2598 int open_ctree(struct super_block *sb,
2599 	       struct btrfs_fs_devices *fs_devices,
2600 	       char *options)
2601 {
2602 	u32 sectorsize;
2603 	u32 nodesize;
2604 	u32 stripesize;
2605 	u64 generation;
2606 	u64 features;
2607 	u16 csum_type;
2608 	struct btrfs_key location;
2609 	struct buffer_head *bh;
2610 	struct btrfs_super_block *disk_super;
2611 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2612 	struct btrfs_root *tree_root;
2613 	struct btrfs_root *chunk_root;
2614 	int ret;
2615 	int err = -EINVAL;
2616 	int num_backups_tried = 0;
2617 	int backup_index = 0;
2618 	int clear_free_space_tree = 0;
2619 	int level;
2620 
2621 	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2622 	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2623 	if (!tree_root || !chunk_root) {
2624 		err = -ENOMEM;
2625 		goto fail;
2626 	}
2627 
2628 	ret = init_srcu_struct(&fs_info->subvol_srcu);
2629 	if (ret) {
2630 		err = ret;
2631 		goto fail;
2632 	}
2633 
2634 	ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL);
2635 	if (ret) {
2636 		err = ret;
2637 		goto fail_srcu;
2638 	}
2639 
2640 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2641 	if (ret) {
2642 		err = ret;
2643 		goto fail_dio_bytes;
2644 	}
2645 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2646 					(1 + ilog2(nr_cpu_ids));
2647 
2648 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2649 	if (ret) {
2650 		err = ret;
2651 		goto fail_dirty_metadata_bytes;
2652 	}
2653 
2654 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2655 			GFP_KERNEL);
2656 	if (ret) {
2657 		err = ret;
2658 		goto fail_delalloc_bytes;
2659 	}
2660 
2661 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2662 	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2663 	INIT_LIST_HEAD(&fs_info->trans_list);
2664 	INIT_LIST_HEAD(&fs_info->dead_roots);
2665 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2666 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2667 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2668 	spin_lock_init(&fs_info->delalloc_root_lock);
2669 	spin_lock_init(&fs_info->trans_lock);
2670 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2671 	spin_lock_init(&fs_info->delayed_iput_lock);
2672 	spin_lock_init(&fs_info->defrag_inodes_lock);
2673 	spin_lock_init(&fs_info->tree_mod_seq_lock);
2674 	spin_lock_init(&fs_info->super_lock);
2675 	spin_lock_init(&fs_info->buffer_lock);
2676 	spin_lock_init(&fs_info->unused_bgs_lock);
2677 	rwlock_init(&fs_info->tree_mod_log_lock);
2678 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2679 	mutex_init(&fs_info->delete_unused_bgs_mutex);
2680 	mutex_init(&fs_info->reloc_mutex);
2681 	mutex_init(&fs_info->delalloc_root_mutex);
2682 	seqlock_init(&fs_info->profiles_lock);
2683 
2684 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2685 	INIT_LIST_HEAD(&fs_info->space_info);
2686 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2687 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2688 	extent_map_tree_init(&fs_info->mapping_tree);
2689 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2690 			     BTRFS_BLOCK_RSV_GLOBAL);
2691 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2692 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2693 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2694 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2695 			     BTRFS_BLOCK_RSV_DELOPS);
2696 	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2697 			     BTRFS_BLOCK_RSV_DELREFS);
2698 
2699 	atomic_set(&fs_info->async_delalloc_pages, 0);
2700 	atomic_set(&fs_info->defrag_running, 0);
2701 	atomic_set(&fs_info->reada_works_cnt, 0);
2702 	atomic_set(&fs_info->nr_delayed_iputs, 0);
2703 	atomic64_set(&fs_info->tree_mod_seq, 0);
2704 	fs_info->sb = sb;
2705 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2706 	fs_info->metadata_ratio = 0;
2707 	fs_info->defrag_inodes = RB_ROOT;
2708 	atomic64_set(&fs_info->free_chunk_space, 0);
2709 	fs_info->tree_mod_log = RB_ROOT;
2710 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2711 	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2712 	/* readahead state */
2713 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2714 	spin_lock_init(&fs_info->reada_lock);
2715 	btrfs_init_ref_verify(fs_info);
2716 
2717 	fs_info->thread_pool_size = min_t(unsigned long,
2718 					  num_online_cpus() + 2, 8);
2719 
2720 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2721 	spin_lock_init(&fs_info->ordered_root_lock);
2722 
2723 	fs_info->btree_inode = new_inode(sb);
2724 	if (!fs_info->btree_inode) {
2725 		err = -ENOMEM;
2726 		goto fail_bio_counter;
2727 	}
2728 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2729 
2730 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2731 					GFP_KERNEL);
2732 	if (!fs_info->delayed_root) {
2733 		err = -ENOMEM;
2734 		goto fail_iput;
2735 	}
2736 	btrfs_init_delayed_root(fs_info->delayed_root);
2737 
2738 	btrfs_init_scrub(fs_info);
2739 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2740 	fs_info->check_integrity_print_mask = 0;
2741 #endif
2742 	btrfs_init_balance(fs_info);
2743 	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2744 
2745 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2746 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2747 
2748 	btrfs_init_btree_inode(fs_info);
2749 
2750 	spin_lock_init(&fs_info->block_group_cache_lock);
2751 	fs_info->block_group_cache_tree = RB_ROOT;
2752 	fs_info->first_logical_byte = (u64)-1;
2753 
2754 	extent_io_tree_init(fs_info, &fs_info->freed_extents[0],
2755 			    IO_TREE_FS_INFO_FREED_EXTENTS0, NULL);
2756 	extent_io_tree_init(fs_info, &fs_info->freed_extents[1],
2757 			    IO_TREE_FS_INFO_FREED_EXTENTS1, NULL);
2758 	fs_info->pinned_extents = &fs_info->freed_extents[0];
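	/*
	 * pinned_extents alternates between the two freed_extents trees; it
	 * is pointed at the other tree while a transaction commits, so the
	 * extents pinned by the next transaction are tracked separately
	 * from the ones being freed by the committing one.
	 */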
2759 	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2760 
2761 	mutex_init(&fs_info->ordered_operations_mutex);
2762 	mutex_init(&fs_info->tree_log_mutex);
2763 	mutex_init(&fs_info->chunk_mutex);
2764 	mutex_init(&fs_info->transaction_kthread_mutex);
2765 	mutex_init(&fs_info->cleaner_mutex);
2766 	mutex_init(&fs_info->ro_block_group_mutex);
2767 	init_rwsem(&fs_info->commit_root_sem);
2768 	init_rwsem(&fs_info->cleanup_work_sem);
2769 	init_rwsem(&fs_info->subvol_sem);
2770 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2771 
2772 	btrfs_init_dev_replace_locks(fs_info);
2773 	btrfs_init_qgroup(fs_info);
2774 
2775 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2776 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2777 
2778 	init_waitqueue_head(&fs_info->transaction_throttle);
2779 	init_waitqueue_head(&fs_info->transaction_wait);
2780 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2781 	init_waitqueue_head(&fs_info->async_submit_wait);
2782 	init_waitqueue_head(&fs_info->delayed_iputs_wait);
2783 
2784 	/* Usable values until the real ones are cached from the superblock */
2785 	fs_info->nodesize = 4096;
2786 	fs_info->sectorsize = 4096;
2787 	fs_info->stripesize = 4096;
2788 
2789 	spin_lock_init(&fs_info->swapfile_pins_lock);
2790 	fs_info->swapfile_pins = RB_ROOT;
2791 
2792 	fs_info->send_in_progress = 0;
2793 
2794 	ret = btrfs_alloc_stripe_hash_table(fs_info);
2795 	if (ret) {
2796 		err = ret;
2797 		goto fail_alloc;
2798 	}
2799 
2800 	__setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2801 
2802 	invalidate_bdev(fs_devices->latest_bdev);
2803 
2804 	/*
2805 	 * Read super block and check the signature bytes only
2806 	 */
2807 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2808 	if (IS_ERR(bh)) {
2809 		err = PTR_ERR(bh);
2810 		goto fail_alloc;
2811 	}
2812 
2813 	/*
2814 	 * Verify the checksum type first; if that or the checksum value is
2815 	 * corrupted, we'll find out
2816 	 */
2817 	csum_type = btrfs_super_csum_type((struct btrfs_super_block *)bh->b_data);
2818 	if (!btrfs_supported_super_csum(csum_type)) {
2819 		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
2820 			  csum_type);
2821 		err = -EINVAL;
2822 		brelse(bh);
2823 		goto fail_alloc;
2824 	}
2825 
2826 	ret = btrfs_init_csum_hash(fs_info, csum_type);
2827 	if (ret) {
2828 		err = ret;
2829 		goto fail_alloc;
2830 	}
2831 
2832 	/*
2833 	 * We want to check the superblock checksum; the csum type is stored inside.
2834 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2835 	 */
2836 	if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2837 		btrfs_err(fs_info, "superblock checksum mismatch");
2838 		err = -EINVAL;
2839 		brelse(bh);
2840 		goto fail_csum;
2841 	}
2842 
2843 	/*
2844 	 * super_copy is zeroed at allocation time and we never touch the
2845 	 * following bytes up to INFO_SIZE; the checksum is calculated from
2846 	 * the whole block of INFO_SIZE
2847 	 */
2848 	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2849 	brelse(bh);
2850 
2851 	disk_super = fs_info->super_copy;
2852 
2853 	ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
2854 		       BTRFS_FSID_SIZE));
2855 
2856 	if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
2857 		ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
2858 				fs_info->super_copy->metadata_uuid,
2859 				BTRFS_FSID_SIZE));
2860 	}
2861 
2862 	features = btrfs_super_flags(disk_super);
2863 	if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
2864 		features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
2865 		btrfs_set_super_flags(disk_super, features);
2866 		btrfs_info(fs_info,
2867 			"found metadata UUID change in progress flag, clearing");
2868 	}
2869 
2870 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2871 	       sizeof(*fs_info->super_for_commit));
2872 
2873 	ret = btrfs_validate_mount_super(fs_info);
2874 	if (ret) {
2875 		btrfs_err(fs_info, "superblock contains fatal errors");
2876 		err = -EINVAL;
2877 		goto fail_csum;
2878 	}
2879 
2880 	if (!btrfs_super_root(disk_super))
2881 		goto fail_csum;
2882 
2883 	/* check FS state, whether FS is broken. */
2884 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2885 		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2886 
2887 	/*
2888 	 * run through our array of backup supers and set up
2889 	 * our ring pointer to the oldest one
2890 	 */
2891 	generation = btrfs_super_generation(disk_super);
2892 	find_oldest_super_backup(fs_info, generation);
2893 
2894 	/*
2895 	 * In the long term, we'll store the compression type in the super
2896 	 * block, and it'll be used for per file compression control.
2897 	 */
2898 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2899 
2900 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2901 	if (ret) {
2902 		err = ret;
2903 		goto fail_csum;
2904 	}
2905 
2906 	features = btrfs_super_incompat_flags(disk_super) &
2907 		~BTRFS_FEATURE_INCOMPAT_SUPP;
2908 	if (features) {
2909 		btrfs_err(fs_info,
2910 		    "cannot mount because of unsupported optional features (%llx)",
2911 		    features);
2912 		err = -EINVAL;
2913 		goto fail_csum;
2914 	}
2915 
2916 	features = btrfs_super_incompat_flags(disk_super);
2917 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2918 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2919 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2920 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2921 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2922 
2923 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2924 		btrfs_info(fs_info, "has skinny extents");
2925 
2926 	/*
2927 	 * flag our filesystem as having big metadata blocks if
2928 	 * they are bigger than the page size
2929 	 */
2930 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2931 		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2932 			btrfs_info(fs_info,
2933 				"flagging fs with big metadata feature");
2934 		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2935 	}
2936 
2937 	nodesize = btrfs_super_nodesize(disk_super);
2938 	sectorsize = btrfs_super_sectorsize(disk_super);
2939 	stripesize = sectorsize;
2940 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2941 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2942 
2943 	/* Cache block sizes */
2944 	fs_info->nodesize = nodesize;
2945 	fs_info->sectorsize = sectorsize;
2946 	fs_info->stripesize = stripesize;
2947 
2948 	/*
2949 	 * mixed block groups end up with duplicate but slightly offset
2950 	 * extent buffers for the same range.  This leads to corruption.
2951 	 */
2952 	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2953 	    (sectorsize != nodesize)) {
2954 		btrfs_err(fs_info,
2955 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2956 			nodesize, sectorsize);
2957 		goto fail_csum;
2958 	}
2959 
2960 	/*
2961 	 * No need to take the lock because there is no other task which
2962 	 * will update the flag.
2963 	 */
2964 	btrfs_set_super_incompat_flags(disk_super, features);
2965 
2966 	features = btrfs_super_compat_ro_flags(disk_super) &
2967 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
2968 	if (!sb_rdonly(sb) && features) {
2969 		btrfs_err(fs_info,
2970 	"cannot mount read-write because of unsupported optional features (%llx)",
2971 		       features);
2972 		err = -EINVAL;
2973 		goto fail_csum;
2974 	}
2975 
2976 	ret = btrfs_init_workqueues(fs_info, fs_devices);
2977 	if (ret) {
2978 		err = ret;
2979 		goto fail_sb_buffer;
2980 	}
2981 
2982 	sb->s_bdi->congested_fn = btrfs_congested_fn;
2983 	sb->s_bdi->congested_data = fs_info;
2984 	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
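	/* Scale the default readahead window by the device count, at least 4MiB */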
2985 	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
2986 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2987 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
2988 
2989 	sb->s_blocksize = sectorsize;
2990 	sb->s_blocksize_bits = blksize_bits(sectorsize);
2991 	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
2992 
2993 	mutex_lock(&fs_info->chunk_mutex);
2994 	ret = btrfs_read_sys_array(fs_info);
2995 	mutex_unlock(&fs_info->chunk_mutex);
2996 	if (ret) {
2997 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
2998 		goto fail_sb_buffer;
2999 	}
3000 
3001 	generation = btrfs_super_chunk_root_generation(disk_super);
3002 	level = btrfs_super_chunk_root_level(disk_super);
3003 
3004 	__setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
3005 
3006 	chunk_root->node = read_tree_block(fs_info,
3007 					   btrfs_super_chunk_root(disk_super),
3008 					   generation, level, NULL);
3009 	if (IS_ERR(chunk_root->node) ||
3010 	    !extent_buffer_uptodate(chunk_root->node)) {
3011 		btrfs_err(fs_info, "failed to read chunk root");
3012 		if (!IS_ERR(chunk_root->node))
3013 			free_extent_buffer(chunk_root->node);
3014 		chunk_root->node = NULL;
3015 		goto fail_tree_roots;
3016 	}
3017 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
3018 	chunk_root->commit_root = btrfs_root_node(chunk_root);
3019 
3020 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3021 	   btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
3022 
3023 	ret = btrfs_read_chunk_tree(fs_info);
3024 	if (ret) {
3025 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3026 		goto fail_tree_roots;
3027 	}
3028 
3029 	/*
3030 	 * Keep the devid that is marked to be the target device for the
3031 	 * device replace procedure
3032 	 */
3033 	btrfs_free_extra_devids(fs_devices, 0);
3034 
3035 	if (!fs_devices->latest_bdev) {
3036 		btrfs_err(fs_info, "failed to read devices");
3037 		goto fail_tree_roots;
3038 	}
3039 
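	/*
	 * If reading the tree root (or anything after it) fails and the
	 * usebackuproot mount option is set, next_root_backup() points the
	 * super block at an older backup root and we retry from here.
	 */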
3040 retry_root_backup:
3041 	generation = btrfs_super_generation(disk_super);
3042 	level = btrfs_super_root_level(disk_super);
3043 
3044 	tree_root->node = read_tree_block(fs_info,
3045 					  btrfs_super_root(disk_super),
3046 					  generation, level, NULL);
3047 	if (IS_ERR(tree_root->node) ||
3048 	    !extent_buffer_uptodate(tree_root->node)) {
3049 		btrfs_warn(fs_info, "failed to read tree root");
3050 		if (!IS_ERR(tree_root->node))
3051 			free_extent_buffer(tree_root->node);
3052 		tree_root->node = NULL;
3053 		goto recovery_tree_root;
3054 	}
3055 
3056 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
3057 	tree_root->commit_root = btrfs_root_node(tree_root);
3058 	btrfs_set_root_refs(&tree_root->root_item, 1);
3059 
3060 	mutex_lock(&tree_root->objectid_mutex);
3061 	ret = btrfs_find_highest_objectid(tree_root,
3062 					&tree_root->highest_objectid);
3063 	if (ret) {
3064 		mutex_unlock(&tree_root->objectid_mutex);
3065 		goto recovery_tree_root;
3066 	}
3067 
3068 	ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
3069 
3070 	mutex_unlock(&tree_root->objectid_mutex);
3071 
3072 	ret = btrfs_read_roots(fs_info);
3073 	if (ret)
3074 		goto recovery_tree_root;
3075 
3076 	fs_info->generation = generation;
3077 	fs_info->last_trans_committed = generation;
3078 
3079 	ret = btrfs_verify_dev_extents(fs_info);
3080 	if (ret) {
3081 		btrfs_err(fs_info,
3082 			  "failed to verify dev extents against chunks: %d",
3083 			  ret);
3084 		goto fail_block_groups;
3085 	}
3086 	ret = btrfs_recover_balance(fs_info);
3087 	if (ret) {
3088 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3089 		goto fail_block_groups;
3090 	}
3091 
3092 	ret = btrfs_init_dev_stats(fs_info);
3093 	if (ret) {
3094 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3095 		goto fail_block_groups;
3096 	}
3097 
3098 	ret = btrfs_init_dev_replace(fs_info);
3099 	if (ret) {
3100 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3101 		goto fail_block_groups;
3102 	}
3103 
3104 	btrfs_free_extra_devids(fs_devices, 1);
3105 
3106 	ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
3107 	if (ret) {
3108 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3109 				ret);
3110 		goto fail_block_groups;
3111 	}
3112 
3113 	ret = btrfs_sysfs_add_device(fs_devices);
3114 	if (ret) {
3115 		btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3116 				ret);
3117 		goto fail_fsdev_sysfs;
3118 	}
3119 
3120 	ret = btrfs_sysfs_add_mounted(fs_info);
3121 	if (ret) {
3122 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3123 		goto fail_fsdev_sysfs;
3124 	}
3125 
3126 	ret = btrfs_init_space_info(fs_info);
3127 	if (ret) {
3128 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3129 		goto fail_sysfs;
3130 	}
3131 
3132 	ret = btrfs_read_block_groups(fs_info);
3133 	if (ret) {
3134 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3135 		goto fail_sysfs;
3136 	}
3137 
3138 	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
3139 		btrfs_warn(fs_info,
3140 		"writable mount is not allowed due to too many missing devices");
3141 		goto fail_sysfs;
3142 	}
3143 
3144 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3145 					       "btrfs-cleaner");
3146 	if (IS_ERR(fs_info->cleaner_kthread))
3147 		goto fail_sysfs;
3148 
3149 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3150 						   tree_root,
3151 						   "btrfs-transaction");
3152 	if (IS_ERR(fs_info->transaction_kthread))
3153 		goto fail_cleaner;
3154 
3155 	if (!btrfs_test_opt(fs_info, NOSSD) &&
3156 	    !fs_info->fs_devices->rotating) {
3157 		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3158 	}
3159 
3160 	/*
3161 	 * Mount does not set all options immediately, so we can do it now
3162 	 * and do not have to wait for a transaction commit
3163 	 */
3164 	btrfs_apply_pending_changes(fs_info);
3165 
3166 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3167 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3168 		ret = btrfsic_mount(fs_info, fs_devices,
3169 				    btrfs_test_opt(fs_info,
3170 					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3171 				    1 : 0,
3172 				    fs_info->check_integrity_print_mask);
3173 		if (ret)
3174 			btrfs_warn(fs_info,
3175 				"failed to initialize integrity check module: %d",
3176 				ret);
3177 	}
3178 #endif
3179 	ret = btrfs_read_qgroup_config(fs_info);
3180 	if (ret)
3181 		goto fail_trans_kthread;
3182 
3183 	if (btrfs_build_ref_tree(fs_info))
3184 		btrfs_err(fs_info, "couldn't build ref tree");
3185 
3186 	/* Do not make disk changes in a broken FS or when nologreplay is given */
3187 	if (btrfs_super_log_root(disk_super) != 0 &&
3188 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3189 		ret = btrfs_replay_log(fs_info, fs_devices);
3190 		if (ret) {
3191 			err = ret;
3192 			goto fail_qgroup;
3193 		}
3194 	}
3195 
3196 	ret = btrfs_find_orphan_roots(fs_info);
3197 	if (ret)
3198 		goto fail_qgroup;
3199 
3200 	if (!sb_rdonly(sb)) {
3201 		ret = btrfs_cleanup_fs_roots(fs_info);
3202 		if (ret)
3203 			goto fail_qgroup;
3204 
3205 		mutex_lock(&fs_info->cleaner_mutex);
3206 		ret = btrfs_recover_relocation(tree_root);
3207 		mutex_unlock(&fs_info->cleaner_mutex);
3208 		if (ret < 0) {
3209 			btrfs_warn(fs_info, "failed to recover relocation: %d",
3210 					ret);
3211 			err = -EINVAL;
3212 			goto fail_qgroup;
3213 		}
3214 	}
3215 
3216 	location.objectid = BTRFS_FS_TREE_OBJECTID;
3217 	location.type = BTRFS_ROOT_ITEM_KEY;
3218 	location.offset = 0;
3219 
3220 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3221 	if (IS_ERR(fs_info->fs_root)) {
3222 		err = PTR_ERR(fs_info->fs_root);
3223 		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3224 		goto fail_qgroup;
3225 	}
3226 
3227 	if (sb_rdonly(sb))
3228 		return 0;
3229 
3230 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3231 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3232 		clear_free_space_tree = 1;
3233 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3234 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3235 		btrfs_warn(fs_info, "free space tree is invalid");
3236 		clear_free_space_tree = 1;
3237 	}
3238 
3239 	if (clear_free_space_tree) {
3240 		btrfs_info(fs_info, "clearing free space tree");
3241 		ret = btrfs_clear_free_space_tree(fs_info);
3242 		if (ret) {
3243 			btrfs_warn(fs_info,
3244 				   "failed to clear free space tree: %d", ret);
3245 			close_ctree(fs_info);
3246 			return ret;
3247 		}
3248 	}
3249 
3250 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3251 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3252 		btrfs_info(fs_info, "creating free space tree");
3253 		ret = btrfs_create_free_space_tree(fs_info);
3254 		if (ret) {
3255 			btrfs_warn(fs_info,
3256 				"failed to create free space tree: %d", ret);
3257 			close_ctree(fs_info);
3258 			return ret;
3259 		}
3260 	}
3261 
3262 	down_read(&fs_info->cleanup_work_sem);
3263 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3264 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3265 		up_read(&fs_info->cleanup_work_sem);
3266 		close_ctree(fs_info);
3267 		return ret;
3268 	}
3269 	up_read(&fs_info->cleanup_work_sem);
3270 
3271 	ret = btrfs_resume_balance_async(fs_info);
3272 	if (ret) {
3273 		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3274 		close_ctree(fs_info);
3275 		return ret;
3276 	}
3277 
3278 	ret = btrfs_resume_dev_replace_async(fs_info);
3279 	if (ret) {
3280 		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3281 		close_ctree(fs_info);
3282 		return ret;
3283 	}
3284 
3285 	btrfs_qgroup_rescan_resume(fs_info);
3286 
3287 	if (!fs_info->uuid_root) {
3288 		btrfs_info(fs_info, "creating UUID tree");
3289 		ret = btrfs_create_uuid_tree(fs_info);
3290 		if (ret) {
3291 			btrfs_warn(fs_info,
3292 				"failed to create the UUID tree: %d", ret);
3293 			close_ctree(fs_info);
3294 			return ret;
3295 		}
3296 	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3297 		   fs_info->generation !=
3298 				btrfs_super_uuid_tree_generation(disk_super)) {
3299 		btrfs_info(fs_info, "checking UUID tree");
3300 		ret = btrfs_check_uuid_tree(fs_info);
3301 		if (ret) {
3302 			btrfs_warn(fs_info,
3303 				"failed to check the UUID tree: %d", ret);
3304 			close_ctree(fs_info);
3305 			return ret;
3306 		}
3307 	} else {
3308 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3309 	}
3310 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3311 
3312 	/*
3313 	 * backuproot only affects mount behavior, and if open_ctree succeeded,
3314 	 * no need to keep the flag
3315 	 */
3316 	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3317 
3318 	return 0;
3319 
3320 fail_qgroup:
3321 	btrfs_free_qgroup_config(fs_info);
3322 fail_trans_kthread:
3323 	kthread_stop(fs_info->transaction_kthread);
3324 	btrfs_cleanup_transaction(fs_info);
3325 	btrfs_free_fs_roots(fs_info);
3326 fail_cleaner:
3327 	kthread_stop(fs_info->cleaner_kthread);
3328 
3329 	/*
3330 	 * make sure we're done with the btree inode before we stop our
3331 	 * kthreads
3332 	 */
3333 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3334 
3335 fail_sysfs:
3336 	btrfs_sysfs_remove_mounted(fs_info);
3337 
3338 fail_fsdev_sysfs:
3339 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3340 
3341 fail_block_groups:
3342 	btrfs_put_block_group_cache(fs_info);
3343 
3344 fail_tree_roots:
3345 	free_root_pointers(fs_info, 1);
3346 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3347 
3348 fail_sb_buffer:
3349 	btrfs_stop_all_workers(fs_info);
3350 	btrfs_free_block_groups(fs_info);
3351 fail_csum:
3352 	btrfs_free_csum_hash(fs_info);
3353 fail_alloc:
3354 fail_iput:
3355 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3356 
3357 	iput(fs_info->btree_inode);
3358 fail_bio_counter:
3359 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
3360 fail_delalloc_bytes:
3361 	percpu_counter_destroy(&fs_info->delalloc_bytes);
3362 fail_dirty_metadata_bytes:
3363 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3364 fail_dio_bytes:
3365 	percpu_counter_destroy(&fs_info->dio_bytes);
3366 fail_srcu:
3367 	cleanup_srcu_struct(&fs_info->subvol_srcu);
3368 fail:
3369 	btrfs_free_stripe_hash_table(fs_info);
3370 	btrfs_close_devices(fs_info->fs_devices);
3371 	return err;
3372 
3373 recovery_tree_root:
3374 	if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3375 		goto fail_tree_roots;
3376 
3377 	free_root_pointers(fs_info, 0);
3378 
3379 	/* don't use the log in recovery mode, it won't be valid */
3380 	btrfs_set_super_log_root(disk_super, 0);
3381 
3382 	/* we can't trust the free space cache either */
3383 	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3384 
3385 	ret = next_root_backup(fs_info, fs_info->super_copy,
3386 			       &num_backups_tried, &backup_index);
3387 	if (ret == -1)
3388 		goto fail_block_groups;
3389 	goto retry_root_backup;
3390 }
3391 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3392 
3393 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3394 {
3395 	if (uptodate) {
3396 		set_buffer_uptodate(bh);
3397 	} else {
3398 		struct btrfs_device *device = (struct btrfs_device *)
3399 			bh->b_private;
3400 
3401 		btrfs_warn_rl_in_rcu(device->fs_info,
3402 				"lost page write due to IO error on %s",
3403 					  rcu_str_deref(device->name));
3404 		/* note, we don't set_buffer_write_io_error because we have
3405 		 * our own ways of dealing with the IO errors
3406 		 */
3407 		clear_buffer_uptodate(bh);
3408 		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3409 	}
3410 	unlock_buffer(bh);
3411 	put_bh(bh);
3412 }
3413 
3414 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3415 			struct buffer_head **bh_ret)
3416 {
3417 	struct buffer_head *bh;
3418 	struct btrfs_super_block *super;
3419 	u64 bytenr;
3420 
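	/*
	 * Super block copies sit at fixed offsets: 64K for the primary,
	 * then 64M and 256G for the mirrors (see btrfs_sb_offset()).  A
	 * copy that would extend past the end of the device is not there.
	 */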
3421 	bytenr = btrfs_sb_offset(copy_num);
3422 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3423 		return -EINVAL;
3424 
3425 	bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
3426 	/*
3427 	 * If we fail to read from the underlying device, the best option
3428 	 * we have for now is to return -EIO.
3429 	 */
3430 	if (!bh)
3431 		return -EIO;
3432 
3433 	super = (struct btrfs_super_block *)bh->b_data;
3434 	if (btrfs_super_bytenr(super) != bytenr ||
3435 		    btrfs_super_magic(super) != BTRFS_MAGIC) {
3436 		brelse(bh);
3437 		return -EINVAL;
3438 	}
3439 
3440 	*bh_ret = bh;
3441 	return 0;
3442 }
3443 
3445 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3446 {
3447 	struct buffer_head *bh;
3448 	struct buffer_head *latest = NULL;
3449 	struct btrfs_super_block *super;
3450 	int i;
3451 	u64 transid = 0;
3452 	int ret = -EINVAL;
3453 
3454 	/* we would like to check all the supers, but that would make
3455 	 * a btrfs mount succeed after a mkfs from a different FS.
3456 	 * So for now we only check the primary super; scanning the later
3457 	 * supers (up to BTRFS_SUPER_MIRROR_MAX) would need a mount option.
3458 	 */
3459 	for (i = 0; i < 1; i++) {
3460 		ret = btrfs_read_dev_one_super(bdev, i, &bh);
3461 		if (ret)
3462 			continue;
3463 
3464 		super = (struct btrfs_super_block *)bh->b_data;
3465 
3466 		if (!latest || btrfs_super_generation(super) > transid) {
3467 			brelse(latest);
3468 			latest = bh;
3469 			transid = btrfs_super_generation(super);
3470 		} else {
3471 			brelse(bh);
3472 		}
3473 	}
3474 
3475 	if (!latest)
3476 		return ERR_PTR(ret);
3477 
3478 	return latest;
3479 }
3480 
3481 /*
3482  * Write superblock @sb to @device. Do not wait for completion, all the
3483  * buffer heads we write are pinned.
3484  *
3485  * Write @max_mirrors copies of the superblock, where 0 means the default:
3486  * all copies that fit the expected device size at commit time. Note that
3487  * @max_mirrors must be the same for the write and wait phases.
3488  *
3489  * Return 0 if at least one copy was submitted, -1 if all copies failed.
3490  */
3491 static int write_dev_supers(struct btrfs_device *device,
3492 			    struct btrfs_super_block *sb, int max_mirrors)
3493 {
3494 	struct btrfs_fs_info *fs_info = device->fs_info;
3495 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3496 	struct buffer_head *bh;
3497 	int i;
3498 	int ret;
3499 	int errors = 0;
3500 	u64 bytenr;
3501 	int op_flags;
3502 
3503 	if (max_mirrors == 0)
3504 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3505 
3506 	shash->tfm = fs_info->csum_shash;
3507 
3508 	for (i = 0; i < max_mirrors; i++) {
3509 		bytenr = btrfs_sb_offset(i);
3510 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3511 		    device->commit_total_bytes)
3512 			break;
3513 
3514 		btrfs_set_super_bytenr(sb, bytenr);
3515 
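		/*
		 * The superblock checksum covers everything after the csum
		 * field itself, i.e. the byte range
		 * [BTRFS_CSUM_SIZE, BTRFS_SUPER_INFO_SIZE).
		 */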
3516 		crypto_shash_init(shash);
3517 		crypto_shash_update(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3518 				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
3519 		crypto_shash_final(shash, sb->csum);
3520 
3521 		/* One reference for us; it is dropped later in wait_dev_supers() */
3522 		bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
3523 			      BTRFS_SUPER_INFO_SIZE);
3524 		if (!bh) {
3525 			btrfs_err(device->fs_info,
3526 			    "couldn't get super buffer head for bytenr %llu",
3527 			    bytenr);
3528 			errors++;
3529 			continue;
3530 		}
3531 
3532 		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3533 
3534 		/* one reference for submit_bh */
3535 		get_bh(bh);
3536 
3537 		set_buffer_uptodate(bh);
3538 		lock_buffer(bh);
3539 		bh->b_end_io = btrfs_end_buffer_write_sync;
3540 		bh->b_private = device;
3541 
3542 		/*
3543 		 * We FUA the first super; the others are allowed
3544 		 * to go down lazily.
3545 		 */
3546 		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3547 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3548 			op_flags |= REQ_FUA;
3549 		ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3550 		if (ret)
3551 			errors++;
3552 	}
3553 	return errors < i ? 0 : -1;
3554 }
3555 
3556 /*
3557  * Wait for write completion of superblocks done by write_dev_supers();
3558  * @max_mirrors must be the same for the write and wait phases.
3559  *
3560  * Return 0 on success, -1 when the primary super block copy failed or
3561  * all copies errored.
3562  */
3563 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3564 {
3565 	struct buffer_head *bh;
3566 	int i;
3567 	int errors = 0;
3568 	bool primary_failed = false;
3569 	u64 bytenr;
3570 
3571 	if (max_mirrors == 0)
3572 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3573 
3574 	for (i = 0; i < max_mirrors; i++) {
3575 		bytenr = btrfs_sb_offset(i);
3576 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3577 		    device->commit_total_bytes)
3578 			break;
3579 
3580 		bh = __find_get_block(device->bdev,
3581 				      bytenr / BTRFS_BDEV_BLOCKSIZE,
3582 				      BTRFS_SUPER_INFO_SIZE);
3583 		if (!bh) {
3584 			errors++;
3585 			if (i == 0)
3586 				primary_failed = true;
3587 			continue;
3588 		}
3589 		wait_on_buffer(bh);
3590 		if (!buffer_uptodate(bh)) {
3591 			errors++;
3592 			if (i == 0)
3593 				primary_failed = true;
3594 		}
3595 
3596 		/* drop our reference */
3597 		brelse(bh);
3598 
3599 		/* drop the reference from the writing run */
3600 		brelse(bh);
3601 	}
3602 
3603 	/* log error, force error return */
3604 	if (primary_failed) {
3605 		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3606 			  device->devid);
3607 		return -1;
3608 	}
3609 
3610 	return errors < i ? 0 : -1;
3611 }
3612 
3613 /*
3614  * endio for write_dev_flush; this wakes anyone waiting
3615  * for the barrier when it is done
3616  */
3617 static void btrfs_end_empty_barrier(struct bio *bio)
3618 {
3619 	complete(bio->bi_private);
3620 }
3621 
3622 /*
3623  * Submit a flush request to the device if it supports it. Error handling is
3624  * done in the waiting counterpart.
3625  */
3626 static void write_dev_flush(struct btrfs_device *device)
3627 {
3628 	struct request_queue *q = bdev_get_queue(device->bdev);
3629 	struct bio *bio = device->flush_bio;
3630 
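	/* Nothing to flush if the device has no volatile write cache */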
3631 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3632 		return;
3633 
3634 	bio_reset(bio);
3635 	bio->bi_end_io = btrfs_end_empty_barrier;
3636 	bio_set_dev(bio, device->bdev);
3637 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3638 	init_completion(&device->flush_wait);
3639 	bio->bi_private = &device->flush_wait;
3640 
3641 	btrfsic_submit_bio(bio);
3642 	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3643 }
3644 
3645 /*
3646  * If the flush bio has been submitted by write_dev_flush, wait for it.
3647  */
3648 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3649 {
3650 	struct bio *bio = device->flush_bio;
3651 
3652 	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3653 		return BLK_STS_OK;
3654 
3655 	clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3656 	wait_for_completion_io(&device->flush_wait);
3657 
3658 	return bio->bi_status;
3659 }
3660 
3661 static int check_barrier_error(struct btrfs_fs_info *fs_info)
3662 {
3663 	if (!btrfs_check_rw_degradable(fs_info, NULL))
3664 		return -EIO;
3665 	return 0;
3666 }
3667 
3668 /*
3669  * send an empty flush down to each device in parallel,
3670  * then wait for them
3671  */
3672 static int barrier_all_devices(struct btrfs_fs_info *info)
3673 {
3674 	struct list_head *head;
3675 	struct btrfs_device *dev;
3676 	int errors_wait = 0;
3677 	blk_status_t ret;
3678 
3679 	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3680 	/* send down all the barriers */
3681 	head = &info->fs_devices->devices;
3682 	list_for_each_entry(dev, head, dev_list) {
3683 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3684 			continue;
3685 		if (!dev->bdev)
3686 			continue;
3687 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3688 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3689 			continue;
3690 
3691 		write_dev_flush(dev);
3692 		dev->last_flush_error = BLK_STS_OK;
3693 	}
3694 
3695 	/* wait for all the barriers */
3696 	list_for_each_entry(dev, head, dev_list) {
3697 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3698 			continue;
3699 		if (!dev->bdev) {
3700 			errors_wait++;
3701 			continue;
3702 		}
3703 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3704 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3705 			continue;
3706 
3707 		ret = wait_dev_flush(dev);
3708 		if (ret) {
3709 			dev->last_flush_error = ret;
3710 			btrfs_dev_stat_inc_and_print(dev,
3711 					BTRFS_DEV_STAT_FLUSH_ERRS);
3712 			errors_wait++;
3713 		}
3714 	}
3715 
3716 	if (errors_wait) {
3717 		/*
3718 		 * We need the status of all disks to arrive at the
3719 		 * overall volume status, so error checking is pushed
3720 		 * to a separate helper.
3721 		 */
3722 		return check_barrier_error(info);
3723 	}
3724 	return 0;
3725 }
3726 
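/*
 * Return the number of device failures the profiles in @flags can
 * tolerate, i.e. the minimum over all selected profiles.  For example,
 * flags covering RAID1 and RAID6 block groups yield min(1, 2) = 1,
 * while any SINGLE/RAID0/DUP profile drags the result down to 0.
 */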
3727 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3728 {
3729 	int raid_type;
3730 	int min_tolerated = INT_MAX;
3731 
3732 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3733 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3734 		min_tolerated = min_t(int, min_tolerated,
3735 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3736 				    tolerated_failures);
3737 
3738 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3739 		if (raid_type == BTRFS_RAID_SINGLE)
3740 			continue;
3741 		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3742 			continue;
3743 		min_tolerated = min_t(int, min_tolerated,
3744 				    btrfs_raid_array[raid_type].
3745 				    tolerated_failures);
3746 	}
3747 
3748 	if (min_tolerated == INT_MAX) {
3749 		pr_warn("BTRFS: unknown raid flag: %llu", flags);
3750 		min_tolerated = 0;
3751 	}
3752 
3753 	return min_tolerated;
3754 }
3755 
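/*
 * Write the superblock to all writeable devices: flush every device
 * first (unless nobarrier is set), then submit the super copies via
 * write_dev_supers() and wait for completion in a second pass.  Up to
 * btrfs_super_num_devices() - 1 per-device failures are tolerated.
 */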
3756 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3757 {
3758 	struct list_head *head;
3759 	struct btrfs_device *dev;
3760 	struct btrfs_super_block *sb;
3761 	struct btrfs_dev_item *dev_item;
3762 	int ret;
3763 	int do_barriers;
3764 	int max_errors;
3765 	int total_errors = 0;
3766 	u64 flags;
3767 
3768 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3769 
3770 	/*
3771 	 * max_mirrors == 0 indicates we're called from commit_transaction,
3772 	 * not from fsync, where the tree roots in fs_info may not yet be
3773 	 * consistent on disk.
3774 	 */
3775 	if (max_mirrors == 0)
3776 		backup_super_roots(fs_info);
3777 
3778 	sb = fs_info->super_for_commit;
3779 	dev_item = &sb->dev_item;
3780 
3781 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3782 	head = &fs_info->fs_devices->devices;
3783 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3784 
3785 	if (do_barriers) {
3786 		ret = barrier_all_devices(fs_info);
3787 		if (ret) {
3788 			mutex_unlock(
3789 				&fs_info->fs_devices->device_list_mutex);
3790 			btrfs_handle_fs_error(fs_info, ret,
3791 					      "errors while submitting device barriers.");
3792 			return ret;
3793 		}
3794 	}
3795 
3796 	list_for_each_entry(dev, head, dev_list) {
3797 		if (!dev->bdev) {
3798 			total_errors++;
3799 			continue;
3800 		}
3801 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3802 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3803 			continue;
3804 
3805 		btrfs_set_stack_device_generation(dev_item, 0);
3806 		btrfs_set_stack_device_type(dev_item, dev->type);
3807 		btrfs_set_stack_device_id(dev_item, dev->devid);
3808 		btrfs_set_stack_device_total_bytes(dev_item,
3809 						   dev->commit_total_bytes);
3810 		btrfs_set_stack_device_bytes_used(dev_item,
3811 						  dev->commit_bytes_used);
3812 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3813 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3814 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3815 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3816 		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
3817 		       BTRFS_FSID_SIZE);
3818 
3819 		flags = btrfs_super_flags(sb);
3820 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3821 
3822 		ret = btrfs_validate_write_super(fs_info, sb);
3823 		if (ret < 0) {
3824 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3825 			btrfs_handle_fs_error(fs_info, -EUCLEAN,
3826 				"unexpected superblock corruption detected");
3827 			return -EUCLEAN;
3828 		}
3829 
3830 		ret = write_dev_supers(dev, sb, max_mirrors);
3831 		if (ret)
3832 			total_errors++;
3833 	}
3834 	if (total_errors > max_errors) {
3835 		btrfs_err(fs_info, "%d errors while writing supers",
3836 			  total_errors);
3837 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3838 
3839 		/* FUA is masked off if unsupported and can't be the reason */
3840 		btrfs_handle_fs_error(fs_info, -EIO,
3841 				      "%d errors while writing supers",
3842 				      total_errors);
3843 		return -EIO;
3844 	}
3845 
3846 	total_errors = 0;
3847 	list_for_each_entry(dev, head, dev_list) {
3848 		if (!dev->bdev)
3849 			continue;
3850 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3851 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3852 			continue;
3853 
3854 		ret = wait_dev_supers(dev, max_mirrors);
3855 		if (ret)
3856 			total_errors++;
3857 	}
3858 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3859 	if (total_errors > max_errors) {
3860 		btrfs_handle_fs_error(fs_info, -EIO,
3861 				      "%d errors while writing supers",
3862 				      total_errors);
3863 		return -EIO;
3864 	}
3865 	return 0;
3866 }
3867 
3868 /* Drop a fs root from the radix tree and free it. */
3869 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3870 				  struct btrfs_root *root)
3871 {
3872 	spin_lock(&fs_info->fs_roots_radix_lock);
3873 	radix_tree_delete(&fs_info->fs_roots_radix,
3874 			  (unsigned long)root->root_key.objectid);
3875 	spin_unlock(&fs_info->fs_roots_radix_lock);
3876 
3877 	if (btrfs_root_refs(&root->root_item) == 0)
3878 		synchronize_srcu(&fs_info->subvol_srcu);
3879 
3880 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3881 		btrfs_free_log(NULL, root);
3882 		if (root->reloc_root) {
3883 			free_extent_buffer(root->reloc_root->node);
3884 			free_extent_buffer(root->reloc_root->commit_root);
3885 			btrfs_put_fs_root(root->reloc_root);
3886 			root->reloc_root = NULL;
3887 		}
3888 	}
3889 
3890 	if (root->free_ino_pinned)
3891 		__btrfs_remove_free_space_cache(root->free_ino_pinned);
3892 	if (root->free_ino_ctl)
3893 		__btrfs_remove_free_space_cache(root->free_ino_ctl);
3894 	btrfs_free_fs_root(root);
3895 }
3896 
3897 void btrfs_free_fs_root(struct btrfs_root *root)
3898 {
3899 	iput(root->ino_cache_inode);
3900 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3901 	if (root->anon_dev)
3902 		free_anon_bdev(root->anon_dev);
3903 	if (root->subv_writers)
3904 		btrfs_free_subvolume_writers(root->subv_writers);
3905 	free_extent_buffer(root->node);
3906 	free_extent_buffer(root->commit_root);
3907 	kfree(root->free_ino_ctl);
3908 	kfree(root->free_ino_pinned);
3909 	btrfs_put_fs_root(root);
3910 }
3911 
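/*
 * Run orphan cleanup on all fs roots.  Roots are collected from the
 * radix tree in batches of up to 8 under SRCU protection and grabbed
 * with an extra reference so they can't go away while we use them.
 */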
3912 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3913 {
3914 	u64 root_objectid = 0;
3915 	struct btrfs_root *gang[8];
3916 	int i = 0;
3917 	int err = 0;
3918 	unsigned int ret = 0;
3919 	int index;
3920 
3921 	while (1) {
3922 		index = srcu_read_lock(&fs_info->subvol_srcu);
3923 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3924 					     (void **)gang, root_objectid,
3925 					     ARRAY_SIZE(gang));
3926 		if (!ret) {
3927 			srcu_read_unlock(&fs_info->subvol_srcu, index);
3928 			break;
3929 		}
3930 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
3931 
3932 		for (i = 0; i < ret; i++) {
3933 			/* Avoid grabbing roots in dead_roots */
3934 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3935 				gang[i] = NULL;
3936 				continue;
3937 			}
3938 			/* grab all the search results for later use */
3939 			gang[i] = btrfs_grab_fs_root(gang[i]);
3940 		}
3941 		srcu_read_unlock(&fs_info->subvol_srcu, index);
3942 
3943 		for (i = 0; i < ret; i++) {
3944 			if (!gang[i])
3945 				continue;
3946 			root_objectid = gang[i]->root_key.objectid;
3947 			err = btrfs_orphan_cleanup(gang[i]);
3948 			if (err)
3949 				break;
3950 			btrfs_put_fs_root(gang[i]);
3951 		}
3952 		root_objectid++;
3953 	}
3954 
3955 	/* release the uncleaned roots due to error */
3956 	for (; i < ret; i++) {
3957 		if (gang[i])
3958 			btrfs_put_fs_root(gang[i]);
3959 	}
3960 	return err;
3961 }
3962 
3963 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3964 {
3965 	struct btrfs_root *root = fs_info->tree_root;
3966 	struct btrfs_trans_handle *trans;
3967 
3968 	mutex_lock(&fs_info->cleaner_mutex);
3969 	btrfs_run_delayed_iputs(fs_info);
3970 	mutex_unlock(&fs_info->cleaner_mutex);
3971 	wake_up_process(fs_info->cleaner_kthread);
3972 
3973 	/* wait until ongoing cleanup work is done */
3974 	down_write(&fs_info->cleanup_work_sem);
3975 	up_write(&fs_info->cleanup_work_sem);
3976 
3977 	trans = btrfs_join_transaction(root);
3978 	if (IS_ERR(trans))
3979 		return PTR_ERR(trans);
3980 	return btrfs_commit_transaction(trans);
3981 }
3982 
3983 void close_ctree(struct btrfs_fs_info *fs_info)
3984 {
3985 	int ret;
3986 
3987 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3988 	/*
3989 	 * We don't want the cleaner to start new transactions, add more delayed
3990 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
3991 	 * because that frees the task_struct, and the transaction kthread might
3992 	 * still try to wake up the cleaner.
3993 	 */
3994 	kthread_park(fs_info->cleaner_kthread);
3995 
3996 	/* wait for the qgroup rescan worker to stop */
3997 	btrfs_qgroup_wait_for_completion(fs_info, false);
3998 
3999 	/* wait for the uuid_scan task to finish */
4000 	down(&fs_info->uuid_tree_rescan_sem);
4001 	/* avoid complaints from lockdep et al., set sem back to initial state */
4002 	up(&fs_info->uuid_tree_rescan_sem);
4003 
4004 	/* pause restriper - we want to resume on mount */
4005 	btrfs_pause_balance(fs_info);
4006 
4007 	btrfs_dev_replace_suspend_for_unmount(fs_info);
4008 
4009 	btrfs_scrub_cancel(fs_info);
4010 
4011 	/* wait for any defraggers to finish */
4012 	wait_event(fs_info->transaction_wait,
4013 		   (atomic_read(&fs_info->defrag_running) == 0));
4014 
4015 	/* clear out the rbtree of defraggable inodes */
4016 	btrfs_cleanup_defrag_inodes(fs_info);
4017 
4018 	cancel_work_sync(&fs_info->async_reclaim_work);
4019 
4020 	if (!sb_rdonly(fs_info->sb)) {
4021 		/*
4022 		 * The cleaner kthread is parked, so do one final pass over
4023 		 * unused block groups.
4024 		 */
4025 		btrfs_delete_unused_bgs(fs_info);
4026 
4027 		ret = btrfs_commit_super(fs_info);
4028 		if (ret)
4029 			btrfs_err(fs_info, "commit super ret %d", ret);
4030 	}
4031 
4032 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
4033 	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
4034 		btrfs_error_commit_super(fs_info);
4035 
4036 	kthread_stop(fs_info->transaction_kthread);
4037 	kthread_stop(fs_info->cleaner_kthread);
4038 
4039 	ASSERT(list_empty(&fs_info->delayed_iputs));
4040 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4041 
4042 	btrfs_free_qgroup_config(fs_info);
4043 	ASSERT(list_empty(&fs_info->delalloc_roots));
4044 
4045 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4046 		btrfs_info(fs_info, "at unmount delalloc count %lld",
4047 		       percpu_counter_sum(&fs_info->delalloc_bytes));
4048 	}
4049 
4050 	if (percpu_counter_sum(&fs_info->dio_bytes))
4051 		btrfs_info(fs_info, "at unmount dio bytes count %lld",
4052 			   percpu_counter_sum(&fs_info->dio_bytes));
4053 
4054 	btrfs_sysfs_remove_mounted(fs_info);
4055 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4056 
4057 	btrfs_free_fs_roots(fs_info);
4058 
4059 	btrfs_put_block_group_cache(fs_info);
4060 
4061 	/*
4062 	 * We must make sure no read request can be submitted
4063 	 * after we have stopped all workers.
4064 	 */
4065 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4066 	btrfs_stop_all_workers(fs_info);
4067 
4068 	btrfs_free_block_groups(fs_info);
4069 
4070 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4071 	free_root_pointers(fs_info, 1);
4072 
4073 	iput(fs_info->btree_inode);
4074 
4075 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4076 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
4077 		btrfsic_unmount(fs_info->fs_devices);
4078 #endif
4079 
4080 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
4081 	btrfs_close_devices(fs_info->fs_devices);
4082 
4083 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
4084 	percpu_counter_destroy(&fs_info->delalloc_bytes);
4085 	percpu_counter_destroy(&fs_info->dio_bytes);
4086 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
4087 	cleanup_srcu_struct(&fs_info->subvol_srcu);
4088 
4089 	btrfs_free_csum_hash(fs_info);
4090 	btrfs_free_stripe_hash_table(fs_info);
4091 	btrfs_free_ref_cache(fs_info);
4092 }
4093 
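/*
 * Return 1 if the buffer is uptodate and its generation matches
 * @parent_transid, 0 if not, or -EAGAIN when @atomic is set and the
 * check would have to block.
 */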
4094 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4095 			  int atomic)
4096 {
4097 	int ret;
4098 	struct inode *btree_inode = buf->pages[0]->mapping->host;
4099 
4100 	ret = extent_buffer_uptodate(buf);
4101 	if (!ret)
4102 		return ret;
4103 
4104 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4105 				    parent_transid, atomic);
4106 	if (ret == -EAGAIN)
4107 		return ret;
4108 	return !ret;
4109 }
4110 
4111 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4112 {
4113 	struct btrfs_fs_info *fs_info;
4114 	struct btrfs_root *root;
4115 	u64 transid = btrfs_header_generation(buf);
4116 	int was_dirty;
4117 
4118 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4119 	/*
4120 	 * This is a fast path so only do this check if we have sanity tests
4121 	 * enabled.  Normal people shouldn't be marking unmapped buffers
4122 	 * dirty outside of the sanity tests.
4123 	 */
4124 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4125 		return;
4126 #endif
4127 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4128 	fs_info = root->fs_info;
4129 	btrfs_assert_tree_locked(buf);
4130 	if (transid != fs_info->generation)
4131 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4132 			buf->start, transid, fs_info->generation);
4133 	was_dirty = set_extent_buffer_dirty(buf);
4134 	if (!was_dirty)
4135 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4136 					 buf->len,
4137 					 fs_info->dirty_metadata_batch);
4138 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4139 	/*
4140 	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
4141 	 * but the item data not yet updated, so only check item pointers
4142 	 * here, not item data.
4143 	 */
4144 	if (btrfs_header_level(buf) == 0 &&
4145 	    btrfs_check_leaf_relaxed(buf)) {
4146 		btrfs_print_leaf(buf);
4147 		ASSERT(0);
4148 	}
4149 #endif
4150 }
4151 
4152 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4153 					int flush_delayed)
4154 {
4155 	/*
4156 	 * It looks as though older kernels can get into trouble with
4157 	 * this code; they end up stuck in balance_dirty_pages() forever.
4158 	 */
4159 	int ret;
4160 
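	/* Tasks already in memory reclaim must not be throttled here */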
4161 	if (current->flags & PF_MEMALLOC)
4162 		return;
4163 
4164 	if (flush_delayed)
4165 		btrfs_balance_delayed_items(fs_info);
4166 
4167 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4168 				     BTRFS_DIRTY_METADATA_THRESH,
4169 				     fs_info->dirty_metadata_batch);
4170 	if (ret > 0)
4171 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4173 }
4174 
4175 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4176 {
4177 	__btrfs_btree_balance_dirty(fs_info, 1);
4178 }
4179 
4180 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4181 {
4182 	__btrfs_btree_balance_dirty(fs_info, 0);
4183 }
4184 
4185 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
4186 		      struct btrfs_key *first_key)
4187 {
4188 	return btree_read_extent_buffer_pages(buf, parent_transid,
4189 					      level, first_key);
4190 }
4191 
4192 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4193 {
4194 	/* cleanup FS via transaction */
4195 	btrfs_cleanup_transaction(fs_info);
4196 
4197 	mutex_lock(&fs_info->cleaner_mutex);
4198 	btrfs_run_delayed_iputs(fs_info);
4199 	mutex_unlock(&fs_info->cleaner_mutex);
4200 
4201 	down_write(&fs_info->cleanup_work_sem);
4202 	up_write(&fs_info->cleanup_work_sem);
4203 }
4204 
4205 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4206 {
4207 	struct btrfs_ordered_extent *ordered;
4208 
4209 	spin_lock(&root->ordered_extent_lock);
4210 	/*
4211 	 * This will just short-circuit the ordered completion machinery, which
4212 	 * makes sure the ordered extent gets properly cleaned up.
4213 	 */
4214 	list_for_each_entry(ordered, &root->ordered_extents,
4215 			    root_extent_list)
4216 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4217 	spin_unlock(&root->ordered_extent_lock);
4218 }
4219 
4220 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4221 {
4222 	struct btrfs_root *root;
4223 	struct list_head splice;
4224 
4225 	INIT_LIST_HEAD(&splice);
4226 
4227 	spin_lock(&fs_info->ordered_root_lock);
4228 	list_splice_init(&fs_info->ordered_roots, &splice);
4229 	while (!list_empty(&splice)) {
4230 		root = list_first_entry(&splice, struct btrfs_root,
4231 					ordered_root);
4232 		list_move_tail(&root->ordered_root,
4233 			       &fs_info->ordered_roots);
4234 
4235 		spin_unlock(&fs_info->ordered_root_lock);
4236 		btrfs_destroy_ordered_extents(root);
4237 
4238 		cond_resched();
4239 		spin_lock(&fs_info->ordered_root_lock);
4240 	}
4241 	spin_unlock(&fs_info->ordered_root_lock);
4242 
4243 	/*
4244 	 * We need this here because if we've been flipped read-only we won't
4245 	 * get sync() from the umount, so we need to make sure any ordered
4246 	 * extents that haven't had their dirty pages IO start writeout yet
4247 	 * actually get run and error out properly.
4248 	 */
4249 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4250 }
4251 
4252 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4253 				      struct btrfs_fs_info *fs_info)
4254 {
4255 	struct rb_node *node;
4256 	struct btrfs_delayed_ref_root *delayed_refs;
4257 	struct btrfs_delayed_ref_node *ref;
4258 	int ret = 0;
4259 
4260 	delayed_refs = &trans->delayed_refs;
4261 
4262 	spin_lock(&delayed_refs->lock);
4263 	if (atomic_read(&delayed_refs->num_entries) == 0) {
4264 		spin_unlock(&delayed_refs->lock);
4265 		btrfs_info(fs_info, "delayed_refs has NO entry");
4266 		return ret;
4267 	}
4268 
4269 	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4270 		struct btrfs_delayed_ref_head *head;
4271 		struct rb_node *n;
4272 		bool pin_bytes = false;
4273 
4274 		head = rb_entry(node, struct btrfs_delayed_ref_head,
4275 				href_node);
4276 		if (btrfs_delayed_ref_lock(delayed_refs, head))
4277 			continue;
4278 
4279 		spin_lock(&head->lock);
4280 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4281 			ref = rb_entry(n, struct btrfs_delayed_ref_node,
4282 				       ref_node);
4283 			ref->in_tree = 0;
4284 			rb_erase_cached(&ref->ref_node, &head->ref_tree);
4285 			RB_CLEAR_NODE(&ref->ref_node);
4286 			if (!list_empty(&ref->add_list))
4287 				list_del(&ref->add_list);
4288 			atomic_dec(&delayed_refs->num_entries);
4289 			btrfs_put_delayed_ref(ref);
4290 		}
4291 		if (head->must_insert_reserved)
4292 			pin_bytes = true;
4293 		btrfs_free_delayed_extent_op(head->extent_op);
4294 		btrfs_delete_ref_head(delayed_refs, head);
4295 		spin_unlock(&head->lock);
4296 		spin_unlock(&delayed_refs->lock);
4297 		mutex_unlock(&head->mutex);
4298 
4299 		if (pin_bytes)
4300 			btrfs_pin_extent(fs_info, head->bytenr,
4301 					 head->num_bytes, 1);
4302 		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4303 		btrfs_put_delayed_ref_head(head);
4304 		cond_resched();
4305 		spin_lock(&delayed_refs->lock);
4306 	}
4307 
4308 	spin_unlock(&delayed_refs->lock);
4309 
4310 	return ret;
4311 }
4312 
4313 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4314 {
4315 	struct btrfs_inode *btrfs_inode;
4316 	struct list_head splice;
4317 
4318 	INIT_LIST_HEAD(&splice);
4319 
4320 	spin_lock(&root->delalloc_lock);
4321 	list_splice_init(&root->delalloc_inodes, &splice);
4322 
4323 	while (!list_empty(&splice)) {
4324 		struct inode *inode = NULL;
4325 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4326 					       delalloc_inodes);
4327 		__btrfs_del_delalloc_inode(root, btrfs_inode);
4328 		spin_unlock(&root->delalloc_lock);
4329 
4330 		/*
4331 		 * Make sure we get a live inode and that it won't disappear
4332 		 * in the meantime.
4333 		 */
4334 		inode = igrab(&btrfs_inode->vfs_inode);
4335 		if (inode) {
4336 			invalidate_inode_pages2(inode->i_mapping);
4337 			iput(inode);
4338 		}
4339 		spin_lock(&root->delalloc_lock);
4340 	}
4341 	spin_unlock(&root->delalloc_lock);
4342 }
4343 
4344 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4345 {
4346 	struct btrfs_root *root;
4347 	struct list_head splice;
4348 
4349 	INIT_LIST_HEAD(&splice);
4350 
4351 	spin_lock(&fs_info->delalloc_root_lock);
4352 	list_splice_init(&fs_info->delalloc_roots, &splice);
4353 	while (!list_empty(&splice)) {
4354 		root = list_first_entry(&splice, struct btrfs_root,
4355 					 delalloc_root);
4356 		root = btrfs_grab_fs_root(root);
4357 		BUG_ON(!root);
4358 		spin_unlock(&fs_info->delalloc_root_lock);
4359 
4360 		btrfs_destroy_delalloc_inodes(root);
4361 		btrfs_put_fs_root(root);
4362 
4363 		spin_lock(&fs_info->delalloc_root_lock);
4364 	}
4365 	spin_unlock(&fs_info->delalloc_root_lock);
4366 }
4367 
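/*
 * Clear @mark from @dirty_pages and release the extent buffers backing
 * the marked ranges, after waiting for any pending writeback on them.
 */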
4368 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4369 					struct extent_io_tree *dirty_pages,
4370 					int mark)
4371 {
4372 	int ret;
4373 	struct extent_buffer *eb;
4374 	u64 start = 0;
4375 	u64 end;
4376 
4377 	while (1) {
4378 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4379 					    mark, NULL);
4380 		if (ret)
4381 			break;
4382 
4383 		clear_extent_bits(dirty_pages, start, end, mark);
4384 		while (start <= end) {
4385 			eb = find_extent_buffer(fs_info, start);
4386 			start += fs_info->nodesize;
4387 			if (!eb)
4388 				continue;
4389 			wait_on_extent_buffer_writeback(eb);
4390 
4391 			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4392 					       &eb->bflags))
4393 				clear_extent_buffer_dirty(eb);
4394 			free_extent_buffer_stale(eb);
4395 		}
4396 	}
4397 
4398 	return ret;
4399 }
4400 
4401 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4402 				       struct extent_io_tree *pinned_extents)
4403 {
4404 	struct extent_io_tree *unpin;
4405 	u64 start;
4406 	u64 end;
4407 	int ret;
4408 	bool loop = true;
4409 
4410 	unpin = pinned_extents;
4411 again:
4412 	while (1) {
4413 		struct extent_state *cached_state = NULL;
4414 
4415 		/*
4416 		 * btrfs_finish_extent_commit() may get the same range as
4417 		 * ours between find_first_extent_bit and clear_extent_dirty.
4418 		 * Hence, hold the unused_bg_unpin_mutex to avoid double
4419 		 * unpinning the same extent range.
4420 		 */
4421 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4422 		ret = find_first_extent_bit(unpin, 0, &start, &end,
4423 					    EXTENT_DIRTY, &cached_state);
4424 		if (ret) {
4425 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4426 			break;
4427 		}
4428 
4429 		clear_extent_dirty(unpin, start, end, &cached_state);
4430 		free_extent_state(cached_state);
4431 		btrfs_error_unpin_extent_range(fs_info, start, end);
4432 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4433 		cond_resched();
4434 	}
4435 
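	/*
	 * Pinned extents alternate between the two freed_extents trees
	 * across transactions, so make a second pass over the other tree.
	 */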
4436 	if (loop) {
4437 		if (unpin == &fs_info->freed_extents[0])
4438 			unpin = &fs_info->freed_extents[1];
4439 		else
4440 			unpin = &fs_info->freed_extents[0];
4441 		loop = false;
4442 		goto again;
4443 	}
4444 
4445 	return 0;
4446 }
4447 
4448 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4449 {
4450 	struct inode *inode;
4451 
4452 	inode = cache->io_ctl.inode;
4453 	if (inode) {
4454 		invalidate_inode_pages2(inode->i_mapping);
4455 		BTRFS_I(inode)->generation = 0;
4456 		cache->io_ctl.inode = NULL;
4457 		iput(inode);
4458 	}
4459 	btrfs_put_block_group(cache);
4460 }
4461 
4462 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4463 			     struct btrfs_fs_info *fs_info)
4464 {
4465 	struct btrfs_block_group_cache *cache;
4466 
4467 	spin_lock(&cur_trans->dirty_bgs_lock);
4468 	while (!list_empty(&cur_trans->dirty_bgs)) {
4469 		cache = list_first_entry(&cur_trans->dirty_bgs,
4470 					 struct btrfs_block_group_cache,
4471 					 dirty_list);
4472 
4473 		if (!list_empty(&cache->io_list)) {
4474 			spin_unlock(&cur_trans->dirty_bgs_lock);
4475 			list_del_init(&cache->io_list);
4476 			btrfs_cleanup_bg_io(cache);
4477 			spin_lock(&cur_trans->dirty_bgs_lock);
4478 		}
4479 
4480 		list_del_init(&cache->dirty_list);
4481 		spin_lock(&cache->lock);
4482 		cache->disk_cache_state = BTRFS_DC_ERROR;
4483 		spin_unlock(&cache->lock);
4484 
4485 		spin_unlock(&cur_trans->dirty_bgs_lock);
4486 		btrfs_put_block_group(cache);
4487 		btrfs_delayed_refs_rsv_release(fs_info, 1);
4488 		spin_lock(&cur_trans->dirty_bgs_lock);
4489 	}
4490 	spin_unlock(&cur_trans->dirty_bgs_lock);
4491 
4492 	/*
4493 	 * Refer to the definition of the io_bgs member for details on why
4494 	 * it's safe to use it without any locking.
4495 	 */
4496 	while (!list_empty(&cur_trans->io_bgs)) {
4497 		cache = list_first_entry(&cur_trans->io_bgs,
4498 					 struct btrfs_block_group_cache,
4499 					 io_list);
4500 
4501 		list_del_init(&cache->io_list);
4502 		spin_lock(&cache->lock);
4503 		cache->disk_cache_state = BTRFS_DC_ERROR;
4504 		spin_unlock(&cache->lock);
4505 		btrfs_cleanup_bg_io(cache);
4506 	}
4507 }
4508 
4509 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4510 				   struct btrfs_fs_info *fs_info)
4511 {
4512 	struct btrfs_device *dev, *tmp;
4513 
4514 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4515 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4516 	ASSERT(list_empty(&cur_trans->io_bgs));
4517 
4518 	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4519 				 post_commit_list) {
4520 		list_del_init(&dev->post_commit_list);
4521 	}
4522 
4523 	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4524 
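	/*
	 * Advance the aborted transaction through the remaining commit
	 * states, waking the waiters on each so they can notice the abort.
	 */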
4525 	cur_trans->state = TRANS_STATE_COMMIT_START;
4526 	wake_up(&fs_info->transaction_blocked_wait);
4527 
4528 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4529 	wake_up(&fs_info->transaction_wait);
4530 
4531 	btrfs_destroy_delayed_inodes(fs_info);
4532 	btrfs_assert_delayed_root_empty(fs_info);
4533 
4534 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4535 				     EXTENT_DIRTY);
4536 	btrfs_destroy_pinned_extent(fs_info,
4537 				    fs_info->pinned_extents);
4538 
4539 	cur_trans->state = TRANS_STATE_COMPLETED;
4540 	wake_up(&cur_trans->commit_wait);
4541 }
4542 
4543 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4544 {
4545 	struct btrfs_transaction *t;
4546 
4547 	mutex_lock(&fs_info->transaction_kthread_mutex);
4548 
4549 	spin_lock(&fs_info->trans_lock);
4550 	while (!list_empty(&fs_info->trans_list)) {
4551 		t = list_first_entry(&fs_info->trans_list,
4552 				     struct btrfs_transaction, list);
4553 		if (t->state >= TRANS_STATE_COMMIT_START) {
4554 			refcount_inc(&t->use_count);
4555 			spin_unlock(&fs_info->trans_lock);
4556 			btrfs_wait_for_commit(fs_info, t->transid);
4557 			btrfs_put_transaction(t);
4558 			spin_lock(&fs_info->trans_lock);
4559 			continue;
4560 		}
4561 		if (t == fs_info->running_transaction) {
4562 			t->state = TRANS_STATE_COMMIT_DOING;
4563 			spin_unlock(&fs_info->trans_lock);
4564 			/*
4565 			 * We wait for 0 num_writers since we don't hold a trans
4566 			 * handle open currently for this transaction.
4567 			 */
4568 			wait_event(t->writer_wait,
4569 				   atomic_read(&t->num_writers) == 0);
4570 		} else {
4571 			spin_unlock(&fs_info->trans_lock);
4572 		}
4573 		btrfs_cleanup_one_transaction(t, fs_info);
4574 
4575 		spin_lock(&fs_info->trans_lock);
4576 		if (t == fs_info->running_transaction)
4577 			fs_info->running_transaction = NULL;
4578 		list_del_init(&t->list);
4579 		spin_unlock(&fs_info->trans_lock);
4580 
4581 		btrfs_put_transaction(t);
4582 		trace_btrfs_transaction_commit(fs_info->tree_root);
4583 		spin_lock(&fs_info->trans_lock);
4584 	}
4585 	spin_unlock(&fs_info->trans_lock);
4586 	btrfs_destroy_all_ordered_extents(fs_info);
4587 	btrfs_destroy_delayed_inodes(fs_info);
4588 	btrfs_assert_delayed_root_empty(fs_info);
4589 	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4590 	btrfs_destroy_all_delalloc_inodes(fs_info);
4591 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4592 
4593 	return 0;
4594 }
4595 
4596 static const struct extent_io_ops btree_extent_io_ops = {
4597 	/* mandatory callbacks */
4598 	.submit_bio_hook = btree_submit_bio_hook,
4599 	.readpage_end_io_hook = btree_readpage_end_io_hook,
4600 };
4601