// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct bio *bio;
	extent_submit_bio_start_t *submit_bio_start;
	int mirror_num;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for the extent_buffer->lock of each root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other, so they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error "BTRFS_MAX_LEVEL changed, update the lockdep keysets below"
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif
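
/*
 * Illustration (not used by the code): with the table above, a level 2
 * node of the csum tree gets the lockdep class named "btrfs-csum-02".
 * A freshly read buffer is classified the same way the read end_io hook
 * does it further down in this file:
 *
 *	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
 *				       btrfs_header_level(eb));
 */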

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}
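
/*
 * The pair above implements the usual seed/finalize crc32c pattern.  A
 * minimal sketch for checksumming one contiguous kernel buffer ("data"
 * and "len" are assumed here, illustrative only):
 *
 *	u8 csum[BTRFS_CSUM_SIZE] = {0};
 *	u32 crc = ~(u32)0;
 *
 *	crc = btrfs_csum_data(data, crc, len);
 *	btrfs_csum_final(crc, csum);
 *
 * btrfs_csum_final() stores the bit-inverted crc in little endian order.
 */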

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char result[BTRFS_CSUM_SIZE];
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	memset(result, 0, BTRFS_CSUM_SIZE);

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}

	return 0;
}
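
/*
 * For reference, the block layout csum_tree_block() assumes (sketch):
 *
 *	[0 .. BTRFS_CSUM_SIZE)		checksum area of the header
 *	[BTRFS_CSUM_SIZE .. buf->len)	data covered by the crc
 *
 * Only the first csum_size bytes of the checksum area are written or
 * compared here; the remainder of the area is left untouched.
 */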

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		char result[sizeof(crc)];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range; the unused space is expected
		 * to be filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, sizeof(result)))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
				csum_type);
		ret = 1;
	}

	return ret;
}
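
/*
 * Usage sketch (roughly what the mount path does with this helper;
 * illustrative only, error handling elided):
 *
 *	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
 *	if (btrfs_check_super_csum(fs_info, bh->b_data))
 *		btrfs_err(fs_info, "superblock checksum mismatch");
 *
 * The stored checksum occupies the first BTRFS_CSUM_SIZE bytes of the
 * superblock; everything after it, padding included, is covered by the crc.
 */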

static int verify_level_key(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *eb, int level,
			    struct btrfs_key *first_key, u64 parent_transid)
{
	int found_level;
	struct btrfs_key found_key;
	int ret;

	found_level = btrfs_header_level(eb);
	if (found_level != level) {
#ifdef CONFIG_BTRFS_DEBUG
		WARN_ON(1);
		btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
			  eb->start, level, found_level);
#endif
		return -EIO;
	}

	if (!first_key)
		return 0;

	/*
	 * For live tree blocks (new tree blocks in the current transaction),
	 * we need proper lock context to avoid races, which is impossible
	 * here.  So we only check tree blocks that have been read from disk,
	 * whose generation is <= fs_info->last_trans_committed.
	 */
	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
		return 0;
	if (found_level)
		btrfs_node_key_to_cpu(eb, &found_key, 0);
	else
		btrfs_item_key_to_cpu(eb, &found_key, 0);
	ret = btrfs_comp_cpu_keys(first_key, &found_key);

#ifdef CONFIG_BTRFS_DEBUG
	if (ret) {
		WARN_ON(1);
		btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
			  eb->start, parent_transid, first_key->objectid,
			  first_key->type, first_key->offset,
			  found_key.objectid, found_key.type,
			  found_key.offset);
	}
#endif
	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:	expected transid, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid, int level,
					  struct btrfs_key *first_key)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       mirror_num);
		if (!ret) {
			if (verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				ret = -EIO;
			else if (verify_level_key(fs_info, eb, level,
						  first_key, parent_transid))
				ret = -EUCLEAN;
			else
				break;
		}

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}
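
/*
 * Mirror numbering sketch: mirror_num 0 lets the chunk layer pick a copy,
 * real copies are numbered 1..num_copies.  E.g. with two copies, if the
 * first attempt is served by (and fails on) copy 1, the loop above records
 * failed_mirror = 1, skips it, retries with mirror_num 2 and, on success,
 * rewrites the bad copy via repair_eb_io_failure().
 */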

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
			     eb->start, found_start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d on %llu",
			  (int)btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(fs_info, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(fs_info, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, so we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, -EIO);
	return -EIO;	/* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
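
/*
 * End_io interposition sketch: btrfs_bio_wq_end_io() saves the caller's
 * (bi_end_io, bi_private) pair and redirects the bio to
 * end_workqueue_bio(), which runs in irq context and only queues work.
 * The saved pair is restored by end_workqueue_fn() (further down), so the
 * chain for e.g. a metadata read is:
 *
 *	end_workqueue_bio()	irq: queue work on endio_meta_workers
 *	end_workqueue_fn()	worker: restore bi_end_io/bi_private
 *	bio_endio(bio)		runs the original completion in task context
 */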

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	btrfs_submit_bio_done(async->private_data, async->bio, async->mirror_num);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_start_t *submit_bio_start)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_offset = bio_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}
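
/*
 * Life cycle sketch of an async_submit_bio: the three callbacks wired up
 * above run in order on the fs_info->workers queue:
 *
 *	run_one_async_start()	-> submit_bio_start() (checksumming)
 *	run_one_async_done()	-> btrfs_submit_bio_done(), or bio_endio()
 *				   if ->status was set by the start hook
 *	run_one_async_free()	-> kfree(async)
 *
 * so on a 0 return the caller has handed the bio off completely.
 */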

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just checksum the bio here; the actual
	 * mapping to devices happens later via btrfs_submit_bio_done()
	 */
	return btree_csum_one_bio(bio);
}

static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(BTRFS_I(inode));
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  btree_submit_bio_start);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * as we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH,
					     fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}
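
/*
 * Sketch of the rationale: __percpu_counter_compare() returns <0/0/>0 like
 * memcmp, so the check above means "for background writeback, bail out
 * while the dirty metadata total is still below
 * BTRFS_DIRTY_METADATA_THRESH", letting small dirty amounts accumulate
 * into larger writes.
 */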

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}


int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
			        buf->start, buf->start + buf->len - 1);
}

/*
 * Read a tree block at logical address @bytenr and do basic but critical
 * verification on it.
 *
 * @parent_transid:	expected transid of this tree block, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid, int level,
				      struct btrfs_key *first_key)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
					     level, first_key);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;

}
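
/*
 * Usage sketch (illustrative only; "parent" and "slot" are assumed, the
 * expected values all come from the parent node's pointer):
 *
 *	btrfs_node_key_to_cpu(parent, &first_key, slot);
 *	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
 *			     btrfs_node_ptr_generation(parent, slot),
 *			     btrfs_header_level(parent) - 1, &first_key);
 *	if (IS_ERR(eb))
 *		return PTR_ERR(eb);
 */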

void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic_set(&root->snapshot_force_cow, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
		gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* Set up a dummy root tree for the selftests */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid = NULL_UUID_LE;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	root->node = leaf;

	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation, level, NULL);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible to call btrfs_free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}
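
/*
 * The preload/insert/preload_end dance above lets the radix tree node be
 * allocated with GFP_NOFS outside the spinlock.  Callers are expected to
 * handle the lost-race case, as btrfs_get_fs_root() below does (sketch):
 *
 *	ret = btrfs_insert_fs_root(fs_info, root);
 *	if (ret == -EEXIST) {
 *		btrfs_free_fs_root(root);
 *		goto again;	(and look up the winner instead)
 *	}
 */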

struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			btrfs_free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	btrfs_free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;

	while (1) {
		again = 0;

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
		btrfs_run_delayed_iputs(fs_info);
		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		if (kthread_should_park())
			kthread_parkme();
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}

static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	time64_t now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * fs_info->commit_interval;
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		now = ktime_get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
		    (now < cur->start_time ||
		     now - cur->start_time < fs_info->commit_interval)) {
			spin_unlock(&fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &fs_info->fs_state)))
			btrfs_cleanup_transaction(fs_info);
		if (!kthread_should_stop() &&
				(!btrfs_transaction_blocked(fs_info) ||
				 cannot_commit))
			schedule_timeout_interruptible(delay);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will find the highest generation in the array of root backups.
 * The index of the newest entry is returned, or -1 if we can't find
 * anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}
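
/*
 * Backup root ring sketch: the BTRFS_NUM_BACKUP_ROOTS slots form a ring.
 * E.g. with 4 slots and the newest generation found in slot 3, the next
 * backup is written to slot (3 + 1) % 4 == 0, which is exactly the value
 * find_oldest_super_backup() below stores in backup_root_index.
 */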
1814 
1815 
1816 /*
1817  * find the oldest backup so we know where to store new entries
1818  * in the backup array.  This will set the backup_root_index
1819  * field in the fs_info struct
1820  */
1821 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1822 				     u64 newest_gen)
1823 {
1824 	int newest_index = -1;
1825 
1826 	newest_index = find_newest_super_backup(info, newest_gen);
1827 	/* if there was garbage in there, just move along */
1828 	if (newest_index == -1) {
1829 		info->backup_root_index = 0;
1830 	} else {
1831 		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1832 	}
1833 }
1834 
1835 /*
1836  * copy all the root pointers into the super backup array.
1837  * this will bump the backup pointer by one when it is
1838  * done
1839  */
1840 static void backup_super_roots(struct btrfs_fs_info *info)
1841 {
1842 	int next_backup;
1843 	struct btrfs_root_backup *root_backup;
1844 	int last_backup;
1845 
1846 	next_backup = info->backup_root_index;
1847 	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1848 		BTRFS_NUM_BACKUP_ROOTS;
1849 
1850 	/*
1851 	 * Just overwrite the last backup if we're at the same generation;
1852 	 * this happens only at umount.
1853 	 */
1854 	root_backup = info->super_for_commit->super_roots + last_backup;
1855 	if (btrfs_backup_tree_root_gen(root_backup) ==
1856 	    btrfs_header_generation(info->tree_root->node))
1857 		next_backup = last_backup;
1858 
1859 	root_backup = info->super_for_commit->super_roots + next_backup;
1860 
1861 	/*
1862 	 * make sure all of our padding and empty slots get zero filled
1863 	 * regardless of which ones we use today
1864 	 */
1865 	memset(root_backup, 0, sizeof(*root_backup));
1866 
1867 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1868 
1869 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1870 	btrfs_set_backup_tree_root_gen(root_backup,
1871 			       btrfs_header_generation(info->tree_root->node));
1872 
1873 	btrfs_set_backup_tree_root_level(root_backup,
1874 			       btrfs_header_level(info->tree_root->node));
1875 
1876 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1877 	btrfs_set_backup_chunk_root_gen(root_backup,
1878 			       btrfs_header_generation(info->chunk_root->node));
1879 	btrfs_set_backup_chunk_root_level(root_backup,
1880 			       btrfs_header_level(info->chunk_root->node));
1881 
1882 	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1883 	btrfs_set_backup_extent_root_gen(root_backup,
1884 			       btrfs_header_generation(info->extent_root->node));
1885 	btrfs_set_backup_extent_root_level(root_backup,
1886 			       btrfs_header_level(info->extent_root->node));
1887 
1888 	/*
1889 	 * we might commit during log recovery, which happens before we set
1890 	 * the fs_root.  Make sure it is valid before we fill it in.
1891 	 */
1892 	if (info->fs_root && info->fs_root->node) {
1893 		btrfs_set_backup_fs_root(root_backup,
1894 					 info->fs_root->node->start);
1895 		btrfs_set_backup_fs_root_gen(root_backup,
1896 			       btrfs_header_generation(info->fs_root->node));
1897 		btrfs_set_backup_fs_root_level(root_backup,
1898 			       btrfs_header_level(info->fs_root->node));
1899 	}
1900 
1901 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1902 	btrfs_set_backup_dev_root_gen(root_backup,
1903 			       btrfs_header_generation(info->dev_root->node));
1904 	btrfs_set_backup_dev_root_level(root_backup,
1905 				       btrfs_header_level(info->dev_root->node));
1906 
1907 	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1908 	btrfs_set_backup_csum_root_gen(root_backup,
1909 			       btrfs_header_generation(info->csum_root->node));
1910 	btrfs_set_backup_csum_root_level(root_backup,
1911 			       btrfs_header_level(info->csum_root->node));
1912 
1913 	btrfs_set_backup_total_bytes(root_backup,
1914 			     btrfs_super_total_bytes(info->super_copy));
1915 	btrfs_set_backup_bytes_used(root_backup,
1916 			     btrfs_super_bytes_used(info->super_copy));
1917 	btrfs_set_backup_num_devices(root_backup,
1918 			     btrfs_super_num_devices(info->super_copy));
1919 
1920 	/*
1921 	 * if we don't copy this out to the super_copy, it won't get remembered
1922 	 * for the next commit
1923 	 */
1924 	memcpy(&info->super_copy->super_roots,
1925 	       &info->super_for_commit->super_roots,
1926 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1927 }
1928 
1929 /*
1930  * this copies info out of the root backup array and back into
1931  * the in-memory super block.  It is meant to help iterate through
1932  * the array, so you send it the number of backups you've already
1933  * tried and the last backup index you used.
1934  *
1935  * this returns -1 when it has tried all the backups
1936  */
1937 static noinline int next_root_backup(struct btrfs_fs_info *info,
1938 				     struct btrfs_super_block *super,
1939 				     int *num_backups_tried, int *backup_index)
1940 {
1941 	struct btrfs_root_backup *root_backup;
1942 	int newest = *backup_index;
1943 
1944 	if (*num_backups_tried == 0) {
1945 		u64 gen = btrfs_super_generation(super);
1946 
1947 		newest = find_newest_super_backup(info, gen);
1948 		if (newest == -1)
1949 			return -1;
1950 
1951 		*backup_index = newest;
1952 		*num_backups_tried = 1;
1953 	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1954 		/* we've tried all the backups, all done */
1955 		return -1;
1956 	} else {
1957 		/* jump to the next oldest backup */
1958 		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1959 			BTRFS_NUM_BACKUP_ROOTS;
1960 		*backup_index = newest;
1961 		*num_backups_tried += 1;
1962 	}
1963 	root_backup = super->super_roots + newest;
1964 
1965 	btrfs_set_super_generation(super,
1966 				   btrfs_backup_tree_root_gen(root_backup));
1967 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1968 	btrfs_set_super_root_level(super,
1969 				   btrfs_backup_tree_root_level(root_backup));
1970 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1971 
1972 	/*
1973 	 * fixme: the total bytes and num_devices need to match or we
1974 	 * need an fsck
1975 	 */
1976 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1977 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1978 	return 0;
1979 }
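
/*
 * For illustration, stepping one slot backwards in the ring adds the ring
 * size before the modulo to avoid going negative, e.g. with 4 slots:
 *
 *	prev = (idx + 4 - 1) % 4;	3 -> 2, 2 -> 1, 1 -> 0, 0 -> 3
 */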
1980 
1981 /* helper to clean up workers */
1982 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1983 {
1984 	btrfs_destroy_workqueue(fs_info->fixup_workers);
1985 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1986 	btrfs_destroy_workqueue(fs_info->workers);
1987 	btrfs_destroy_workqueue(fs_info->endio_workers);
1988 	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
1989 	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
1990 	btrfs_destroy_workqueue(fs_info->rmw_workers);
1991 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
1992 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1993 	btrfs_destroy_workqueue(fs_info->submit_workers);
1994 	btrfs_destroy_workqueue(fs_info->delayed_workers);
1995 	btrfs_destroy_workqueue(fs_info->caching_workers);
1996 	btrfs_destroy_workqueue(fs_info->readahead_workers);
1997 	btrfs_destroy_workqueue(fs_info->flush_workers);
1998 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
1999 	btrfs_destroy_workqueue(fs_info->extent_workers);
2000 	/*
2001 	 * Now that all other work queues are destroyed, we can safely destroy
2002 	 * the queues used for metadata I/O, since tasks from those other work
2003 	 * queues can do metadata I/O operations.
2004 	 */
2005 	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2006 	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2007 }
2008 
2009 static void free_root_extent_buffers(struct btrfs_root *root)
2010 {
2011 	if (root) {
2012 		free_extent_buffer(root->node);
2013 		free_extent_buffer(root->commit_root);
2014 		root->node = NULL;
2015 		root->commit_root = NULL;
2016 	}
2017 }
2018 
2019 /* helper to clean up tree roots */
2020 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2021 {
2022 	free_root_extent_buffers(info->tree_root);
2023 
2024 	free_root_extent_buffers(info->dev_root);
2025 	free_root_extent_buffers(info->extent_root);
2026 	free_root_extent_buffers(info->csum_root);
2027 	free_root_extent_buffers(info->quota_root);
2028 	free_root_extent_buffers(info->uuid_root);
2029 	if (chunk_root)
2030 		free_root_extent_buffers(info->chunk_root);
2031 	free_root_extent_buffers(info->free_space_root);
2032 }
2033 
2034 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2035 {
2036 	int ret;
2037 	struct btrfs_root *gang[8];
2038 	int i;
2039 
2040 	while (!list_empty(&fs_info->dead_roots)) {
2041 		gang[0] = list_entry(fs_info->dead_roots.next,
2042 				     struct btrfs_root, root_list);
2043 		list_del(&gang[0]->root_list);
2044 
2045 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2046 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2047 		} else {
2048 			free_extent_buffer(gang[0]->node);
2049 			free_extent_buffer(gang[0]->commit_root);
2050 			btrfs_put_fs_root(gang[0]);
2051 		}
2052 	}
2053 
2054 	while (1) {
2055 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2056 					     (void **)gang, 0,
2057 					     ARRAY_SIZE(gang));
2058 		if (!ret)
2059 			break;
2060 		for (i = 0; i < ret; i++)
2061 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2062 	}
2063 
2064 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2065 		btrfs_free_log_root_tree(NULL, fs_info);
2066 		btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2067 	}
2068 }
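
/*
 * Note on the loop above: the radix tree walk restarts from index 0 on
 * every pass yet still terminates, because btrfs_drop_and_free_fs_root()
 * deletes each root from fs_roots_radix, so each gang lookup (up to
 * ARRAY_SIZE(gang) == 8 results per pass) sees fewer entries than the
 * one before.
 */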
2069 
2070 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2071 {
2072 	mutex_init(&fs_info->scrub_lock);
2073 	atomic_set(&fs_info->scrubs_running, 0);
2074 	atomic_set(&fs_info->scrub_pause_req, 0);
2075 	atomic_set(&fs_info->scrubs_paused, 0);
2076 	atomic_set(&fs_info->scrub_cancel_req, 0);
2077 	init_waitqueue_head(&fs_info->scrub_pause_wait);
2078 	fs_info->scrub_workers_refcnt = 0;
2079 }
2080 
2081 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2082 {
2083 	spin_lock_init(&fs_info->balance_lock);
2084 	mutex_init(&fs_info->balance_mutex);
2085 	atomic_set(&fs_info->balance_pause_req, 0);
2086 	atomic_set(&fs_info->balance_cancel_req, 0);
2087 	fs_info->balance_ctl = NULL;
2088 	init_waitqueue_head(&fs_info->balance_wait_q);
2089 }
2090 
2091 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2092 {
2093 	struct inode *inode = fs_info->btree_inode;
2094 
2095 	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2096 	set_nlink(inode, 1);
2097 	/*
2098 	 * We set the i_size on the btree inode to the max possible offset;
2099 	 * the real end of the address space is determined by all of
2100 	 * the devices in the system.
2101 	 */
2102 	inode->i_size = OFFSET_MAX;
2103 	inode->i_mapping->a_ops = &btree_aops;
2104 
2105 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2106 	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
2107 	BTRFS_I(inode)->io_tree.track_uptodate = 0;
2108 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2109 
2110 	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2111 
2112 	BTRFS_I(inode)->root = fs_info->tree_root;
2113 	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2114 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2115 	btrfs_insert_inode_hash(inode);
2116 }
2117 
2118 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2119 {
2120 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2121 	rwlock_init(&fs_info->dev_replace.lock);
2122 	atomic_set(&fs_info->dev_replace.blocking_readers, 0);
2123 	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
2124 	init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
2125 }
2126 
2127 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2128 {
2129 	spin_lock_init(&fs_info->qgroup_lock);
2130 	mutex_init(&fs_info->qgroup_ioctl_lock);
2131 	fs_info->qgroup_tree = RB_ROOT;
2132 	fs_info->qgroup_op_tree = RB_ROOT;
2133 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2134 	fs_info->qgroup_seq = 1;
2135 	fs_info->qgroup_ulist = NULL;
2136 	fs_info->qgroup_rescan_running = false;
2137 	mutex_init(&fs_info->qgroup_rescan_lock);
2138 }
2139 
2140 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2141 		struct btrfs_fs_devices *fs_devices)
2142 {
2143 	u32 max_active = fs_info->thread_pool_size;
2144 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2145 
2146 	fs_info->workers =
2147 		btrfs_alloc_workqueue(fs_info, "worker",
2148 				      flags | WQ_HIGHPRI, max_active, 16);
2149 
2150 	fs_info->delalloc_workers =
2151 		btrfs_alloc_workqueue(fs_info, "delalloc",
2152 				      flags, max_active, 2);
2153 
2154 	fs_info->flush_workers =
2155 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2156 				      flags, max_active, 0);
2157 
2158 	fs_info->caching_workers =
2159 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2160 
2161 	/*
2162 	 * a higher idle thresh on the submit workers makes it much more
2163 	 * likely that bios will be sent down in a sane order to the
2164 	 * devices
2165 	 */
2166 	fs_info->submit_workers =
2167 		btrfs_alloc_workqueue(fs_info, "submit", flags,
2168 				      min_t(u64, fs_devices->num_devices,
2169 					    max_active), 64);
2170 
2171 	fs_info->fixup_workers =
2172 		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2173 
2174 	/*
2175 	 * endios are largely parallel and should have a very
2176 	 * low idle thresh
2177 	 */
2178 	fs_info->endio_workers =
2179 		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2180 	fs_info->endio_meta_workers =
2181 		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2182 				      max_active, 4);
2183 	fs_info->endio_meta_write_workers =
2184 		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2185 				      max_active, 2);
2186 	fs_info->endio_raid56_workers =
2187 		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2188 				      max_active, 4);
2189 	fs_info->endio_repair_workers =
2190 		btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2191 	fs_info->rmw_workers =
2192 		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2193 	fs_info->endio_write_workers =
2194 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2195 				      max_active, 2);
2196 	fs_info->endio_freespace_worker =
2197 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2198 				      max_active, 0);
2199 	fs_info->delayed_workers =
2200 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2201 				      max_active, 0);
2202 	fs_info->readahead_workers =
2203 		btrfs_alloc_workqueue(fs_info, "readahead", flags,
2204 				      max_active, 2);
2205 	fs_info->qgroup_rescan_workers =
2206 		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2207 	fs_info->extent_workers =
2208 		btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2209 				      min_t(u64, fs_devices->num_devices,
2210 					    max_active), 8);
2211 
2212 	if (!(fs_info->workers && fs_info->delalloc_workers &&
2213 	      fs_info->submit_workers && fs_info->flush_workers &&
2214 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2215 	      fs_info->endio_meta_write_workers &&
2216 	      fs_info->endio_repair_workers &&
2217 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2218 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2219 	      fs_info->caching_workers && fs_info->readahead_workers &&
2220 	      fs_info->fixup_workers && fs_info->delayed_workers &&
2221 	      fs_info->extent_workers &&
2222 	      fs_info->qgroup_rescan_workers)) {
2223 		return -ENOMEM;
2224 	}
2225 
2226 	return 0;
2227 }
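
/*
 * For reference, the btrfs_alloc_workqueue() arguments are, in order,
 * (fs_info, name, flags, limit_active, thresh).  The trailing "thresh"
 * is the idle threshold the comments above refer to: a hint used by the
 * queue's automatic scaling of active workers (see async-thread.c),
 * where a higher value tolerates more queued work before growing the
 * worker count.
 */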
2228 
2229 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2230 			    struct btrfs_fs_devices *fs_devices)
2231 {
2232 	int ret;
2233 	struct btrfs_root *log_tree_root;
2234 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2235 	u64 bytenr = btrfs_super_log_root(disk_super);
2236 	int level = btrfs_super_log_root_level(disk_super);
2237 
2238 	if (fs_devices->rw_devices == 0) {
2239 		btrfs_warn(fs_info, "log replay required on RO media");
2240 		return -EIO;
2241 	}
2242 
2243 	log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2244 	if (!log_tree_root)
2245 		return -ENOMEM;
2246 
2247 	__setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2248 
2249 	log_tree_root->node = read_tree_block(fs_info, bytenr,
2250 					      fs_info->generation + 1,
2251 					      level, NULL);
2252 	if (IS_ERR(log_tree_root->node)) {
2253 		btrfs_warn(fs_info, "failed to read log tree");
2254 		ret = PTR_ERR(log_tree_root->node);
2255 		kfree(log_tree_root);
2256 		return ret;
2257 	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
2258 		btrfs_err(fs_info, "failed to read log tree");
2259 		free_extent_buffer(log_tree_root->node);
2260 		kfree(log_tree_root);
2261 		return -EIO;
2262 	}
2263 	/* returns with log_tree_root freed on success */
2264 	ret = btrfs_recover_log_trees(log_tree_root);
2265 	if (ret) {
2266 		btrfs_handle_fs_error(fs_info, ret,
2267 				      "Failed to recover log tree");
2268 		free_extent_buffer(log_tree_root->node);
2269 		kfree(log_tree_root);
2270 		return ret;
2271 	}
2272 
2273 	if (sb_rdonly(fs_info->sb)) {
2274 		ret = btrfs_commit_super(fs_info);
2275 		if (ret)
2276 			return ret;
2277 	}
2278 
2279 	return 0;
2280 }
2281 
2282 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2283 {
2284 	struct btrfs_root *tree_root = fs_info->tree_root;
2285 	struct btrfs_root *root;
2286 	struct btrfs_key location;
2287 	int ret;
2288 
2289 	BUG_ON(!fs_info->tree_root);
2290 
2291 	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2292 	location.type = BTRFS_ROOT_ITEM_KEY;
2293 	location.offset = 0;
2294 
2295 	root = btrfs_read_tree_root(tree_root, &location);
2296 	if (IS_ERR(root)) {
2297 		ret = PTR_ERR(root);
2298 		goto out;
2299 	}
2300 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2301 	fs_info->extent_root = root;
2302 
2303 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2304 	root = btrfs_read_tree_root(tree_root, &location);
2305 	if (IS_ERR(root)) {
2306 		ret = PTR_ERR(root);
2307 		goto out;
2308 	}
2309 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2310 	fs_info->dev_root = root;
2311 	btrfs_init_devices_late(fs_info);
2312 
2313 	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2314 	root = btrfs_read_tree_root(tree_root, &location);
2315 	if (IS_ERR(root)) {
2316 		ret = PTR_ERR(root);
2317 		goto out;
2318 	}
2319 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2320 	fs_info->csum_root = root;
2321 
2322 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2323 	root = btrfs_read_tree_root(tree_root, &location);
2324 	if (!IS_ERR(root)) {
2325 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2326 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2327 		fs_info->quota_root = root;
2328 	}
2329 
2330 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2331 	root = btrfs_read_tree_root(tree_root, &location);
2332 	if (IS_ERR(root)) {
2333 		ret = PTR_ERR(root);
2334 		if (ret != -ENOENT)
2335 			goto out;
2336 	} else {
2337 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2338 		fs_info->uuid_root = root;
2339 	}
2340 
2341 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2342 		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2343 		root = btrfs_read_tree_root(tree_root, &location);
2344 		if (IS_ERR(root)) {
2345 			ret = PTR_ERR(root);
2346 			goto out;
2347 		}
2348 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2349 		fs_info->free_space_root = root;
2350 	}
2351 
2352 	return 0;
2353 out:
2354 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2355 		   location.objectid, ret);
2356 	return ret;
2357 }
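
/*
 * For illustration: every lookup above uses the same key shape against
 * the tree of tree roots; only the objectid varies.  E.g. for the csum
 * tree:
 *
 *	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
 *	location.type = BTRFS_ROOT_ITEM_KEY;
 *	location.offset = 0;
 *	root = btrfs_read_tree_root(tree_root, &location);
 */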
2358 
2359 /*
2360  * Real super block validation
2361  * NOTE: super csum type and incompat features will not be checked here.
2362  *
2363  * @sb:		super block to check
2364  * @mirror_num:	the super block number whose bytenr is checked:
2365  * 		0	the primary (1st) sb
2366  * 		1, 2	2nd and 3rd backup copy
2367  * 	       -1	skip bytenr check
2368  */
2369 static int validate_super(struct btrfs_fs_info *fs_info,
2370 			    struct btrfs_super_block *sb, int mirror_num)
2371 {
2372 	u64 nodesize = btrfs_super_nodesize(sb);
2373 	u64 sectorsize = btrfs_super_sectorsize(sb);
2374 	int ret = 0;
2375 
2376 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2377 		btrfs_err(fs_info, "no valid FS found");
2378 		ret = -EINVAL;
2379 	}
2380 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2381 		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2382 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2383 		ret = -EINVAL;
2384 	}
2385 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2386 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2387 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2388 		ret = -EINVAL;
2389 	}
2390 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2391 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2392 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2393 		ret = -EINVAL;
2394 	}
2395 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2396 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2397 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2398 		ret = -EINVAL;
2399 	}
2400 
2401 	/*
2402 	 * Check sectorsize and nodesize first, other checks will need them.
2403 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2404 	 */
2405 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2406 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2407 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2408 		ret = -EINVAL;
2409 	}
2410 	/* Only PAGE_SIZE is supported for now */
2411 	if (sectorsize != PAGE_SIZE) {
2412 		btrfs_err(fs_info,
2413 			"sectorsize %llu not supported yet, only support %lu",
2414 			sectorsize, PAGE_SIZE);
2415 		ret = -EINVAL;
2416 	}
2417 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2418 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2419 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2420 		ret = -EINVAL;
2421 	}
2422 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2423 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2424 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2425 		ret = -EINVAL;
2426 	}
2427 
2428 	/* Root alignment check */
2429 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2430 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2431 			   btrfs_super_root(sb));
2432 		ret = -EINVAL;
2433 	}
2434 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2435 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2436 			   btrfs_super_chunk_root(sb));
2437 		ret = -EINVAL;
2438 	}
2439 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2440 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2441 			   btrfs_super_log_root(sb));
2442 		ret = -EINVAL;
2443 	}
2444 
2445 	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) {
2446 		btrfs_err(fs_info,
2447 			   "dev_item UUID does not match fsid: %pU != %pU",
2448 			   fs_info->fsid, sb->dev_item.fsid);
2449 		ret = -EINVAL;
2450 	}
2451 
2452 	/*
2453 	 * Hint to catch really bogus numbers or bitflips; more exact
2454 	 * checks are done later.
2455 	 */
2456 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2457 		btrfs_err(fs_info, "bytes_used is too small %llu",
2458 			  btrfs_super_bytes_used(sb));
2459 		ret = -EINVAL;
2460 	}
2461 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2462 		btrfs_err(fs_info, "invalid stripesize %u",
2463 			  btrfs_super_stripesize(sb));
2464 		ret = -EINVAL;
2465 	}
2466 	if (btrfs_super_num_devices(sb) > (1UL << 31))
2467 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2468 			   btrfs_super_num_devices(sb));
2469 	if (btrfs_super_num_devices(sb) == 0) {
2470 		btrfs_err(fs_info, "number of devices is 0");
2471 		ret = -EINVAL;
2472 	}
2473 
2474 	if (mirror_num >= 0 &&
2475 	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2476 		btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2477 			  btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
2478 		ret = -EINVAL;
2479 	}
2480 
2481 	/*
2482 	 * Obvious sys_chunk_array corruptions, it must hold at least one key
2483 	 * and one chunk
2484 	 */
2485 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2486 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2487 			  btrfs_super_sys_array_size(sb),
2488 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2489 		ret = -EINVAL;
2490 	}
2491 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2492 			+ sizeof(struct btrfs_chunk)) {
2493 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2494 			  btrfs_super_sys_array_size(sb),
2495 			  sizeof(struct btrfs_disk_key)
2496 			  + sizeof(struct btrfs_chunk));
2497 		ret = -EINVAL;
2498 	}
2499 
2500 	/*
2501 	 * The generation is a global counter; we'll trust it more than the
2502 	 * other values, but it's still possible that it's the one that's wrong.
2503 	 */
2504 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2505 		btrfs_warn(fs_info,
2506 			"suspicious: generation < chunk_root_generation: %llu < %llu",
2507 			btrfs_super_generation(sb),
2508 			btrfs_super_chunk_root_generation(sb));
2509 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2510 	    && btrfs_super_cache_generation(sb) != (u64)-1)
2511 		btrfs_warn(fs_info,
2512 			"suspicious: generation < cache_generation: %llu < %llu",
2513 			btrfs_super_generation(sb),
2514 			btrfs_super_cache_generation(sb));
2515 
2516 	return ret;
2517 }
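
/*
 * For reference: btrfs_sb_offset() maps a mirror number to a fixed byte
 * offset on every device: copy 0 at 64KiB (BTRFS_SUPER_INFO_OFFSET),
 * copy 1 at 64MiB and copy 2 at 256GiB.  The bytenr check above thus
 * verifies that a super block was read from the location it claims.
 */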
2518 
2519 /*
2520  * Validation of super block at mount time.
2521  * Checks already done earlier at mount time, like csum type and incompat
2522  * flags, will be skipped.
2523  */
2524 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2525 {
2526 	return validate_super(fs_info, fs_info->super_copy, 0);
2527 }
2528 
2529 /*
2530  * Validation of super block at write time.
2531  * Some checks, like the bytenr check, will be skipped as their values
2532  * will be overwritten soon.
2533  * Extra checks like csum type and incompat flags will be done here.
2534  */
2535 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2536 				      struct btrfs_super_block *sb)
2537 {
2538 	int ret;
2539 
2540 	ret = validate_super(fs_info, sb, -1);
2541 	if (ret < 0)
2542 		goto out;
2543 	if (btrfs_super_csum_type(sb) != BTRFS_CSUM_TYPE_CRC32) {
2544 		ret = -EUCLEAN;
2545 		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2546 			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2547 		goto out;
2548 	}
2549 	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2550 		ret = -EUCLEAN;
2551 		btrfs_err(fs_info,
2552 		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2553 			  btrfs_super_incompat_flags(sb),
2554 			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2555 		goto out;
2556 	}
2557 out:
2558 	if (ret < 0)
2559 		btrfs_err(fs_info,
2560 		"super block corruption detected before writing it to disk");
2561 	return ret;
2562 }
2563 
2564 int open_ctree(struct super_block *sb,
2565 	       struct btrfs_fs_devices *fs_devices,
2566 	       char *options)
2567 {
2568 	u32 sectorsize;
2569 	u32 nodesize;
2570 	u32 stripesize;
2571 	u64 generation;
2572 	u64 features;
2573 	struct btrfs_key location;
2574 	struct buffer_head *bh;
2575 	struct btrfs_super_block *disk_super;
2576 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2577 	struct btrfs_root *tree_root;
2578 	struct btrfs_root *chunk_root;
2579 	int ret;
2580 	int err = -EINVAL;
2581 	int num_backups_tried = 0;
2582 	int backup_index = 0;
2583 	int clear_free_space_tree = 0;
2584 	int level;
2585 
2586 	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2587 	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2588 	if (!tree_root || !chunk_root) {
2589 		err = -ENOMEM;
2590 		goto fail;
2591 	}
2592 
2593 	ret = init_srcu_struct(&fs_info->subvol_srcu);
2594 	if (ret) {
2595 		err = ret;
2596 		goto fail;
2597 	}
2598 
2599 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2600 	if (ret) {
2601 		err = ret;
2602 		goto fail_srcu;
2603 	}
2604 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2605 					(1 + ilog2(nr_cpu_ids));
2606 
2607 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2608 	if (ret) {
2609 		err = ret;
2610 		goto fail_dirty_metadata_bytes;
2611 	}
2612 
2613 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2614 			GFP_KERNEL);
2615 	if (ret) {
2616 		err = ret;
2617 		goto fail_delalloc_bytes;
2618 	}
2619 
2620 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2621 	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2622 	INIT_LIST_HEAD(&fs_info->trans_list);
2623 	INIT_LIST_HEAD(&fs_info->dead_roots);
2624 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2625 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2626 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2627 	INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
2628 	spin_lock_init(&fs_info->pending_raid_kobjs_lock);
2629 	spin_lock_init(&fs_info->delalloc_root_lock);
2630 	spin_lock_init(&fs_info->trans_lock);
2631 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2632 	spin_lock_init(&fs_info->delayed_iput_lock);
2633 	spin_lock_init(&fs_info->defrag_inodes_lock);
2634 	spin_lock_init(&fs_info->tree_mod_seq_lock);
2635 	spin_lock_init(&fs_info->super_lock);
2636 	spin_lock_init(&fs_info->qgroup_op_lock);
2637 	spin_lock_init(&fs_info->buffer_lock);
2638 	spin_lock_init(&fs_info->unused_bgs_lock);
2639 	rwlock_init(&fs_info->tree_mod_log_lock);
2640 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2641 	mutex_init(&fs_info->delete_unused_bgs_mutex);
2642 	mutex_init(&fs_info->reloc_mutex);
2643 	mutex_init(&fs_info->delalloc_root_mutex);
2644 	mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2645 	seqlock_init(&fs_info->profiles_lock);
2646 
2647 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2648 	INIT_LIST_HEAD(&fs_info->space_info);
2649 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2650 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2651 	btrfs_mapping_init(&fs_info->mapping_tree);
2652 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2653 			     BTRFS_BLOCK_RSV_GLOBAL);
2654 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2655 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2656 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2657 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2658 			     BTRFS_BLOCK_RSV_DELOPS);
2659 	atomic_set(&fs_info->async_delalloc_pages, 0);
2660 	atomic_set(&fs_info->defrag_running, 0);
2661 	atomic_set(&fs_info->qgroup_op_seq, 0);
2662 	atomic_set(&fs_info->reada_works_cnt, 0);
2663 	atomic64_set(&fs_info->tree_mod_seq, 0);
2664 	fs_info->sb = sb;
2665 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2666 	fs_info->metadata_ratio = 0;
2667 	fs_info->defrag_inodes = RB_ROOT;
2668 	atomic64_set(&fs_info->free_chunk_space, 0);
2669 	fs_info->tree_mod_log = RB_ROOT;
2670 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2671 	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2672 	/* readahead state */
2673 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2674 	spin_lock_init(&fs_info->reada_lock);
2675 	btrfs_init_ref_verify(fs_info);
2676 
2677 	fs_info->thread_pool_size = min_t(unsigned long,
2678 					  num_online_cpus() + 2, 8);
2679 
2680 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2681 	spin_lock_init(&fs_info->ordered_root_lock);
2682 
2683 	fs_info->btree_inode = new_inode(sb);
2684 	if (!fs_info->btree_inode) {
2685 		err = -ENOMEM;
2686 		goto fail_bio_counter;
2687 	}
2688 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2689 
2690 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2691 					GFP_KERNEL);
2692 	if (!fs_info->delayed_root) {
2693 		err = -ENOMEM;
2694 		goto fail_iput;
2695 	}
2696 	btrfs_init_delayed_root(fs_info->delayed_root);
2697 
2698 	btrfs_init_scrub(fs_info);
2699 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2700 	fs_info->check_integrity_print_mask = 0;
2701 #endif
2702 	btrfs_init_balance(fs_info);
2703 	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2704 
2705 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2706 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2707 
2708 	btrfs_init_btree_inode(fs_info);
2709 
2710 	spin_lock_init(&fs_info->block_group_cache_lock);
2711 	fs_info->block_group_cache_tree = RB_ROOT;
2712 	fs_info->first_logical_byte = (u64)-1;
2713 
2714 	extent_io_tree_init(&fs_info->freed_extents[0], NULL);
2715 	extent_io_tree_init(&fs_info->freed_extents[1], NULL);
2716 	fs_info->pinned_extents = &fs_info->freed_extents[0];
2717 	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2718 
2719 	mutex_init(&fs_info->ordered_operations_mutex);
2720 	mutex_init(&fs_info->tree_log_mutex);
2721 	mutex_init(&fs_info->chunk_mutex);
2722 	mutex_init(&fs_info->transaction_kthread_mutex);
2723 	mutex_init(&fs_info->cleaner_mutex);
2724 	mutex_init(&fs_info->ro_block_group_mutex);
2725 	init_rwsem(&fs_info->commit_root_sem);
2726 	init_rwsem(&fs_info->cleanup_work_sem);
2727 	init_rwsem(&fs_info->subvol_sem);
2728 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2729 
2730 	btrfs_init_dev_replace_locks(fs_info);
2731 	btrfs_init_qgroup(fs_info);
2732 
2733 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2734 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2735 
2736 	init_waitqueue_head(&fs_info->transaction_throttle);
2737 	init_waitqueue_head(&fs_info->transaction_wait);
2738 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2739 	init_waitqueue_head(&fs_info->async_submit_wait);
2740 
2741 	INIT_LIST_HEAD(&fs_info->pinned_chunks);
2742 
2743 	/* Usable values until the real ones are cached from the superblock */
2744 	fs_info->nodesize = 4096;
2745 	fs_info->sectorsize = 4096;
2746 	fs_info->stripesize = 4096;
2747 
2748 	ret = btrfs_alloc_stripe_hash_table(fs_info);
2749 	if (ret) {
2750 		err = ret;
2751 		goto fail_alloc;
2752 	}
2753 
2754 	__setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2755 
2756 	invalidate_bdev(fs_devices->latest_bdev);
2757 
2758 	/*
2759 	 * Read super block and check the signature bytes only
2760 	 */
2761 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2762 	if (IS_ERR(bh)) {
2763 		err = PTR_ERR(bh);
2764 		goto fail_alloc;
2765 	}
2766 
2767 	/*
2768 	 * We want to check superblock checksum, the type is stored inside.
2769 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2770 	 */
2771 	if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2772 		btrfs_err(fs_info, "superblock checksum mismatch");
2773 		err = -EINVAL;
2774 		brelse(bh);
2775 		goto fail_alloc;
2776 	}
2777 
2778 	/*
2779 	 * super_copy is zeroed at allocation time and we never touch the
2780 	 * following bytes up to INFO_SIZE, the checksum is calculated from
2781 	 * following bytes up to INFO_SIZE; the checksum is calculated from
2782 	 */
2783 	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2784 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2785 	       sizeof(*fs_info->super_for_commit));
2786 	brelse(bh);
2787 
2788 	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2789 
2790 	ret = btrfs_validate_mount_super(fs_info);
2791 	if (ret) {
2792 		btrfs_err(fs_info, "superblock contains fatal errors");
2793 		err = -EINVAL;
2794 		goto fail_alloc;
2795 	}
2796 
2797 	disk_super = fs_info->super_copy;
2798 	if (!btrfs_super_root(disk_super))
2799 		goto fail_alloc;
2800 
2801 	/* check FS state, whether FS is broken. */
2802 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2803 		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2804 
2805 	/*
2806 	 * Run through our array of backup supers and set up
2807 	 * our ring pointer to the oldest one.
2808 	 */
2809 	generation = btrfs_super_generation(disk_super);
2810 	find_oldest_super_backup(fs_info, generation);
2811 
2812 	/*
2813 	 * In the long term, we'll store the compression type in the super
2814 	 * block, and it'll be used for per file compression control.
2815 	 */
2816 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2817 
2818 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2819 	if (ret) {
2820 		err = ret;
2821 		goto fail_alloc;
2822 	}
2823 
2824 	features = btrfs_super_incompat_flags(disk_super) &
2825 		~BTRFS_FEATURE_INCOMPAT_SUPP;
2826 	if (features) {
2827 		btrfs_err(fs_info,
2828 		    "cannot mount because of unsupported optional features (%llx)",
2829 		    features);
2830 		err = -EINVAL;
2831 		goto fail_alloc;
2832 	}
2833 
2834 	features = btrfs_super_incompat_flags(disk_super);
2835 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2836 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2837 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2838 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2839 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2840 
2841 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2842 		btrfs_info(fs_info, "has skinny extents");
2843 
2844 	/*
2845 	 * flag our filesystem as having big metadata blocks if
2846 	 * they are bigger than the page size
2847 	 */
2848 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2849 		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2850 			btrfs_info(fs_info,
2851 				"flagging fs with big metadata feature");
2852 		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2853 	}
2854 
2855 	nodesize = btrfs_super_nodesize(disk_super);
2856 	sectorsize = btrfs_super_sectorsize(disk_super);
2857 	stripesize = sectorsize;
2858 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2859 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2860 
2861 	/* Cache block sizes */
2862 	fs_info->nodesize = nodesize;
2863 	fs_info->sectorsize = sectorsize;
2864 	fs_info->stripesize = stripesize;
2865 
2866 	/*
2867 	 * Mixed block groups end up with duplicate but slightly offset
2868 	 * extent buffers for the same range.  This leads to corruption.
2869 	 */
2870 	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2871 	    (sectorsize != nodesize)) {
2872 		btrfs_err(fs_info,
2873 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2874 			nodesize, sectorsize);
2875 		goto fail_alloc;
2876 	}
2877 
2878 	/*
2879 	 * We don't need the lock here because there is no other task that
2880 	 * will update the flag.
2881 	 */
2882 	btrfs_set_super_incompat_flags(disk_super, features);
2883 
2884 	features = btrfs_super_compat_ro_flags(disk_super) &
2885 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
2886 	if (!sb_rdonly(sb) && features) {
2887 		btrfs_err(fs_info,
2888 	"cannot mount read-write because of unsupported optional features (%llx)",
2889 		       features);
2890 		err = -EINVAL;
2891 		goto fail_alloc;
2892 	}
2893 
2894 	ret = btrfs_init_workqueues(fs_info, fs_devices);
2895 	if (ret) {
2896 		err = ret;
2897 		goto fail_sb_buffer;
2898 	}
2899 
2900 	sb->s_bdi->congested_fn = btrfs_congested_fn;
2901 	sb->s_bdi->congested_data = fs_info;
2902 	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
2903 	sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE;
2904 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2905 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
2906 
2907 	sb->s_blocksize = sectorsize;
2908 	sb->s_blocksize_bits = blksize_bits(sectorsize);
2909 	memcpy(&sb->s_uuid, fs_info->fsid, BTRFS_FSID_SIZE);
2910 
2911 	mutex_lock(&fs_info->chunk_mutex);
2912 	ret = btrfs_read_sys_array(fs_info);
2913 	mutex_unlock(&fs_info->chunk_mutex);
2914 	if (ret) {
2915 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
2916 		goto fail_sb_buffer;
2917 	}
2918 
2919 	generation = btrfs_super_chunk_root_generation(disk_super);
2920 	level = btrfs_super_chunk_root_level(disk_super);
2921 
2922 	__setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2923 
2924 	chunk_root->node = read_tree_block(fs_info,
2925 					   btrfs_super_chunk_root(disk_super),
2926 					   generation, level, NULL);
2927 	if (IS_ERR(chunk_root->node) ||
2928 	    !extent_buffer_uptodate(chunk_root->node)) {
2929 		btrfs_err(fs_info, "failed to read chunk root");
2930 		if (!IS_ERR(chunk_root->node))
2931 			free_extent_buffer(chunk_root->node);
2932 		chunk_root->node = NULL;
2933 		goto fail_tree_roots;
2934 	}
2935 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2936 	chunk_root->commit_root = btrfs_root_node(chunk_root);
2937 
2938 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2939 	   btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2940 
2941 	ret = btrfs_read_chunk_tree(fs_info);
2942 	if (ret) {
2943 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2944 		goto fail_tree_roots;
2945 	}
2946 
2947 	/*
2948 	 * Keep the devid that is marked to be the target device for the
2949 	 * device replace procedure
2950 	 */
2951 	btrfs_free_extra_devids(fs_devices, 0);
2952 
2953 	if (!fs_devices->latest_bdev) {
2954 		btrfs_err(fs_info, "failed to read devices");
2955 		goto fail_tree_roots;
2956 	}
2957 
2958 retry_root_backup:
2959 	generation = btrfs_super_generation(disk_super);
2960 	level = btrfs_super_root_level(disk_super);
2961 
2962 	tree_root->node = read_tree_block(fs_info,
2963 					  btrfs_super_root(disk_super),
2964 					  generation, level, NULL);
2965 	if (IS_ERR(tree_root->node) ||
2966 	    !extent_buffer_uptodate(tree_root->node)) {
2967 		btrfs_warn(fs_info, "failed to read tree root");
2968 		if (!IS_ERR(tree_root->node))
2969 			free_extent_buffer(tree_root->node);
2970 		tree_root->node = NULL;
2971 		goto recovery_tree_root;
2972 	}
2973 
2974 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2975 	tree_root->commit_root = btrfs_root_node(tree_root);
2976 	btrfs_set_root_refs(&tree_root->root_item, 1);
2977 
2978 	mutex_lock(&tree_root->objectid_mutex);
2979 	ret = btrfs_find_highest_objectid(tree_root,
2980 					&tree_root->highest_objectid);
2981 	if (ret) {
2982 		mutex_unlock(&tree_root->objectid_mutex);
2983 		goto recovery_tree_root;
2984 	}
2985 
2986 	ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2987 
2988 	mutex_unlock(&tree_root->objectid_mutex);
2989 
2990 	ret = btrfs_read_roots(fs_info);
2991 	if (ret)
2992 		goto recovery_tree_root;
2993 
2994 	fs_info->generation = generation;
2995 	fs_info->last_trans_committed = generation;
2996 
2997 	ret = btrfs_verify_dev_extents(fs_info);
2998 	if (ret) {
2999 		btrfs_err(fs_info,
3000 			  "failed to verify dev extents against chunks: %d",
3001 			  ret);
3002 		goto fail_block_groups;
3003 	}
3004 	ret = btrfs_recover_balance(fs_info);
3005 	if (ret) {
3006 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3007 		goto fail_block_groups;
3008 	}
3009 
3010 	ret = btrfs_init_dev_stats(fs_info);
3011 	if (ret) {
3012 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3013 		goto fail_block_groups;
3014 	}
3015 
3016 	ret = btrfs_init_dev_replace(fs_info);
3017 	if (ret) {
3018 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3019 		goto fail_block_groups;
3020 	}
3021 
3022 	btrfs_free_extra_devids(fs_devices, 1);
3023 
3024 	ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
3025 	if (ret) {
3026 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3027 				ret);
3028 		goto fail_block_groups;
3029 	}
3030 
3031 	ret = btrfs_sysfs_add_device(fs_devices);
3032 	if (ret) {
3033 		btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3034 				ret);
3035 		goto fail_fsdev_sysfs;
3036 	}
3037 
3038 	ret = btrfs_sysfs_add_mounted(fs_info);
3039 	if (ret) {
3040 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3041 		goto fail_fsdev_sysfs;
3042 	}
3043 
3044 	ret = btrfs_init_space_info(fs_info);
3045 	if (ret) {
3046 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3047 		goto fail_sysfs;
3048 	}
3049 
3050 	ret = btrfs_read_block_groups(fs_info);
3051 	if (ret) {
3052 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3053 		goto fail_sysfs;
3054 	}
3055 
3056 	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
3057 		btrfs_warn(fs_info,
3058 		"writeable mount is not allowed due to too many missing devices");
3059 		goto fail_sysfs;
3060 	}
3061 
3062 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3063 					       "btrfs-cleaner");
3064 	if (IS_ERR(fs_info->cleaner_kthread))
3065 		goto fail_sysfs;
3066 
3067 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3068 						   tree_root,
3069 						   "btrfs-transaction");
3070 	if (IS_ERR(fs_info->transaction_kthread))
3071 		goto fail_cleaner;
3072 
3073 	if (!btrfs_test_opt(fs_info, NOSSD) &&
3074 	    !fs_info->fs_devices->rotating) {
3075 		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3076 	}
3077 
3078 	/*
3079 	 * Mount does not set all options immediately; we can do it now and
3080 	 * do not have to wait for a transaction commit.
3081 	 */
3082 	btrfs_apply_pending_changes(fs_info);
3083 
3084 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3085 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3086 		ret = btrfsic_mount(fs_info, fs_devices,
3087 				    btrfs_test_opt(fs_info,
3088 					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3089 				    1 : 0,
3090 				    fs_info->check_integrity_print_mask);
3091 		if (ret)
3092 			btrfs_warn(fs_info,
3093 				"failed to initialize integrity check module: %d",
3094 				ret);
3095 	}
3096 #endif
3097 	ret = btrfs_read_qgroup_config(fs_info);
3098 	if (ret)
3099 		goto fail_trans_kthread;
3100 
3101 	if (btrfs_build_ref_tree(fs_info))
3102 		btrfs_err(fs_info, "couldn't build ref tree");
3103 
3104 	/* Do not make disk changes in a broken FS or if nologreplay is given */
3105 	if (btrfs_super_log_root(disk_super) != 0 &&
3106 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3107 		ret = btrfs_replay_log(fs_info, fs_devices);
3108 		if (ret) {
3109 			err = ret;
3110 			goto fail_qgroup;
3111 		}
3112 	}
3113 
3114 	ret = btrfs_find_orphan_roots(fs_info);
3115 	if (ret)
3116 		goto fail_qgroup;
3117 
3118 	if (!sb_rdonly(sb)) {
3119 		ret = btrfs_cleanup_fs_roots(fs_info);
3120 		if (ret)
3121 			goto fail_qgroup;
3122 
3123 		mutex_lock(&fs_info->cleaner_mutex);
3124 		ret = btrfs_recover_relocation(tree_root);
3125 		mutex_unlock(&fs_info->cleaner_mutex);
3126 		if (ret < 0) {
3127 			btrfs_warn(fs_info, "failed to recover relocation: %d",
3128 					ret);
3129 			err = -EINVAL;
3130 			goto fail_qgroup;
3131 		}
3132 	}
3133 
3134 	location.objectid = BTRFS_FS_TREE_OBJECTID;
3135 	location.type = BTRFS_ROOT_ITEM_KEY;
3136 	location.offset = 0;
3137 
3138 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3139 	if (IS_ERR(fs_info->fs_root)) {
3140 		err = PTR_ERR(fs_info->fs_root);
3141 		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3142 		goto fail_qgroup;
3143 	}
3144 
3145 	if (sb_rdonly(sb))
3146 		return 0;
3147 
3148 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3149 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3150 		clear_free_space_tree = 1;
3151 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3152 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3153 		btrfs_warn(fs_info, "free space tree is invalid");
3154 		clear_free_space_tree = 1;
3155 	}
3156 
3157 	if (clear_free_space_tree) {
3158 		btrfs_info(fs_info, "clearing free space tree");
3159 		ret = btrfs_clear_free_space_tree(fs_info);
3160 		if (ret) {
3161 			btrfs_warn(fs_info,
3162 				   "failed to clear free space tree: %d", ret);
3163 			close_ctree(fs_info);
3164 			return ret;
3165 		}
3166 	}
3167 
3168 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3169 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3170 		btrfs_info(fs_info, "creating free space tree");
3171 		ret = btrfs_create_free_space_tree(fs_info);
3172 		if (ret) {
3173 			btrfs_warn(fs_info,
3174 				"failed to create free space tree: %d", ret);
3175 			close_ctree(fs_info);
3176 			return ret;
3177 		}
3178 	}
3179 
3180 	down_read(&fs_info->cleanup_work_sem);
3181 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3182 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3183 		up_read(&fs_info->cleanup_work_sem);
3184 		close_ctree(fs_info);
3185 		return ret;
3186 	}
3187 	up_read(&fs_info->cleanup_work_sem);
3188 
3189 	ret = btrfs_resume_balance_async(fs_info);
3190 	if (ret) {
3191 		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3192 		close_ctree(fs_info);
3193 		return ret;
3194 	}
3195 
3196 	ret = btrfs_resume_dev_replace_async(fs_info);
3197 	if (ret) {
3198 		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3199 		close_ctree(fs_info);
3200 		return ret;
3201 	}
3202 
3203 	btrfs_qgroup_rescan_resume(fs_info);
3204 
3205 	if (!fs_info->uuid_root) {
3206 		btrfs_info(fs_info, "creating UUID tree");
3207 		ret = btrfs_create_uuid_tree(fs_info);
3208 		if (ret) {
3209 			btrfs_warn(fs_info,
3210 				"failed to create the UUID tree: %d", ret);
3211 			close_ctree(fs_info);
3212 			return ret;
3213 		}
3214 	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3215 		   fs_info->generation !=
3216 				btrfs_super_uuid_tree_generation(disk_super)) {
3217 		btrfs_info(fs_info, "checking UUID tree");
3218 		ret = btrfs_check_uuid_tree(fs_info);
3219 		if (ret) {
3220 			btrfs_warn(fs_info,
3221 				"failed to check the UUID tree: %d", ret);
3222 			close_ctree(fs_info);
3223 			return ret;
3224 		}
3225 	} else {
3226 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3227 	}
3228 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3229 
3230 	/*
3231 	 * backuproot only affects mount behavior; if open_ctree succeeded,
3232 	 * there is no need to keep the flag.
3233 	 */
3234 	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3235 
3236 	return 0;
3237 
3238 fail_qgroup:
3239 	btrfs_free_qgroup_config(fs_info);
3240 fail_trans_kthread:
3241 	kthread_stop(fs_info->transaction_kthread);
3242 	btrfs_cleanup_transaction(fs_info);
3243 	btrfs_free_fs_roots(fs_info);
3244 fail_cleaner:
3245 	kthread_stop(fs_info->cleaner_kthread);
3246 
3247 	/*
3248 	 * make sure we're done with the btree inode before we stop our
3249 	 * kthreads
3250 	 */
3251 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3252 
3253 fail_sysfs:
3254 	btrfs_sysfs_remove_mounted(fs_info);
3255 
3256 fail_fsdev_sysfs:
3257 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3258 
3259 fail_block_groups:
3260 	btrfs_put_block_group_cache(fs_info);
3261 
3262 fail_tree_roots:
3263 	free_root_pointers(fs_info, 1);
3264 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3265 
3266 fail_sb_buffer:
3267 	btrfs_stop_all_workers(fs_info);
3268 	btrfs_free_block_groups(fs_info);
3269 fail_alloc:
3270 fail_iput:
3271 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3272 
3273 	iput(fs_info->btree_inode);
3274 fail_bio_counter:
3275 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
3276 fail_delalloc_bytes:
3277 	percpu_counter_destroy(&fs_info->delalloc_bytes);
3278 fail_dirty_metadata_bytes:
3279 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3280 fail_srcu:
3281 	cleanup_srcu_struct(&fs_info->subvol_srcu);
3282 fail:
3283 	btrfs_free_stripe_hash_table(fs_info);
3284 	btrfs_close_devices(fs_info->fs_devices);
3285 	return err;
3286 
3287 recovery_tree_root:
3288 	if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3289 		goto fail_tree_roots;
3290 
3291 	free_root_pointers(fs_info, 0);
3292 
3293 	/* don't use the log in recovery mode, it won't be valid */
3294 	btrfs_set_super_log_root(disk_super, 0);
3295 
3296 	/* we can't trust the free space cache either */
3297 	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3298 
3299 	ret = next_root_backup(fs_info, fs_info->super_copy,
3300 			       &num_backups_tried, &backup_index);
3301 	if (ret == -1)
3302 		goto fail_block_groups;
3303 	goto retry_root_backup;
3304 }
3305 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
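
/*
 * Note on retry_root_backup above: each pass degrades one step, with
 * next_root_backup() rewriting the in-memory super from the next older
 * backup slot, log replay disabled (the log tree only matches the newest
 * root) and CLEAR_CACHE forced, until the backup ring is exhausted.
 */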
3306 
3307 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3308 {
3309 	if (uptodate) {
3310 		set_buffer_uptodate(bh);
3311 	} else {
3312 		struct btrfs_device *device = (struct btrfs_device *)
3313 			bh->b_private;
3314 
3315 		btrfs_warn_rl_in_rcu(device->fs_info,
3316 				"lost page write due to IO error on %s",
3317 					  rcu_str_deref(device->name));
3318 		/* note, we don't set_buffer_write_io_error because we have
3319 		 * our own ways of dealing with the IO errors
3320 		 */
3321 		clear_buffer_uptodate(bh);
3322 		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3323 	}
3324 	unlock_buffer(bh);
3325 	put_bh(bh);
3326 }
3327 
3328 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3329 			struct buffer_head **bh_ret)
3330 {
3331 	struct buffer_head *bh;
3332 	struct btrfs_super_block *super;
3333 	u64 bytenr;
3334 
3335 	bytenr = btrfs_sb_offset(copy_num);
3336 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3337 		return -EINVAL;
3338 
3339 	bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
3340 	/*
3341 	 * If we fail to read from the underlying devices, as of now
3342 	 * the best option we have is to mark it EIO.
3343 	 */
3344 	if (!bh)
3345 		return -EIO;
3346 
3347 	super = (struct btrfs_super_block *)bh->b_data;
3348 	if (btrfs_super_bytenr(super) != bytenr ||
3349 		    btrfs_super_magic(super) != BTRFS_MAGIC) {
3350 		brelse(bh);
3351 		return -EINVAL;
3352 	}
3353 
3354 	*bh_ret = bh;
3355 	return 0;
3356 }
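
/*
 * Worked example: __bread() takes a block number, hence the division by
 * BTRFS_BDEV_BLOCKSIZE (4096).  For the primary super block,
 *
 *	bytenr = btrfs_sb_offset(0) = 65536, i.e. block 16,
 *
 * read as a single BTRFS_SUPER_INFO_SIZE (4096 byte) buffer head.
 */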
3357 
3358 
3359 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3360 {
3361 	struct buffer_head *bh;
3362 	struct buffer_head *latest = NULL;
3363 	struct btrfs_super_block *super;
3364 	int i;
3365 	u64 transid = 0;
3366 	int ret = -EINVAL;
3367 
3368 	/* we would like to check all the supers, but that would make
3369 	 * a btrfs mount succeed after a mkfs from a different FS.
3370 	 * So, we need to add a special mount option to scan for
3371 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
3372 	 */
3373 	for (i = 0; i < 1; i++) {
3374 		ret = btrfs_read_dev_one_super(bdev, i, &bh);
3375 		if (ret)
3376 			continue;
3377 
3378 		super = (struct btrfs_super_block *)bh->b_data;
3379 
3380 		if (!latest || btrfs_super_generation(super) > transid) {
3381 			brelse(latest);
3382 			latest = bh;
3383 			transid = btrfs_super_generation(super);
3384 		} else {
3385 			brelse(bh);
3386 		}
3387 	}
3388 
3389 	if (!latest)
3390 		return ERR_PTR(ret);
3391 
3392 	return latest;
3393 }
3394 
3395 /*
3396  * Write superblock @sb to the @device. Do not wait for completion; all the
3397  * buffer heads we write are pinned.
3398  *
3399  * Write @max_mirrors copies of the superblock, where 0 means the default:
3400  * all copies that fit the expected device size at commit time. Note that
3401  * max_mirrors must be the same for the write and wait phases.
3402  *
3403  * Returns 0 if at least one copy was submitted successfully, -1 otherwise.
3404  */
3405 static int write_dev_supers(struct btrfs_device *device,
3406 			    struct btrfs_super_block *sb, int max_mirrors)
3407 {
3408 	struct buffer_head *bh;
3409 	int i;
3410 	int ret;
3411 	int errors = 0;
3412 	u32 crc;
3413 	u64 bytenr;
3414 	int op_flags;
3415 
3416 	if (max_mirrors == 0)
3417 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3418 
3419 	for (i = 0; i < max_mirrors; i++) {
3420 		bytenr = btrfs_sb_offset(i);
3421 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3422 		    device->commit_total_bytes)
3423 			break;
3424 
3425 		btrfs_set_super_bytenr(sb, bytenr);
3426 
3427 		crc = ~(u32)0;
3428 		crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc,
3429 				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
3430 		btrfs_csum_final(crc, sb->csum);
3431 
3432 		/* One reference for us, and we leave it for the caller */
3433 		bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
3434 			      BTRFS_SUPER_INFO_SIZE);
3435 		if (!bh) {
3436 			btrfs_err(device->fs_info,
3437 			    "couldn't get super buffer head for bytenr %llu",
3438 			    bytenr);
3439 			errors++;
3440 			continue;
3441 		}
3442 
3443 		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3444 
3445 		/* one reference for submit_bh */
3446 		get_bh(bh);
3447 
3448 		set_buffer_uptodate(bh);
3449 		lock_buffer(bh);
3450 		bh->b_end_io = btrfs_end_buffer_write_sync;
3451 		bh->b_private = device;
3452 
3453 		/*
3454 		 * We FUA the first super.  The others we allow
3455 		 * to go down lazily.
3456 		 */
3457 		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3458 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3459 			op_flags |= REQ_FUA;
3460 		ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3461 		if (ret)
3462 			errors++;
3463 	}
3464 	return errors < i ? 0 : -1;
3465 }
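
/*
 * For reference, the checksum computed above covers every byte after the
 * csum field itself, i.e. the range [BTRFS_CSUM_SIZE, BTRFS_SUPER_INFO_SIZE)
 * = [32, 4096), seeded with ~(u32)0 per the crc32c convention, before
 * btrfs_csum_final() stores the result into sb->csum.
 */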
3466 
3467 /*
3468  * Wait for write completion of the superblocks submitted by
3469  * write_dev_supers(); @max_mirrors must match the write phase.
3470  *
3471  * Return 0 on success, or -1 if the primary super block failed or if
3472  * every copy's buffer head was missing or not up to date.
3473  */
3474 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3475 {
3476 	struct buffer_head *bh;
3477 	int i;
3478 	int errors = 0;
3479 	bool primary_failed = false;
3480 	u64 bytenr;
3481 
3482 	if (max_mirrors == 0)
3483 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3484 
3485 	for (i = 0; i < max_mirrors; i++) {
3486 		bytenr = btrfs_sb_offset(i);
3487 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3488 		    device->commit_total_bytes)
3489 			break;
3490 
3491 		bh = __find_get_block(device->bdev,
3492 				      bytenr / BTRFS_BDEV_BLOCKSIZE,
3493 				      BTRFS_SUPER_INFO_SIZE);
3494 		if (!bh) {
3495 			errors++;
3496 			if (i == 0)
3497 				primary_failed = true;
3498 			continue;
3499 		}
3500 		wait_on_buffer(bh);
3501 		if (!buffer_uptodate(bh)) {
3502 			errors++;
3503 			if (i == 0)
3504 				primary_failed = true;
3505 		}
3506 
3507 		/* drop our reference */
3508 		brelse(bh);
3509 
3510 		/* drop the reference from the writing run */
3511 		brelse(bh);
3512 	}
3513 
3514 	/* log error, force error return */
3515 	if (primary_failed) {
3516 		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3517 			  device->devid);
3518 		return -1;
3519 	}
3520 
3521 	return errors < i ? 0 : -1;
3522 }
3523 
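/*
 * A minimal sketch (illustrative only, not built) of how the two phases
 * above are meant to be paired; write_all_supers() below is the real user:
 *
 *	if (write_dev_supers(dev, sb, max_mirrors))
 *		errors++;
 *	...
 *	if (wait_dev_supers(dev, max_mirrors))
 *		errors++;
 *
 * Passing the same max_mirrors to both calls makes the wait phase look up
 * exactly the buffer heads the write phase submitted.
 */
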
3524 /*
3525  * Endio for write_dev_flush(); this wakes anyone waiting for
3526  * the barrier once the flush bio completes.
3527  */
3528 static void btrfs_end_empty_barrier(struct bio *bio)
3529 {
3530 	complete(bio->bi_private);
3531 }
3532 
3533 /*
3534  * Submit a flush request to the device if it supports it. Error handling is
3535  * done in the waiting counterpart.
3536  */
3537 static void write_dev_flush(struct btrfs_device *device)
3538 {
3539 	struct request_queue *q = bdev_get_queue(device->bdev);
3540 	struct bio *bio = device->flush_bio;
3541 
3542 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3543 		return;
3544 
3545 	bio_reset(bio);
3546 	bio->bi_end_io = btrfs_end_empty_barrier;
3547 	bio_set_dev(bio, device->bdev);
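	/* an empty write bio with REQ_PREFLUSH only flushes the device cache */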
3548 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3549 	init_completion(&device->flush_wait);
3550 	bio->bi_private = &device->flush_wait;
3551 
3552 	btrfsic_submit_bio(bio);
3553 	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3554 }
3555 
3556 /*
3557  * If the flush bio has been submitted by write_dev_flush, wait for it.
3558  */
3559 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3560 {
3561 	struct bio *bio = device->flush_bio;
3562 
3563 	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3564 		return BLK_STS_OK;
3565 
3566 	clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3567 	wait_for_completion_io(&device->flush_wait);
3568 
3569 	return bio->bi_status;
3570 }
3571 
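/*
 * After flush errors, decide whether the filesystem can stay writeable:
 * if the devices that remain healthy no longer satisfy the redundancy
 * profile, btrfs_check_rw_degradable() fails and we return -EIO.
 */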
3572 static int check_barrier_error(struct btrfs_fs_info *fs_info)
3573 {
3574 	if (!btrfs_check_rw_degradable(fs_info, NULL))
3575 		return -EIO;
3576 	return 0;
3577 }
3578 
3579 /*
3580  * Send an empty flush down to each device in parallel,
3581  * then wait for all of them to complete.
3582  */
3583 static int barrier_all_devices(struct btrfs_fs_info *info)
3584 {
3585 	struct list_head *head;
3586 	struct btrfs_device *dev;
3587 	int errors_wait = 0;
3588 	blk_status_t ret;
3589 
3590 	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3591 	/* send down all the barriers */
3592 	head = &info->fs_devices->devices;
3593 	list_for_each_entry(dev, head, dev_list) {
3594 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3595 			continue;
3596 		if (!dev->bdev)
3597 			continue;
3598 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3599 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3600 			continue;
3601 
3602 		write_dev_flush(dev);
3603 		dev->last_flush_error = BLK_STS_OK;
3604 	}
3605 
3606 	/* wait for all the barriers */
3607 	list_for_each_entry(dev, head, dev_list) {
3608 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3609 			continue;
3610 		if (!dev->bdev) {
3611 			errors_wait++;
3612 			continue;
3613 		}
3614 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3615 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3616 			continue;
3617 
3618 		ret = wait_dev_flush(dev);
3619 		if (ret) {
3620 			dev->last_flush_error = ret;
3621 			btrfs_dev_stat_inc_and_print(dev,
3622 					BTRFS_DEV_STAT_FLUSH_ERRS);
3623 			errors_wait++;
3624 		}
3625 	}
3626 
3627 	if (errors_wait) {
3628 		/*
3629 		 * The overall volume status depends on the status of
3630 		 * every disk, so error checking is pushed into this
3631 		 * separate pass that runs after all flushes complete.
3632 		 */
3633 		return check_barrier_error(info);
3634 	}
3635 	return 0;
3636 }
3637 
3638 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3639 {
3640 	int raid_type;
3641 	int min_tolerated = INT_MAX;
3642 
3643 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3644 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3645 		min_tolerated = min(min_tolerated,
3646 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3647 				    tolerated_failures);
3648 
3649 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3650 		if (raid_type == BTRFS_RAID_SINGLE)
3651 			continue;
3652 		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3653 			continue;
3654 		min_tolerated = min(min_tolerated,
3655 				    btrfs_raid_array[raid_type].
3656 				    tolerated_failures);
3657 	}
3658 
3659 	if (min_tolerated == INT_MAX) {
3660 		pr_warn("BTRFS: unknown raid flag: %llu", flags);
3661 		min_tolerated = 0;
3662 	}
3663 
3664 	return min_tolerated;
3665 }
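
/*
 * Worked example (illustrative): if @flags contains BTRFS_BLOCK_GROUP_RAID1
 * (tolerated_failures == 1) and BTRFS_BLOCK_GROUP_RAID0 (tolerated_failures
 * == 0), the minimum is 0, since one lost disk already breaks the RAID0 part.
 */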
3666 
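/*
 * Write the super block (and its mirrors) to every writeable device that
 * participates in filesystem metadata. Up to num_devices - 1 per-device
 * failures are tolerated before the commit is failed with -EIO.
 */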
3667 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3668 {
3669 	struct list_head *head;
3670 	struct btrfs_device *dev;
3671 	struct btrfs_super_block *sb;
3672 	struct btrfs_dev_item *dev_item;
3673 	int ret;
3674 	int do_barriers;
3675 	int max_errors;
3676 	int total_errors = 0;
3677 	u64 flags;
3678 
3679 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3680 
3681 	/*
3682 	 * max_mirrors == 0 indicates we're called from commit_transaction.
3683 	 * We only take backup roots there; in the fsync path the tree
3684 	 * roots in fs_info are not yet consistent on disk.
3685 	 */
3686 	if (max_mirrors == 0)
3687 		backup_super_roots(fs_info);
3688 
3689 	sb = fs_info->super_for_commit;
3690 	dev_item = &sb->dev_item;
3691 
3692 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3693 	head = &fs_info->fs_devices->devices;
3694 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3695 
3696 	if (do_barriers) {
3697 		ret = barrier_all_devices(fs_info);
3698 		if (ret) {
3699 			mutex_unlock(
3700 				&fs_info->fs_devices->device_list_mutex);
3701 			btrfs_handle_fs_error(fs_info, ret,
3702 					      "errors while submitting device barriers.");
3703 			return ret;
3704 		}
3705 	}
3706 
3707 	list_for_each_entry(dev, head, dev_list) {
3708 		if (!dev->bdev) {
3709 			total_errors++;
3710 			continue;
3711 		}
3712 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3713 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3714 			continue;
3715 
3716 		btrfs_set_stack_device_generation(dev_item, 0);
3717 		btrfs_set_stack_device_type(dev_item, dev->type);
3718 		btrfs_set_stack_device_id(dev_item, dev->devid);
3719 		btrfs_set_stack_device_total_bytes(dev_item,
3720 						   dev->commit_total_bytes);
3721 		btrfs_set_stack_device_bytes_used(dev_item,
3722 						  dev->commit_bytes_used);
3723 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3724 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3725 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3726 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3727 		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE);
3728 
3729 		flags = btrfs_super_flags(sb);
3730 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3731 
3732 		ret = btrfs_validate_write_super(fs_info, sb);
3733 		if (ret < 0) {
3734 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3735 			btrfs_handle_fs_error(fs_info, -EUCLEAN,
3736 				"unexpected superblock corruption detected");
3737 			return -EUCLEAN;
3738 		}
3739 
3740 		ret = write_dev_supers(dev, sb, max_mirrors);
3741 		if (ret)
3742 			total_errors++;
3743 	}
3744 	if (total_errors > max_errors) {
3745 		btrfs_err(fs_info, "%d errors while writing supers",
3746 			  total_errors);
3747 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3748 
3749 		/* FUA is masked off if unsupported, so it can't be the cause */
3750 		btrfs_handle_fs_error(fs_info, -EIO,
3751 				      "%d errors while writing supers",
3752 				      total_errors);
3753 		return -EIO;
3754 	}
3755 
3756 	total_errors = 0;
3757 	list_for_each_entry(dev, head, dev_list) {
3758 		if (!dev->bdev)
3759 			continue;
3760 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3761 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3762 			continue;
3763 
3764 		ret = wait_dev_supers(dev, max_mirrors);
3765 		if (ret)
3766 			total_errors++;
3767 	}
3768 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3769 	if (total_errors > max_errors) {
3770 		btrfs_handle_fs_error(fs_info, -EIO,
3771 				      "%d errors while writing supers",
3772 				      total_errors);
3773 		return -EIO;
3774 	}
3775 	return 0;
3776 }
3777 
3778 /* Drop a fs root from the radix tree and free it. */
3779 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3780 				  struct btrfs_root *root)
3781 {
3782 	spin_lock(&fs_info->fs_roots_radix_lock);
3783 	radix_tree_delete(&fs_info->fs_roots_radix,
3784 			  (unsigned long)root->root_key.objectid);
3785 	spin_unlock(&fs_info->fs_roots_radix_lock);
3786 
3787 	if (btrfs_root_refs(&root->root_item) == 0)
3788 		synchronize_srcu(&fs_info->subvol_srcu);
3789 
3790 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3791 		btrfs_free_log(NULL, root);
3792 		if (root->reloc_root) {
3793 			free_extent_buffer(root->reloc_root->node);
3794 			free_extent_buffer(root->reloc_root->commit_root);
3795 			btrfs_put_fs_root(root->reloc_root);
3796 			root->reloc_root = NULL;
3797 		}
3798 	}
3799 
3800 	if (root->free_ino_pinned)
3801 		__btrfs_remove_free_space_cache(root->free_ino_pinned);
3802 	if (root->free_ino_ctl)
3803 		__btrfs_remove_free_space_cache(root->free_ino_ctl);
3804 	btrfs_free_fs_root(root);
3805 }
3806 
3807 void btrfs_free_fs_root(struct btrfs_root *root)
3808 {
3809 	iput(root->ino_cache_inode);
3810 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3811 	if (root->anon_dev)
3812 		free_anon_bdev(root->anon_dev);
3813 	if (root->subv_writers)
3814 		btrfs_free_subvolume_writers(root->subv_writers);
3815 	free_extent_buffer(root->node);
3816 	free_extent_buffer(root->commit_root);
3817 	kfree(root->free_ino_ctl);
3818 	kfree(root->free_ino_pinned);
3819 	btrfs_put_fs_root(root);
3820 }
3821 
3822 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3823 {
3824 	u64 root_objectid = 0;
3825 	struct btrfs_root *gang[8];
3826 	int i = 0;
3827 	int err = 0;
3828 	unsigned int ret = 0;
3829 	int index;
3830 
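	/*
	 * Look up roots in batches of up to ARRAY_SIZE(gang), restarting
	 * each pass just past the highest objectid seen so far.
	 */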
3831 	while (1) {
3832 		index = srcu_read_lock(&fs_info->subvol_srcu);
3833 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3834 					     (void **)gang, root_objectid,
3835 					     ARRAY_SIZE(gang));
3836 		if (!ret) {
3837 			srcu_read_unlock(&fs_info->subvol_srcu, index);
3838 			break;
3839 		}
3840 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
3841 
3842 		for (i = 0; i < ret; i++) {
3843 			/* Avoid grabbing roots in dead_roots */
3844 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3845 				gang[i] = NULL;
3846 				continue;
3847 			}
3848 			/* grab all the search results for later use */
3849 			gang[i] = btrfs_grab_fs_root(gang[i]);
3850 		}
3851 		srcu_read_unlock(&fs_info->subvol_srcu, index);
3852 
3853 		for (i = 0; i < ret; i++) {
3854 			if (!gang[i])
3855 				continue;
3856 			root_objectid = gang[i]->root_key.objectid;
3857 			err = btrfs_orphan_cleanup(gang[i]);
3858 			if (err)
3859 				break;
3860 			btrfs_put_fs_root(gang[i]);
3861 		}
3862 		root_objectid++;
3863 	}
3864 
3865 	/* release the uncleaned roots due to error */
3866 	for (; i < ret; i++) {
3867 		if (gang[i])
3868 			btrfs_put_fs_root(gang[i]);
3869 	}
3870 	return err;
3871 }
3872 
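/*
 * Flush pending delayed iputs, wait for the cleaner, then commit the
 * current transaction; called when closing a writeable filesystem.
 */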
3873 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3874 {
3875 	struct btrfs_root *root = fs_info->tree_root;
3876 	struct btrfs_trans_handle *trans;
3877 
3878 	mutex_lock(&fs_info->cleaner_mutex);
3879 	btrfs_run_delayed_iputs(fs_info);
3880 	mutex_unlock(&fs_info->cleaner_mutex);
3881 	wake_up_process(fs_info->cleaner_kthread);
3882 
3883 	/* wait until ongoing cleanup work is done */
3884 	down_write(&fs_info->cleanup_work_sem);
3885 	up_write(&fs_info->cleanup_work_sem);
3886 
3887 	trans = btrfs_join_transaction(root);
3888 	if (IS_ERR(trans))
3889 		return PTR_ERR(trans);
3890 	return btrfs_commit_transaction(trans);
3891 }
3892 
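/*
 * Tear down the filesystem: stop the background threads and workers, commit
 * the last transaction (or run error cleanup), then release the roots, block
 * groups, devices and remaining per-fs state.
 */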
3893 void close_ctree(struct btrfs_fs_info *fs_info)
3894 {
3895 	int ret;
3896 
3897 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3898 	/*
3899 	 * We don't want the cleaner to start new transactions, add more delayed
3900 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
3901 	 * because that frees the task_struct, and the transaction kthread might
3902 	 * still try to wake up the cleaner.
3903 	 */
3904 	kthread_park(fs_info->cleaner_kthread);
3905 
3906 	/* wait for the qgroup rescan worker to stop */
3907 	btrfs_qgroup_wait_for_completion(fs_info, false);
3908 
3909 	/* wait for the uuid_scan task to finish */
3910 	down(&fs_info->uuid_tree_rescan_sem);
3911 	/* avoid complaints from lockdep et al., set sem back to initial state */
3912 	up(&fs_info->uuid_tree_rescan_sem);
3913 
3914 	/* pause restriper - we want to resume on mount */
3915 	btrfs_pause_balance(fs_info);
3916 
3917 	btrfs_dev_replace_suspend_for_unmount(fs_info);
3918 
3919 	btrfs_scrub_cancel(fs_info);
3920 
3921 	/* wait for any defraggers to finish */
3922 	wait_event(fs_info->transaction_wait,
3923 		   (atomic_read(&fs_info->defrag_running) == 0));
3924 
3925 	/* clear out the rbtree of defraggable inodes */
3926 	btrfs_cleanup_defrag_inodes(fs_info);
3927 
3928 	cancel_work_sync(&fs_info->async_reclaim_work);
3929 
3930 	if (!sb_rdonly(fs_info->sb)) {
3931 		/*
3932 		 * The cleaner kthread is stopped, so do one final pass over
3933 		 * unused block groups.
3934 		 */
3935 		btrfs_delete_unused_bgs(fs_info);
3936 
3937 		ret = btrfs_commit_super(fs_info);
3938 		if (ret)
3939 			btrfs_err(fs_info, "commit super ret %d", ret);
3940 	}
3941 
3942 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
3943 	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
3944 		btrfs_error_commit_super(fs_info);
3945 
3946 	kthread_stop(fs_info->transaction_kthread);
3947 	kthread_stop(fs_info->cleaner_kthread);
3948 
3949 	ASSERT(list_empty(&fs_info->delayed_iputs));
3950 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3951 
3952 	btrfs_free_qgroup_config(fs_info);
3953 	ASSERT(list_empty(&fs_info->delalloc_roots));
3954 
3955 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3956 		btrfs_info(fs_info, "at unmount delalloc count %lld",
3957 		       percpu_counter_sum(&fs_info->delalloc_bytes));
3958 	}
3959 
3960 	btrfs_sysfs_remove_mounted(fs_info);
3961 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3962 
3963 	btrfs_free_fs_roots(fs_info);
3964 
3965 	btrfs_put_block_group_cache(fs_info);
3966 
3967 	/*
3968 	 * We must make sure no read requests can be submitted
3969 	 * after we have stopped all the workers.
3970 	 */
3971 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3972 	btrfs_stop_all_workers(fs_info);
3973 
3974 	btrfs_free_block_groups(fs_info);
3975 
3976 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3977 	free_root_pointers(fs_info, 1);
3978 
3979 	iput(fs_info->btree_inode);
3980 
3981 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3982 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
3983 		btrfsic_unmount(fs_info->fs_devices);
3984 #endif
3985 
3986 	btrfs_close_devices(fs_info->fs_devices);
3987 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3988 
3989 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3990 	percpu_counter_destroy(&fs_info->delalloc_bytes);
3991 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
3992 	cleanup_srcu_struct(&fs_info->subvol_srcu);
3993 
3994 	btrfs_free_stripe_hash_table(fs_info);
3995 	btrfs_free_ref_cache(fs_info);
3996 
3997 	while (!list_empty(&fs_info->pinned_chunks)) {
3998 		struct extent_map *em;
3999 
4000 		em = list_first_entry(&fs_info->pinned_chunks,
4001 				      struct extent_map, list);
4002 		list_del_init(&em->list);
4003 		free_extent_map(em);
4004 	}
4005 }
4006 
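/*
 * Return 1 if @buf is up to date and was written by @parent_transid, 0 if
 * not, or -EAGAIN when @atomic is set and verification would have to block.
 */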
4007 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4008 			  int atomic)
4009 {
4010 	int ret;
4011 	struct inode *btree_inode = buf->pages[0]->mapping->host;
4012 
4013 	ret = extent_buffer_uptodate(buf);
4014 	if (!ret)
4015 		return ret;
4016 
4017 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4018 				    parent_transid, atomic);
4019 	if (ret == -EAGAIN)
4020 		return ret;
4021 	return !ret;
4022 }
4023 
4024 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4025 {
4026 	struct btrfs_fs_info *fs_info;
4027 	struct btrfs_root *root;
4028 	u64 transid = btrfs_header_generation(buf);
4029 	int was_dirty;
4030 
4031 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4032 	/*
4033 	 * This is a fast path, so only do this check if we have sanity tests
4034 	 * enabled.  Normal users shouldn't be marking unmapped buffers dirty
4035 	 * outside of the sanity tests.
4036 	 */
4037 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4038 		return;
4039 #endif
4040 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4041 	fs_info = root->fs_info;
4042 	btrfs_assert_tree_locked(buf);
4043 	if (transid != fs_info->generation)
4044 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4045 			buf->start, transid, fs_info->generation);
4046 	was_dirty = set_extent_buffer_dirty(buf);
4047 	if (!was_dirty)
4048 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4049 					 buf->len,
4050 					 fs_info->dirty_metadata_batch);
4051 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4052 	/*
4053 	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
4054 	 * but the item data not yet updated, so only check item pointers
4055 	 * here, not item data.
4056 	 */
4057 	if (btrfs_header_level(buf) == 0 &&
4058 	    btrfs_check_leaf_relaxed(fs_info, buf)) {
4059 		btrfs_print_leaf(buf);
4060 		ASSERT(0);
4061 	}
4062 #endif
4063 }
4064 
4065 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4066 					int flush_delayed)
4067 {
4068 	/*
4069 	 * Looks as though older kernels can get into trouble with this
4070 	 * code; they end up stuck in balance_dirty_pages() forever.
4071 	 */
4072 	int ret;
4073 
4074 	if (current->flags & PF_MEMALLOC)
4075 		return;
4076 
4077 	if (flush_delayed)
4078 		btrfs_balance_delayed_items(fs_info);
4079 
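	/* only throttle once dirty metadata exceeds the global threshold */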
4080 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4081 				     BTRFS_DIRTY_METADATA_THRESH,
4082 				     fs_info->dirty_metadata_batch);
4083 	if (ret > 0) {
4084 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4085 	}
4086 }
4087 
4088 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4089 {
4090 	__btrfs_btree_balance_dirty(fs_info, 1);
4091 }
4092 
4093 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4094 {
4095 	__btrfs_btree_balance_dirty(fs_info, 0);
4096 }
4097 
4098 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
4099 		      struct btrfs_key *first_key)
4100 {
4101 	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4102 	struct btrfs_fs_info *fs_info = root->fs_info;
4103 
4104 	return btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
4105 					      level, first_key);
4106 }
4107 
4108 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4109 {
4110 	/* cleanup FS via transaction */
4111 	btrfs_cleanup_transaction(fs_info);
4112 
4113 	mutex_lock(&fs_info->cleaner_mutex);
4114 	btrfs_run_delayed_iputs(fs_info);
4115 	mutex_unlock(&fs_info->cleaner_mutex);
4116 
4117 	down_write(&fs_info->cleanup_work_sem);
4118 	up_write(&fs_info->cleanup_work_sem);
4119 }
4120 
4121 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4122 {
4123 	struct btrfs_ordered_extent *ordered;
4124 
4125 	spin_lock(&root->ordered_extent_lock);
4126 	/*
4127 	 * Setting BTRFS_ORDERED_IOERR short circuits ordered completion,
4128 	 * which makes sure each ordered extent gets properly cleaned up.
4129 	 */
4130 	list_for_each_entry(ordered, &root->ordered_extents,
4131 			    root_extent_list)
4132 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4133 	spin_unlock(&root->ordered_extent_lock);
4134 }
4135 
4136 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4137 {
4138 	struct btrfs_root *root;
4139 	struct list_head splice;
4140 
4141 	INIT_LIST_HEAD(&splice);
4142 
4143 	spin_lock(&fs_info->ordered_root_lock);
4144 	list_splice_init(&fs_info->ordered_roots, &splice);
4145 	while (!list_empty(&splice)) {
4146 		root = list_first_entry(&splice, struct btrfs_root,
4147 					ordered_root);
4148 		list_move_tail(&root->ordered_root,
4149 			       &fs_info->ordered_roots);
4150 
4151 		spin_unlock(&fs_info->ordered_root_lock);
4152 		btrfs_destroy_ordered_extents(root);
4153 
4154 		cond_resched();
4155 		spin_lock(&fs_info->ordered_root_lock);
4156 	}
4157 	spin_unlock(&fs_info->ordered_root_lock);
4158 }
4159 
4160 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4161 				      struct btrfs_fs_info *fs_info)
4162 {
4163 	struct rb_node *node;
4164 	struct btrfs_delayed_ref_root *delayed_refs;
4165 	struct btrfs_delayed_ref_node *ref;
4166 	int ret = 0;
4167 
4168 	delayed_refs = &trans->delayed_refs;
4169 
4170 	spin_lock(&delayed_refs->lock);
4171 	if (atomic_read(&delayed_refs->num_entries) == 0) {
4172 		spin_unlock(&delayed_refs->lock);
4173 		btrfs_info(fs_info, "delayed_refs has no entries");
4174 		return ret;
4175 	}
4176 
4177 	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4178 		struct btrfs_delayed_ref_head *head;
4179 		struct rb_node *n;
4180 		bool pin_bytes = false;
4181 
4182 		head = rb_entry(node, struct btrfs_delayed_ref_head,
4183 				href_node);
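		/*
		 * The head is locked, so someone is still processing it;
		 * take a reference, wait for the holder by cycling
		 * head->mutex, then restart the scan from the top.
		 */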
4184 		if (!mutex_trylock(&head->mutex)) {
4185 			refcount_inc(&head->refs);
4186 			spin_unlock(&delayed_refs->lock);
4187 
4188 			mutex_lock(&head->mutex);
4189 			mutex_unlock(&head->mutex);
4190 			btrfs_put_delayed_ref_head(head);
4191 			spin_lock(&delayed_refs->lock);
4192 			continue;
4193 		}
4194 		spin_lock(&head->lock);
4195 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4196 			ref = rb_entry(n, struct btrfs_delayed_ref_node,
4197 				       ref_node);
4198 			ref->in_tree = 0;
4199 			rb_erase_cached(&ref->ref_node, &head->ref_tree);
4200 			RB_CLEAR_NODE(&ref->ref_node);
4201 			if (!list_empty(&ref->add_list))
4202 				list_del(&ref->add_list);
4203 			atomic_dec(&delayed_refs->num_entries);
4204 			btrfs_put_delayed_ref(ref);
4205 		}
4206 		if (head->must_insert_reserved)
4207 			pin_bytes = true;
4208 		btrfs_free_delayed_extent_op(head->extent_op);
4209 		delayed_refs->num_heads--;
4210 		if (head->processing == 0)
4211 			delayed_refs->num_heads_ready--;
4212 		atomic_dec(&delayed_refs->num_entries);
4213 		rb_erase_cached(&head->href_node, &delayed_refs->href_root);
4214 		RB_CLEAR_NODE(&head->href_node);
4215 		spin_unlock(&head->lock);
4216 		spin_unlock(&delayed_refs->lock);
4217 		mutex_unlock(&head->mutex);
4218 
4219 		if (pin_bytes)
4220 			btrfs_pin_extent(fs_info, head->bytenr,
4221 					 head->num_bytes, 1);
4222 		btrfs_put_delayed_ref_head(head);
4223 		cond_resched();
4224 		spin_lock(&delayed_refs->lock);
4225 	}
4226 
4227 	spin_unlock(&delayed_refs->lock);
4228 
4229 	return ret;
4230 }
4231 
4232 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4233 {
4234 	struct btrfs_inode *btrfs_inode;
4235 	struct list_head splice;
4236 
4237 	INIT_LIST_HEAD(&splice);
4238 
4239 	spin_lock(&root->delalloc_lock);
4240 	list_splice_init(&root->delalloc_inodes, &splice);
4241 
4242 	while (!list_empty(&splice)) {
4243 		struct inode *inode = NULL;
4244 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4245 					       delalloc_inodes);
4246 		__btrfs_del_delalloc_inode(root, btrfs_inode);
4247 		spin_unlock(&root->delalloc_lock);
4248 
4249 		/*
4250 		 * Make sure we get a live inode and that it won't disappear
4251 		 * out from under us in the meantime.
4252 		 */
4253 		inode = igrab(&btrfs_inode->vfs_inode);
4254 		if (inode) {
4255 			invalidate_inode_pages2(inode->i_mapping);
4256 			iput(inode);
4257 		}
4258 		spin_lock(&root->delalloc_lock);
4259 	}
4260 	spin_unlock(&root->delalloc_lock);
4261 }
4262 
4263 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4264 {
4265 	struct btrfs_root *root;
4266 	struct list_head splice;
4267 
4268 	INIT_LIST_HEAD(&splice);
4269 
4270 	spin_lock(&fs_info->delalloc_root_lock);
4271 	list_splice_init(&fs_info->delalloc_roots, &splice);
4272 	while (!list_empty(&splice)) {
4273 		root = list_first_entry(&splice, struct btrfs_root,
4274 					 delalloc_root);
4275 		root = btrfs_grab_fs_root(root);
4276 		BUG_ON(!root);
4277 		spin_unlock(&fs_info->delalloc_root_lock);
4278 
4279 		btrfs_destroy_delalloc_inodes(root);
4280 		btrfs_put_fs_root(root);
4281 
4282 		spin_lock(&fs_info->delalloc_root_lock);
4283 	}
4284 	spin_unlock(&fs_info->delalloc_root_lock);
4285 }
4286 
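/*
 * Clear @mark from @dirty_pages and drop the dirty bit from the extent
 * buffers backing those ranges, so they can be released without writeback.
 */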
4287 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4288 					struct extent_io_tree *dirty_pages,
4289 					int mark)
4290 {
4291 	int ret;
4292 	struct extent_buffer *eb;
4293 	u64 start = 0;
4294 	u64 end;
4295 
4296 	while (1) {
4297 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4298 					    mark, NULL);
4299 		if (ret)
4300 			break;
4301 
4302 		clear_extent_bits(dirty_pages, start, end, mark);
4303 		while (start <= end) {
4304 			eb = find_extent_buffer(fs_info, start);
4305 			start += fs_info->nodesize;
4306 			if (!eb)
4307 				continue;
4308 			wait_on_extent_buffer_writeback(eb);
4309 
4310 			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4311 					       &eb->bflags))
4312 				clear_extent_buffer_dirty(eb);
4313 			free_extent_buffer_stale(eb);
4314 		}
4315 	}
4316 
4317 	return ret;
4318 }
4319 
4320 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4321 				       struct extent_io_tree *pinned_extents)
4322 {
4323 	struct extent_io_tree *unpin;
4324 	u64 start;
4325 	u64 end;
4326 	int ret;
4327 	bool loop = true;
4328 
4329 	unpin = pinned_extents;
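	/*
	 * Pinned extents are double buffered in fs_info->freed_extents[];
	 * drain whichever tree was passed in first, then the other one.
	 */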
4330 again:
4331 	while (1) {
4332 		/*
4333 		 * The btrfs_finish_extent_commit() may get the same range as
4334 		 * ours between find_first_extent_bit and clear_extent_dirty.
4335 		 * Hence, hold the unused_bg_unpin_mutex to avoid double
4336 		 * unpinning of the same extent range.
4337 		 */
4338 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4339 		ret = find_first_extent_bit(unpin, 0, &start, &end,
4340 					    EXTENT_DIRTY, NULL);
4341 		if (ret) {
4342 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4343 			break;
4344 		}
4345 
4346 		clear_extent_dirty(unpin, start, end);
4347 		btrfs_error_unpin_extent_range(fs_info, start, end);
4348 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4349 		cond_resched();
4350 	}
4351 
4352 	if (loop) {
4353 		if (unpin == &fs_info->freed_extents[0])
4354 			unpin = &fs_info->freed_extents[1];
4355 		else
4356 			unpin = &fs_info->freed_extents[0];
4357 		loop = false;
4358 		goto again;
4359 	}
4360 
4361 	return 0;
4362 }
4363 
4364 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4365 {
4366 	struct inode *inode;
4367 
4368 	inode = cache->io_ctl.inode;
4369 	if (inode) {
4370 		invalidate_inode_pages2(inode->i_mapping);
4371 		BTRFS_I(inode)->generation = 0;
4372 		cache->io_ctl.inode = NULL;
4373 		iput(inode);
4374 	}
4375 	btrfs_put_block_group(cache);
4376 }
4377 
4378 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4379 			     struct btrfs_fs_info *fs_info)
4380 {
4381 	struct btrfs_block_group_cache *cache;
4382 
4383 	spin_lock(&cur_trans->dirty_bgs_lock);
4384 	while (!list_empty(&cur_trans->dirty_bgs)) {
4385 		cache = list_first_entry(&cur_trans->dirty_bgs,
4386 					 struct btrfs_block_group_cache,
4387 					 dirty_list);
4388 
4389 		if (!list_empty(&cache->io_list)) {
4390 			spin_unlock(&cur_trans->dirty_bgs_lock);
4391 			list_del_init(&cache->io_list);
4392 			btrfs_cleanup_bg_io(cache);
4393 			spin_lock(&cur_trans->dirty_bgs_lock);
4394 		}
4395 
4396 		list_del_init(&cache->dirty_list);
4397 		spin_lock(&cache->lock);
4398 		cache->disk_cache_state = BTRFS_DC_ERROR;
4399 		spin_unlock(&cache->lock);
4400 
4401 		spin_unlock(&cur_trans->dirty_bgs_lock);
4402 		btrfs_put_block_group(cache);
4403 		spin_lock(&cur_trans->dirty_bgs_lock);
4404 	}
4405 	spin_unlock(&cur_trans->dirty_bgs_lock);
4406 
4407 	/*
4408 	 * Refer to the definition of the io_bgs member for details on why
4409 	 * it's safe to use it without any locking.
4410 	 */
4411 	while (!list_empty(&cur_trans->io_bgs)) {
4412 		cache = list_first_entry(&cur_trans->io_bgs,
4413 					 struct btrfs_block_group_cache,
4414 					 io_list);
4415 
4416 		list_del_init(&cache->io_list);
4417 		spin_lock(&cache->lock);
4418 		cache->disk_cache_state = BTRFS_DC_ERROR;
4419 		spin_unlock(&cache->lock);
4420 		btrfs_cleanup_bg_io(cache);
4421 	}
4422 }
4423 
4424 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4425 				   struct btrfs_fs_info *fs_info)
4426 {
4427 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4428 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4429 	ASSERT(list_empty(&cur_trans->io_bgs));
4430 
4431 	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4432 
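	/*
	 * Walk the transaction through its usual states so that anything
	 * blocked on transaction_blocked_wait, transaction_wait or
	 * commit_wait is woken up and can bail out.
	 */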
4433 	cur_trans->state = TRANS_STATE_COMMIT_START;
4434 	wake_up(&fs_info->transaction_blocked_wait);
4435 
4436 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4437 	wake_up(&fs_info->transaction_wait);
4438 
4439 	btrfs_destroy_delayed_inodes(fs_info);
4440 	btrfs_assert_delayed_root_empty(fs_info);
4441 
4442 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4443 				     EXTENT_DIRTY);
4444 	btrfs_destroy_pinned_extent(fs_info,
4445 				    fs_info->pinned_extents);
4446 
4447 	cur_trans->state = TRANS_STATE_COMPLETED;
4448 	wake_up(&cur_trans->commit_wait);
4449 }
4450 
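/* Abort and tear down every transaction still on fs_info->trans_list. */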
4451 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4452 {
4453 	struct btrfs_transaction *t;
4454 
4455 	mutex_lock(&fs_info->transaction_kthread_mutex);
4456 
4457 	spin_lock(&fs_info->trans_lock);
4458 	while (!list_empty(&fs_info->trans_list)) {
4459 		t = list_first_entry(&fs_info->trans_list,
4460 				     struct btrfs_transaction, list);
4461 		if (t->state >= TRANS_STATE_COMMIT_START) {
4462 			refcount_inc(&t->use_count);
4463 			spin_unlock(&fs_info->trans_lock);
4464 			btrfs_wait_for_commit(fs_info, t->transid);
4465 			btrfs_put_transaction(t);
4466 			spin_lock(&fs_info->trans_lock);
4467 			continue;
4468 		}
4469 		if (t == fs_info->running_transaction) {
4470 			t->state = TRANS_STATE_COMMIT_DOING;
4471 			spin_unlock(&fs_info->trans_lock);
4472 			/*
4473 			 * Wait for num_writers to drop to 0, since we don't
4474 			 * currently hold an open trans handle for this transaction.
4475 			 */
4476 			wait_event(t->writer_wait,
4477 				   atomic_read(&t->num_writers) == 0);
4478 		} else {
4479 			spin_unlock(&fs_info->trans_lock);
4480 		}
4481 		btrfs_cleanup_one_transaction(t, fs_info);
4482 
4483 		spin_lock(&fs_info->trans_lock);
4484 		if (t == fs_info->running_transaction)
4485 			fs_info->running_transaction = NULL;
4486 		list_del_init(&t->list);
4487 		spin_unlock(&fs_info->trans_lock);
4488 
4489 		btrfs_put_transaction(t);
4490 		trace_btrfs_transaction_commit(fs_info->tree_root);
4491 		spin_lock(&fs_info->trans_lock);
4492 	}
4493 	spin_unlock(&fs_info->trans_lock);
4494 	btrfs_destroy_all_ordered_extents(fs_info);
4495 	btrfs_destroy_delayed_inodes(fs_info);
4496 	btrfs_assert_delayed_root_empty(fs_info);
4497 	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4498 	btrfs_destroy_all_delalloc_inodes(fs_info);
4499 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4500 
4501 	return 0;
4502 }
4503 
4504 static const struct extent_io_ops btree_extent_io_ops = {
4505 	/* mandatory callbacks */
4506 	.submit_bio_hook = btree_submit_bio_hook,
4507 	.readpage_end_io_hook = btree_readpage_end_io_hook,
4508 	.readpage_io_failed_hook = btree_io_failed_hook,
4509 
4510 	/* optional callbacks */
4511 };
4512