xref: /openbmc/linux/fs/btrfs/disk-io.c (revision d4295e12)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/blkdev.h>
8 #include <linux/radix-tree.h>
9 #include <linux/writeback.h>
10 #include <linux/buffer_head.h>
11 #include <linux/workqueue.h>
12 #include <linux/kthread.h>
13 #include <linux/slab.h>
14 #include <linux/migrate.h>
15 #include <linux/ratelimit.h>
16 #include <linux/uuid.h>
17 #include <linux/semaphore.h>
18 #include <linux/error-injection.h>
19 #include <linux/crc32c.h>
20 #include <asm/unaligned.h>
21 #include "ctree.h"
22 #include "disk-io.h"
23 #include "transaction.h"
24 #include "btrfs_inode.h"
25 #include "volumes.h"
26 #include "print-tree.h"
27 #include "locking.h"
28 #include "tree-log.h"
29 #include "free-space-cache.h"
30 #include "free-space-tree.h"
31 #include "inode-map.h"
32 #include "check-integrity.h"
33 #include "rcu-string.h"
34 #include "dev-replace.h"
35 #include "raid56.h"
36 #include "sysfs.h"
37 #include "qgroup.h"
38 #include "compression.h"
39 #include "tree-checker.h"
40 #include "ref-verify.h"
41 
42 #ifdef CONFIG_X86
43 #include <asm/cpufeature.h>
44 #endif
45 
46 #define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
47 				 BTRFS_HEADER_FLAG_RELOC |\
48 				 BTRFS_SUPER_FLAG_ERROR |\
49 				 BTRFS_SUPER_FLAG_SEEDING |\
50 				 BTRFS_SUPER_FLAG_METADUMP |\
51 				 BTRFS_SUPER_FLAG_METADUMP_V2)
52 
53 static const struct extent_io_ops btree_extent_io_ops;
54 static void end_workqueue_fn(struct btrfs_work *work);
55 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
56 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
57 				      struct btrfs_fs_info *fs_info);
58 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
59 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
60 					struct extent_io_tree *dirty_pages,
61 					int mark);
62 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
63 				       struct extent_io_tree *pinned_extents);
64 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
65 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
66 
67 /*
68  * btrfs_end_io_wq structs are used to do processing in task context when an IO
69  * is complete.  This is used during reads to verify checksums, and it is used
70  * by writes to insert metadata for new file extents after IO is complete.
71  */
72 struct btrfs_end_io_wq {
73 	struct bio *bio;
74 	bio_end_io_t *end_io;
75 	void *private;
76 	struct btrfs_fs_info *info;
77 	blk_status_t status;
78 	enum btrfs_wq_endio_type metadata;
79 	struct btrfs_work work;
80 };
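/*
 * The original bio's ->bi_private and ->bi_end_io are stashed in ->private
 * and ->end_io above; end_workqueue_fn() restores both before calling
 * bio_endio(), so the submitter's completion handler runs unchanged after
 * the task-context work has finished.
 */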
81 
82 static struct kmem_cache *btrfs_end_io_wq_cache;
83 
84 int __init btrfs_end_io_wq_init(void)
85 {
86 	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
87 					sizeof(struct btrfs_end_io_wq),
88 					0,
89 					SLAB_MEM_SPREAD,
90 					NULL);
91 	if (!btrfs_end_io_wq_cache)
92 		return -ENOMEM;
93 	return 0;
94 }
95 
96 void __cold btrfs_end_io_wq_exit(void)
97 {
98 	kmem_cache_destroy(btrfs_end_io_wq_cache);
99 }
100 
101 /*
102  * async submit bios are used to offload expensive checksumming
103  * onto the worker threads.  They checksum file and metadata bios
104  * just before they are sent down the IO stack.
105  */
106 struct async_submit_bio {
107 	void *private_data;
108 	struct bio *bio;
109 	extent_submit_bio_start_t *submit_bio_start;
110 	int mirror_num;
111 	/*
112 	 * bio_offset is optional and can be used if the pages in the bio
113 	 * can't tell us where in the file the bio should go.
114 	 */
115 	u64 bio_offset;
116 	struct btrfs_work work;
117 	blk_status_t status;
118 };
119 
120 /*
121  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
122  * eb, the lockdep key is determined by the btrfs_root it belongs to and
123  * the level the eb occupies in the tree.
124  *
125  * Different roots are used for different purposes and may nest inside each
126  * other, so they require separate keysets.  As lockdep keys should be
127  * static, assign keysets according to the purpose of the root as indicated
128  * by btrfs_root->root_key.objectid.  This ensures that all special purpose
129  * roots have separate keysets.
130  *
131  * Lock-nesting across peer nodes is always done with the immediate parent
132  * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
133  * a subclass to avoid triggering lockdep warnings in such cases.
134  *
135  * The key is set by the readpage_end_io_hook after the buffer has passed
136  * csum validation but before the pages are unlocked.  It is also set by
137  * btrfs_init_new_buffer on freshly allocated blocks.
138  *
139  * We also add a check to make sure the highest level of the tree is the
140  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
141  * needs update as well.
142  */
143 #ifdef CONFIG_DEBUG_LOCK_ALLOC
144 # if BTRFS_MAX_LEVEL != 8
145 #  error "BTRFS_MAX_LEVEL changed, update the lockdep keysets below"
146 # endif
147 
148 static struct btrfs_lockdep_keyset {
149 	u64			id;		/* root objectid */
150 	const char		*name_stem;	/* lock name stem */
151 	char			names[BTRFS_MAX_LEVEL + 1][20];
152 	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
153 } btrfs_lockdep_keysets[] = {
154 	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
155 	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
156 	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
157 	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
158 	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
159 	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
160 	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
161 	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
162 	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
163 	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
164 	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
165 	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
166 	{ .id = 0,				.name_stem = "tree"	},
167 };
168 
169 void __init btrfs_init_lockdep(void)
170 {
171 	int i, j;
172 
173 	/* initialize lockdep class names */
174 	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
175 		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
176 
177 		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
178 			snprintf(ks->names[j], sizeof(ks->names[j]),
179 				 "btrfs-%s-%02d", ks->name_stem, j);
180 	}
181 }
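/*
 * For example, after btrfs_init_lockdep() the extent tree keyset carries the
 * lock names "btrfs-extent-00" through "btrfs-extent-08", one per slot in
 * the keyset (levels 0-7 plus one spare).
 */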
182 
183 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
184 				    int level)
185 {
186 	struct btrfs_lockdep_keyset *ks;
187 
188 	BUG_ON(level >= ARRAY_SIZE(ks->keys));
189 
190 	/* find the matching keyset; id 0 is the default entry */
191 	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
192 		if (ks->id == objectid)
193 			break;
194 
195 	lockdep_set_class_and_name(&eb->lock,
196 				   &ks->keys[level], ks->names[level]);
197 }
198 
199 #endif
200 
201 /*
202  * extents on the btree inode are pretty simple: there's one extent
203  * that covers the entire device.
204  */
205 struct extent_map *btree_get_extent(struct btrfs_inode *inode,
206 		struct page *page, size_t pg_offset, u64 start, u64 len,
207 		int create)
208 {
209 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
210 	struct extent_map_tree *em_tree = &inode->extent_tree;
211 	struct extent_map *em;
212 	int ret;
213 
214 	read_lock(&em_tree->lock);
215 	em = lookup_extent_mapping(em_tree, start, len);
216 	if (em) {
217 		em->bdev = fs_info->fs_devices->latest_bdev;
218 		read_unlock(&em_tree->lock);
219 		goto out;
220 	}
221 	read_unlock(&em_tree->lock);
222 
223 	em = alloc_extent_map();
224 	if (!em) {
225 		em = ERR_PTR(-ENOMEM);
226 		goto out;
227 	}
228 	em->start = 0;
229 	em->len = (u64)-1;
230 	em->block_len = (u64)-1;
231 	em->block_start = 0;
232 	em->bdev = fs_info->fs_devices->latest_bdev;
233 
234 	write_lock(&em_tree->lock);
235 	ret = add_extent_mapping(em_tree, em, 0);
236 	if (ret == -EEXIST) {
237 		free_extent_map(em);
238 		em = lookup_extent_mapping(em_tree, start, len);
239 		if (!em)
240 			em = ERR_PTR(-EIO);
241 	} else if (ret) {
242 		free_extent_map(em);
243 		em = ERR_PTR(ret);
244 	}
245 	write_unlock(&em_tree->lock);
246 
247 out:
248 	return em;
249 }
250 
251 u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
252 {
253 	return crc32c(seed, data, len);
254 }
255 
256 void btrfs_csum_final(u32 crc, u8 *result)
257 {
258 	put_unaligned_le32(~crc, result);
259 }
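/*
 * A minimal sketch of how the two helpers above compose; example_csum_buffer
 * is an illustrative name, not a btrfs API.  Seed crc32c with ~0, accumulate
 * over the payload, then invert and store the result as little-endian bytes.
 */
static inline void example_csum_buffer(const char *buf, size_t len, u8 *out)
{
	u32 crc = ~(u32)0;			/* standard crc32c seed */

	crc = btrfs_csum_data(buf, crc, len);	/* accumulate over payload */
	btrfs_csum_final(crc, out);		/* invert, store as LE32 */
}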
260 
261 /*
262  * compute the csum for a btree block, and either verify it or write it
263  * into the csum field of the block.
264  */
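/*
 * On disk the first BTRFS_CSUM_SIZE bytes of a tree block are reserved for
 * the checksum; the checksum itself covers bytes [BTRFS_CSUM_SIZE, buf->len),
 * which is why the loop below starts at offset BTRFS_CSUM_SIZE.
 */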
265 static int csum_tree_block(struct btrfs_fs_info *fs_info,
266 			   struct extent_buffer *buf,
267 			   int verify)
268 {
269 	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
270 	char result[BTRFS_CSUM_SIZE];
271 	unsigned long len;
272 	unsigned long cur_len;
273 	unsigned long offset = BTRFS_CSUM_SIZE;
274 	char *kaddr;
275 	unsigned long map_start;
276 	unsigned long map_len;
277 	int err;
278 	u32 crc = ~(u32)0;
279 
280 	len = buf->len - offset;
281 	while (len > 0) {
282 		err = map_private_extent_buffer(buf, offset, 32,
283 					&kaddr, &map_start, &map_len);
284 		if (err)
285 			return err;
286 		cur_len = min(len, map_len - (offset - map_start));
287 		crc = btrfs_csum_data(kaddr + offset - map_start,
288 				      crc, cur_len);
289 		len -= cur_len;
290 		offset += cur_len;
291 	}
292 	memset(result, 0, BTRFS_CSUM_SIZE);
293 
294 	btrfs_csum_final(crc, result);
295 
296 	if (verify) {
297 		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
298 			u32 val;
299 			u32 found = 0;
300 			memcpy(&found, result, csum_size);
301 
302 			read_extent_buffer(buf, &val, 0, csum_size);
303 			btrfs_warn_rl(fs_info,
304 				"%s checksum verify failed on %llu wanted %X found %X level %d",
305 				fs_info->sb->s_id, buf->start,
306 				val, found, btrfs_header_level(buf));
307 			return -EUCLEAN;
308 		}
309 	} else {
310 		write_extent_buffer(buf, result, 0, csum_size);
311 	}
312 
313 	return 0;
314 }
315 
316 /*
317  * we can't consider a given block up to date unless the transid of the
318  * block matches the transid in the parent node's pointer.  This is how we
319  * detect blocks that either didn't get written at all or got written
320  * in the wrong place.
321  */
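/*
 * Example: if a parent node records generation 100 in its pointer but the
 * child block found on disk says generation 97, the child is stale (a lost
 * or misplaced write) and verify_parent_transid() returns 1.
 */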
322 static int verify_parent_transid(struct extent_io_tree *io_tree,
323 				 struct extent_buffer *eb, u64 parent_transid,
324 				 int atomic)
325 {
326 	struct extent_state *cached_state = NULL;
327 	int ret;
328 	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);
329 
330 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
331 		return 0;
332 
333 	if (atomic)
334 		return -EAGAIN;
335 
336 	if (need_lock) {
337 		btrfs_tree_read_lock(eb);
338 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
339 	}
340 
341 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
342 			 &cached_state);
343 	if (extent_buffer_uptodate(eb) &&
344 	    btrfs_header_generation(eb) == parent_transid) {
345 		ret = 0;
346 		goto out;
347 	}
348 	btrfs_err_rl(eb->fs_info,
349 		"parent transid verify failed on %llu wanted %llu found %llu",
350 			eb->start,
351 			parent_transid, btrfs_header_generation(eb));
352 	ret = 1;
353 
354 	/*
355 	 * Things reading via commit roots that don't have normal protection,
356 	 * like send, can have a really old block in cache that may point at a
357 	 * block that has been freed and re-allocated.  So don't clear uptodate
358 	 * if we find an eb that is under IO (dirty/writeback) because we could
359 	 * end up reading in the stale data and then writing it back out and
360 	 * making everybody very sad.
361 	 */
362 	if (!extent_buffer_under_io(eb))
363 		clear_extent_buffer_uptodate(eb);
364 out:
365 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
366 			     &cached_state);
367 	if (need_lock)
368 		btrfs_tree_read_unlock_blocking(eb);
369 	return ret;
370 }
371 
372 /*
373  * Return 0 if the superblock checksum type matches the checksum value of that
374  * algorithm. Pass the raw disk superblock data.
375  */
376 static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
377 				  char *raw_disk_sb)
378 {
379 	struct btrfs_super_block *disk_sb =
380 		(struct btrfs_super_block *)raw_disk_sb;
381 	u16 csum_type = btrfs_super_csum_type(disk_sb);
382 	int ret = 0;
383 
384 	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
385 		u32 crc = ~(u32)0;
386 		char result[sizeof(crc)];
387 
388 		/*
389 		 * The super_block structure does not span the whole
390 		 * BTRFS_SUPER_INFO_SIZE range; we expect that the unused space
391 		 * is filled with zeros and is included in the checksum.
392 		 */
393 		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
394 				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
395 		btrfs_csum_final(crc, result);
396 
397 		if (memcmp(raw_disk_sb, result, sizeof(result)))
398 			ret = 1;
399 	}
400 
401 	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
402 		btrfs_err(fs_info, "unsupported checksum algorithm %u",
403 				csum_type);
404 		ret = 1;
405 	}
406 
407 	return ret;
408 }
409 
410 static int verify_level_key(struct btrfs_fs_info *fs_info,
411 			    struct extent_buffer *eb, int level,
412 			    struct btrfs_key *first_key, u64 parent_transid)
413 {
414 	int found_level;
415 	struct btrfs_key found_key;
416 	int ret;
417 
418 	found_level = btrfs_header_level(eb);
419 	if (found_level != level) {
420 #ifdef CONFIG_BTRFS_DEBUG
421 		WARN_ON(1);
422 		btrfs_err(fs_info,
423 "tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
424 			  eb->start, level, found_level);
425 #endif
426 		return -EIO;
427 	}
428 
429 	if (!first_key)
430 		return 0;
431 
432 	/*
433 	 * For live tree blocks (new tree blocks in the current transaction),
434 	 * we need proper lock context to avoid races, which is impossible here.
435 	 * So we only check tree blocks which were read from disk, whose
436 	 * generation <= fs_info->last_trans_committed.
437 	 */
438 	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
439 		return 0;
440 	if (found_level)
441 		btrfs_node_key_to_cpu(eb, &found_key, 0);
442 	else
443 		btrfs_item_key_to_cpu(eb, &found_key, 0);
444 	ret = btrfs_comp_cpu_keys(first_key, &found_key);
445 
446 #ifdef CONFIG_BTRFS_DEBUG
447 	if (ret) {
448 		WARN_ON(1);
449 		btrfs_err(fs_info,
450 "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
451 			  eb->start, parent_transid, first_key->objectid,
452 			  first_key->type, first_key->offset,
453 			  found_key.objectid, found_key.type,
454 			  found_key.offset);
455 	}
456 #endif
457 	return ret;
458 }
459 
460 /*
461  * helper to read a given tree block, doing retries as required when
462  * the checksums don't match and we have alternate mirrors to try.
463  *
464  * @parent_transid:	expected transid, skip check if 0
465  * @level:		expected level, mandatory check
466  * @first_key:		expected key of first slot, skip check if NULL
467  */
468 static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
469 					  struct extent_buffer *eb,
470 					  u64 parent_transid, int level,
471 					  struct btrfs_key *first_key)
472 {
473 	struct extent_io_tree *io_tree;
474 	int failed = 0;
475 	int ret;
476 	int num_copies = 0;
477 	int mirror_num = 0;
478 	int failed_mirror = 0;
479 
480 	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
481 	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
482 	while (1) {
483 		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
484 					       mirror_num);
485 		if (!ret) {
486 			if (verify_parent_transid(io_tree, eb,
487 						   parent_transid, 0))
488 				ret = -EIO;
489 			else if (verify_level_key(fs_info, eb, level,
490 						  first_key, parent_transid))
491 				ret = -EUCLEAN;
492 			else
493 				break;
494 		}
495 
496 		/*
497 		 * This buffer's crc is fine, but its contents are corrupted, so
498 		 * there is no reason to read the other copies, they won't be
499 		 * any less wrong.
500 		 */
501 		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags) ||
502 		    ret == -EUCLEAN)
503 			break;
504 
505 		num_copies = btrfs_num_copies(fs_info,
506 					      eb->start, eb->len);
507 		if (num_copies == 1)
508 			break;
509 
510 		if (!failed_mirror) {
511 			failed = 1;
512 			failed_mirror = eb->read_mirror;
513 		}
514 
515 		mirror_num++;
516 		if (mirror_num == failed_mirror)
517 			mirror_num++;
518 
519 		if (mirror_num > num_copies)
520 			break;
521 	}
522 
523 	if (failed && !ret && failed_mirror)
524 		repair_eb_io_failure(fs_info, eb, failed_mirror);
525 
526 	return ret;
527 }
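/*
 * Worked example of the retry loop above: with num_copies == 2 and the
 * initial read (mirror_num == 0) failing on mirror 1, the loop records
 * failed_mirror = 1, skips over it, retries with mirror_num == 2, and on
 * success rewrites the bad copy via repair_eb_io_failure().
 */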
528 
529 /*
530  * checksum a dirty tree block before IO.  This has extra checks to make sure
531  * we only fill in the checksum field in the first page of a multi-page block
532  */
533 
534 static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
535 {
536 	u64 start = page_offset(page);
537 	u64 found_start;
538 	struct extent_buffer *eb;
539 
540 	eb = (struct extent_buffer *)page->private;
541 	if (page != eb->pages[0])
542 		return 0;
543 
544 	found_start = btrfs_header_bytenr(eb);
545 	/*
546 	 * Please do not consolidate these warnings into a single if.
547 	 * It is useful to know what went wrong.
548 	 */
549 	if (WARN_ON(found_start != start))
550 		return -EUCLEAN;
551 	if (WARN_ON(!PageUptodate(page)))
552 		return -EUCLEAN;
553 
554 	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
555 			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
556 
557 	return csum_tree_block(fs_info, eb, 0);
558 }
559 
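/*
 * A tree block is accepted if its fsid matches the mounted filesystem or any
 * filesystem in its seed chain, since blocks inherited from a seed device
 * still carry the seed's fsid.
 */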
560 static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
561 				 struct extent_buffer *eb)
562 {
563 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
564 	u8 fsid[BTRFS_FSID_SIZE];
565 	int ret = 1;
566 
567 	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
568 	while (fs_devices) {
569 		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
570 			ret = 0;
571 			break;
572 		}
573 		fs_devices = fs_devices->seed;
574 	}
575 	return ret;
576 }
577 
578 static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
579 				      u64 phy_offset, struct page *page,
580 				      u64 start, u64 end, int mirror)
581 {
582 	u64 found_start;
583 	int found_level;
584 	struct extent_buffer *eb;
585 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
586 	struct btrfs_fs_info *fs_info = root->fs_info;
587 	int ret = 0;
588 	int reads_done;
589 
590 	if (!page->private)
591 		goto out;
592 
593 	eb = (struct extent_buffer *)page->private;
594 
595 	/* the pending IO might have been the only thing that kept this buffer
596 	 * in memory.  Make sure we have a ref for all these other checks
597 	 */
598 	extent_buffer_get(eb);
599 
600 	reads_done = atomic_dec_and_test(&eb->io_pages);
601 	if (!reads_done)
602 		goto err;
603 
604 	eb->read_mirror = mirror;
605 	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
606 		ret = -EIO;
607 		goto err;
608 	}
609 
610 	found_start = btrfs_header_bytenr(eb);
611 	if (found_start != eb->start) {
612 		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
613 			     eb->start, found_start);
614 		ret = -EIO;
615 		goto err;
616 	}
617 	if (check_tree_block_fsid(fs_info, eb)) {
618 		btrfs_err_rl(fs_info, "bad fsid on block %llu",
619 			     eb->start);
620 		ret = -EIO;
621 		goto err;
622 	}
623 	found_level = btrfs_header_level(eb);
624 	if (found_level >= BTRFS_MAX_LEVEL) {
625 		btrfs_err(fs_info, "bad tree block level %d on %llu",
626 			  (int)btrfs_header_level(eb), eb->start);
627 		ret = -EIO;
628 		goto err;
629 	}
630 
631 	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
632 				       eb, found_level);
633 
634 	ret = csum_tree_block(fs_info, eb, 1);
635 	if (ret)
636 		goto err;
637 
638 	/*
639 	 * If this is a leaf block and it is corrupt, set the corrupt bit so
640 	 * that we don't try to read the other copies of this block, and just
641 	 * return -EIO.
642 	 */
643 	if (found_level == 0 && btrfs_check_leaf_full(fs_info, eb)) {
644 		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
645 		ret = -EIO;
646 	}
647 
648 	if (found_level > 0 && btrfs_check_node(fs_info, eb))
649 		ret = -EIO;
650 
651 	if (!ret)
652 		set_extent_buffer_uptodate(eb);
653 err:
654 	if (reads_done &&
655 	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
656 		btree_readahead_hook(eb, ret);
657 
658 	if (ret) {
659 		/*
660 		 * our io error hook is going to dec the io pages
661 		 * again, so we have to make sure it has something
662 		 * to decrement
663 		 */
664 		atomic_inc(&eb->io_pages);
665 		clear_extent_buffer_uptodate(eb);
666 	}
667 	free_extent_buffer(eb);
668 out:
669 	return ret;
670 }
671 
672 static int btree_io_failed_hook(struct page *page, int failed_mirror)
673 {
674 	struct extent_buffer *eb;
675 
676 	eb = (struct extent_buffer *)page->private;
677 	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
678 	eb->read_mirror = failed_mirror;
679 	atomic_dec(&eb->io_pages);
680 	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
681 		btree_readahead_hook(eb, -EIO);
682 	return -EIO;	/* we fixed nothing */
683 }
684 
685 static void end_workqueue_bio(struct bio *bio)
686 {
687 	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
688 	struct btrfs_fs_info *fs_info;
689 	struct btrfs_workqueue *wq;
690 	btrfs_work_func_t func;
691 
692 	fs_info = end_io_wq->info;
693 	end_io_wq->status = bio->bi_status;
694 
695 	if (bio_op(bio) == REQ_OP_WRITE) {
696 		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
697 			wq = fs_info->endio_meta_write_workers;
698 			func = btrfs_endio_meta_write_helper;
699 		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
700 			wq = fs_info->endio_freespace_worker;
701 			func = btrfs_freespace_write_helper;
702 		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
703 			wq = fs_info->endio_raid56_workers;
704 			func = btrfs_endio_raid56_helper;
705 		} else {
706 			wq = fs_info->endio_write_workers;
707 			func = btrfs_endio_write_helper;
708 		}
709 	} else {
710 		if (unlikely(end_io_wq->metadata ==
711 			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
712 			wq = fs_info->endio_repair_workers;
713 			func = btrfs_endio_repair_helper;
714 		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
715 			wq = fs_info->endio_raid56_workers;
716 			func = btrfs_endio_raid56_helper;
717 		} else if (end_io_wq->metadata) {
718 			wq = fs_info->endio_meta_workers;
719 			func = btrfs_endio_meta_helper;
720 		} else {
721 			wq = fs_info->endio_workers;
722 			func = btrfs_endio_helper;
723 		}
724 	}
725 
726 	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
727 	btrfs_queue_work(wq, &end_io_wq->work);
728 }
729 
730 blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
731 			enum btrfs_wq_endio_type metadata)
732 {
733 	struct btrfs_end_io_wq *end_io_wq;
734 
735 	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
736 	if (!end_io_wq)
737 		return BLK_STS_RESOURCE;
738 
739 	end_io_wq->private = bio->bi_private;
740 	end_io_wq->end_io = bio->bi_end_io;
741 	end_io_wq->info = info;
742 	end_io_wq->status = 0;
743 	end_io_wq->bio = bio;
744 	end_io_wq->metadata = metadata;
745 
746 	bio->bi_private = end_io_wq;
747 	bio->bi_end_io = end_workqueue_bio;
748 	return 0;
749 }
750 
751 static void run_one_async_start(struct btrfs_work *work)
752 {
753 	struct async_submit_bio *async;
754 	blk_status_t ret;
755 
756 	async = container_of(work, struct async_submit_bio, work);
757 	ret = async->submit_bio_start(async->private_data, async->bio,
758 				      async->bio_offset);
759 	if (ret)
760 		async->status = ret;
761 }
762 
763 static void run_one_async_done(struct btrfs_work *work)
764 {
765 	struct async_submit_bio *async;
766 
767 	async = container_of(work, struct async_submit_bio, work);
768 
769 	/* If an error occurred we just want to clean up the bio and move on */
770 	if (async->status) {
771 		async->bio->bi_status = async->status;
772 		bio_endio(async->bio);
773 		return;
774 	}
775 
776 	btrfs_submit_bio_done(async->private_data, async->bio, async->mirror_num);
777 }
778 
779 static void run_one_async_free(struct btrfs_work *work)
780 {
781 	struct async_submit_bio *async;
782 
783 	async = container_of(work, struct async_submit_bio, work);
784 	kfree(async);
785 }
786 
787 blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
788 				 int mirror_num, unsigned long bio_flags,
789 				 u64 bio_offset, void *private_data,
790 				 extent_submit_bio_start_t *submit_bio_start)
791 {
792 	struct async_submit_bio *async;
793 
794 	async = kmalloc(sizeof(*async), GFP_NOFS);
795 	if (!async)
796 		return BLK_STS_RESOURCE;
797 
798 	async->private_data = private_data;
799 	async->bio = bio;
800 	async->mirror_num = mirror_num;
801 	async->submit_bio_start = submit_bio_start;
802 
803 	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
804 			run_one_async_done, run_one_async_free);
805 
806 	async->bio_offset = bio_offset;
807 
808 	async->status = 0;
809 
810 	if (op_is_sync(bio->bi_opf))
811 		btrfs_set_work_high_priority(&async->work);
812 
813 	btrfs_queue_work(fs_info->workers, &async->work);
814 	return 0;
815 }
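/*
 * Life cycle of the work queued above: run_one_async_start() computes the
 * checksums via ->submit_bio_start, run_one_async_done() then submits the
 * bio (or completes it with the saved error status), and run_one_async_free()
 * releases the async_submit_bio once the work is done.
 */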
816 
817 static blk_status_t btree_csum_one_bio(struct bio *bio)
818 {
819 	struct bio_vec *bvec;
820 	struct btrfs_root *root;
821 	int i, ret = 0;
822 
823 	ASSERT(!bio_flagged(bio, BIO_CLONED));
824 	bio_for_each_segment_all(bvec, bio, i) {
825 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
826 		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
827 		if (ret)
828 			break;
829 	}
830 
831 	return errno_to_blk_status(ret);
832 }
833 
834 static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
835 					     u64 bio_offset)
836 {
837 	/*
838 	 * when we're called for a write, we're already in the async
839 	 * submission context.  Just checksum the bio; it is submitted later.
840 	 */
841 	return btree_csum_one_bio(bio);
842 }
843 
844 static int check_async_write(struct btrfs_inode *bi)
845 {
846 	if (atomic_read(&bi->sync_writers))
847 		return 0;
848 #ifdef CONFIG_X86
849 	if (static_cpu_has(X86_FEATURE_XMM4_2))
850 		return 0;
851 #endif
852 	return 1;
853 }
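/*
 * In other words: a caller with in-flight synchronous writers checksums
 * inline (returns 0), as does any x86 CPU with SSE4.2 hardware crc32c;
 * everything else offloads the checksumming to the worker threads.
 */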
854 
855 static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
856 					  int mirror_num, unsigned long bio_flags,
857 					  u64 bio_offset)
858 {
859 	struct inode *inode = private_data;
860 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
861 	int async = check_async_write(BTRFS_I(inode));
862 	blk_status_t ret;
863 
864 	if (bio_op(bio) != REQ_OP_WRITE) {
865 		/*
866 		 * called for a read, do the setup so that checksum validation
867 		 * can happen in the async kernel threads
868 		 */
869 		ret = btrfs_bio_wq_end_io(fs_info, bio,
870 					  BTRFS_WQ_ENDIO_METADATA);
871 		if (ret)
872 			goto out_w_error;
873 		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
874 	} else if (!async) {
875 		ret = btree_csum_one_bio(bio);
876 		if (ret)
877 			goto out_w_error;
878 		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
879 	} else {
880 		/*
881 		 * kthread helpers are used to submit writes so that
882 		 * checksumming can happen in parallel across all CPUs
883 		 */
884 		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
885 					  bio_offset, private_data,
886 					  btree_submit_bio_start);
887 	}
888 
889 	if (ret)
890 		goto out_w_error;
891 	return 0;
892 
893 out_w_error:
894 	bio->bi_status = ret;
895 	bio_endio(bio);
896 	return ret;
897 }
898 
899 #ifdef CONFIG_MIGRATION
900 static int btree_migratepage(struct address_space *mapping,
901 			struct page *newpage, struct page *page,
902 			enum migrate_mode mode)
903 {
904 	/*
905 	 * we can't safely write a btree page from here,
906 	 * as we haven't done the locking hook
907 	 */
908 	if (PageDirty(page))
909 		return -EAGAIN;
910 	/*
911 	 * Buffers may be managed in a filesystem specific way.
912 	 * We must have no buffers or drop them.
913 	 */
914 	if (page_has_private(page) &&
915 	    !try_to_release_page(page, GFP_KERNEL))
916 		return -EAGAIN;
917 	return migrate_page(mapping, newpage, page, mode);
918 }
919 #endif
920 
921 
922 static int btree_writepages(struct address_space *mapping,
923 			    struct writeback_control *wbc)
924 {
925 	struct btrfs_fs_info *fs_info;
926 	int ret;
927 
928 	if (wbc->sync_mode == WB_SYNC_NONE) {
929 
930 		if (wbc->for_kupdate)
931 			return 0;
932 
933 		fs_info = BTRFS_I(mapping->host)->root->fs_info;
934 		/* this is a bit racy, but that's ok */
935 		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
936 					     BTRFS_DIRTY_METADATA_THRESH,
937 					     fs_info->dirty_metadata_batch);
938 		if (ret < 0)
939 			return 0;
940 	}
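	/*
	 * i.e. background (WB_SYNC_NONE) writeback is skipped for kupdate and
	 * while less than BTRFS_DIRTY_METADATA_THRESH bytes of metadata are
	 * dirty; sync writeback always reaches btree_write_cache_pages().
	 */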
941 	return btree_write_cache_pages(mapping, wbc);
942 }
943 
944 static int btree_readpage(struct file *file, struct page *page)
945 {
946 	struct extent_io_tree *tree;
947 	tree = &BTRFS_I(page->mapping->host)->io_tree;
948 	return extent_read_full_page(tree, page, btree_get_extent, 0);
949 }
950 
951 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
952 {
953 	if (PageWriteback(page) || PageDirty(page))
954 		return 0;
955 
956 	return try_release_extent_buffer(page);
957 }
958 
959 static void btree_invalidatepage(struct page *page, unsigned int offset,
960 				 unsigned int length)
961 {
962 	struct extent_io_tree *tree;
963 	tree = &BTRFS_I(page->mapping->host)->io_tree;
964 	extent_invalidatepage(tree, page, offset);
965 	btree_releasepage(page, GFP_NOFS);
966 	if (PagePrivate(page)) {
967 		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
968 			   "page private not zero on page %llu",
969 			   (unsigned long long)page_offset(page));
970 		ClearPagePrivate(page);
971 		set_page_private(page, 0);
972 		put_page(page);
973 	}
974 }
975 
976 static int btree_set_page_dirty(struct page *page)
977 {
978 #ifdef DEBUG
979 	struct extent_buffer *eb;
980 
981 	BUG_ON(!PagePrivate(page));
982 	eb = (struct extent_buffer *)page->private;
983 	BUG_ON(!eb);
984 	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
985 	BUG_ON(!atomic_read(&eb->refs));
986 	btrfs_assert_tree_locked(eb);
987 #endif
988 	return __set_page_dirty_nobuffers(page);
989 }
990 
991 static const struct address_space_operations btree_aops = {
992 	.readpage	= btree_readpage,
993 	.writepages	= btree_writepages,
994 	.releasepage	= btree_releasepage,
995 	.invalidatepage = btree_invalidatepage,
996 #ifdef CONFIG_MIGRATION
997 	.migratepage	= btree_migratepage,
998 #endif
999 	.set_page_dirty = btree_set_page_dirty,
1000 };
1001 
1002 void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
1003 {
1004 	struct extent_buffer *buf = NULL;
1005 	struct inode *btree_inode = fs_info->btree_inode;
1006 
1007 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
1008 	if (IS_ERR(buf))
1009 		return;
1010 	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1011 				 buf, WAIT_NONE, 0);
1012 	free_extent_buffer(buf);
1013 }
1014 
1015 int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
1016 			 int mirror_num, struct extent_buffer **eb)
1017 {
1018 	struct extent_buffer *buf = NULL;
1019 	struct inode *btree_inode = fs_info->btree_inode;
1020 	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1021 	int ret;
1022 
1023 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
1024 	if (IS_ERR(buf))
1025 		return 0;
1026 
1027 	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1028 
1029 	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
1030 				       mirror_num);
1031 	if (ret) {
1032 		free_extent_buffer(buf);
1033 		return ret;
1034 	}
1035 
1036 	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1037 		free_extent_buffer(buf);
1038 		return -EIO;
1039 	} else if (extent_buffer_uptodate(buf)) {
1040 		*eb = buf;
1041 	} else {
1042 		free_extent_buffer(buf);
1043 	}
1044 	return 0;
1045 }
1046 
1047 struct extent_buffer *btrfs_find_create_tree_block(
1048 						struct btrfs_fs_info *fs_info,
1049 						u64 bytenr)
1050 {
1051 	if (btrfs_is_testing(fs_info))
1052 		return alloc_test_extent_buffer(fs_info, bytenr);
1053 	return alloc_extent_buffer(fs_info, bytenr);
1054 }
1055 
1056 
1057 int btrfs_write_tree_block(struct extent_buffer *buf)
1058 {
1059 	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1060 					buf->start + buf->len - 1);
1061 }
1062 
1063 void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1064 {
1065 	filemap_fdatawait_range(buf->pages[0]->mapping,
1066 			        buf->start, buf->start + buf->len - 1);
1067 }
1068 
1069 /*
1070  * Read tree block at logical address @bytenr and do basic but critical
1071  * verification.
1072  *
1073  * @parent_transid:	expected transid of this tree block, skip check if 0
1074  * @level:		expected level, mandatory check
1075  * @first_key:		expected key in slot 0, skip check if NULL
1076  */
1077 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
1078 				      u64 parent_transid, int level,
1079 				      struct btrfs_key *first_key)
1080 {
1081 	struct extent_buffer *buf = NULL;
1082 	int ret;
1083 
1084 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
1085 	if (IS_ERR(buf))
1086 		return buf;
1087 
1088 	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
1089 					     level, first_key);
1090 	if (ret) {
1091 		free_extent_buffer(buf);
1092 		return ERR_PTR(ret);
1093 	}
1094 	return buf;
1095 
1096 }
1097 
1098 void clean_tree_block(struct btrfs_fs_info *fs_info,
1099 		      struct extent_buffer *buf)
1100 {
1101 	if (btrfs_header_generation(buf) ==
1102 	    fs_info->running_transaction->transid) {
1103 		btrfs_assert_tree_locked(buf);
1104 
1105 		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1106 			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1107 						 -buf->len,
1108 						 fs_info->dirty_metadata_batch);
1109 			/* ugh, clear_extent_buffer_dirty needs to lock the page */
1110 			btrfs_set_lock_blocking(buf);
1111 			clear_extent_buffer_dirty(buf);
1112 		}
1113 	}
1114 }
1115 
1116 static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
1117 {
1118 	struct btrfs_subvolume_writers *writers;
1119 	int ret;
1120 
1121 	writers = kmalloc(sizeof(*writers), GFP_NOFS);
1122 	if (!writers)
1123 		return ERR_PTR(-ENOMEM);
1124 
1125 	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
1126 	if (ret < 0) {
1127 		kfree(writers);
1128 		return ERR_PTR(ret);
1129 	}
1130 
1131 	init_waitqueue_head(&writers->wait);
1132 	return writers;
1133 }
1134 
1135 static void
1136 btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
1137 {
1138 	percpu_counter_destroy(&writers->counter);
1139 	kfree(writers);
1140 }
1141 
1142 static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1143 			 u64 objectid)
1144 {
1145 	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
1146 	root->node = NULL;
1147 	root->commit_root = NULL;
1148 	root->state = 0;
1149 	root->orphan_cleanup_state = 0;
1150 
1151 	root->last_trans = 0;
1152 	root->highest_objectid = 0;
1153 	root->nr_delalloc_inodes = 0;
1154 	root->nr_ordered_extents = 0;
1155 	root->inode_tree = RB_ROOT;
1156 	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1157 	root->block_rsv = NULL;
1158 
1159 	INIT_LIST_HEAD(&root->dirty_list);
1160 	INIT_LIST_HEAD(&root->root_list);
1161 	INIT_LIST_HEAD(&root->delalloc_inodes);
1162 	INIT_LIST_HEAD(&root->delalloc_root);
1163 	INIT_LIST_HEAD(&root->ordered_extents);
1164 	INIT_LIST_HEAD(&root->ordered_root);
1165 	INIT_LIST_HEAD(&root->logged_list[0]);
1166 	INIT_LIST_HEAD(&root->logged_list[1]);
1167 	spin_lock_init(&root->inode_lock);
1168 	spin_lock_init(&root->delalloc_lock);
1169 	spin_lock_init(&root->ordered_extent_lock);
1170 	spin_lock_init(&root->accounting_lock);
1171 	spin_lock_init(&root->log_extents_lock[0]);
1172 	spin_lock_init(&root->log_extents_lock[1]);
1173 	spin_lock_init(&root->qgroup_meta_rsv_lock);
1174 	mutex_init(&root->objectid_mutex);
1175 	mutex_init(&root->log_mutex);
1176 	mutex_init(&root->ordered_extent_mutex);
1177 	mutex_init(&root->delalloc_mutex);
1178 	init_waitqueue_head(&root->log_writer_wait);
1179 	init_waitqueue_head(&root->log_commit_wait[0]);
1180 	init_waitqueue_head(&root->log_commit_wait[1]);
1181 	INIT_LIST_HEAD(&root->log_ctxs[0]);
1182 	INIT_LIST_HEAD(&root->log_ctxs[1]);
1183 	atomic_set(&root->log_commit[0], 0);
1184 	atomic_set(&root->log_commit[1], 0);
1185 	atomic_set(&root->log_writers, 0);
1186 	atomic_set(&root->log_batch, 0);
1187 	refcount_set(&root->refs, 1);
1188 	atomic_set(&root->will_be_snapshotted, 0);
1189 	atomic_set(&root->snapshot_force_cow, 0);
1190 	root->log_transid = 0;
1191 	root->log_transid_committed = -1;
1192 	root->last_log_commit = 0;
1193 	if (!dummy)
1194 		extent_io_tree_init(&root->dirty_log_pages, NULL);
1195 
1196 	memset(&root->root_key, 0, sizeof(root->root_key));
1197 	memset(&root->root_item, 0, sizeof(root->root_item));
1198 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1199 	if (!dummy)
1200 		root->defrag_trans_start = fs_info->generation;
1201 	else
1202 		root->defrag_trans_start = 0;
1203 	root->root_key.objectid = objectid;
1204 	root->anon_dev = 0;
1205 
1206 	spin_lock_init(&root->root_item_lock);
1207 }
1208 
1209 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
1210 		gfp_t flags)
1211 {
1212 	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
1213 	if (root)
1214 		root->fs_info = fs_info;
1215 	return root;
1216 }
1217 
1218 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1219 /* Should only be used by the testing infrastructure */
1220 struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
1221 {
1222 	struct btrfs_root *root;
1223 
1224 	if (!fs_info)
1225 		return ERR_PTR(-EINVAL);
1226 
1227 	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1228 	if (!root)
1229 		return ERR_PTR(-ENOMEM);
1230 
1231 	/* Selftest roots are never read from disk, just set up the bare root */
1232 	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
1233 	root->alloc_bytenr = 0;
1234 
1235 	return root;
1236 }
1237 #endif
1238 
1239 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1240 				     struct btrfs_fs_info *fs_info,
1241 				     u64 objectid)
1242 {
1243 	struct extent_buffer *leaf;
1244 	struct btrfs_root *tree_root = fs_info->tree_root;
1245 	struct btrfs_root *root;
1246 	struct btrfs_key key;
1247 	int ret = 0;
1248 	uuid_le uuid = NULL_UUID_LE;
1249 
1250 	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1251 	if (!root)
1252 		return ERR_PTR(-ENOMEM);
1253 
1254 	__setup_root(root, fs_info, objectid);
1255 	root->root_key.objectid = objectid;
1256 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1257 	root->root_key.offset = 0;
1258 
1259 	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
1260 	if (IS_ERR(leaf)) {
1261 		ret = PTR_ERR(leaf);
1262 		leaf = NULL;
1263 		goto fail;
1264 	}
1265 
1266 	root->node = leaf;
1267 	btrfs_mark_buffer_dirty(leaf);
1268 
1269 	root->commit_root = btrfs_root_node(root);
1270 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
1271 
1272 	root->root_item.flags = 0;
1273 	root->root_item.byte_limit = 0;
1274 	btrfs_set_root_bytenr(&root->root_item, leaf->start);
1275 	btrfs_set_root_generation(&root->root_item, trans->transid);
1276 	btrfs_set_root_level(&root->root_item, 0);
1277 	btrfs_set_root_refs(&root->root_item, 1);
1278 	btrfs_set_root_used(&root->root_item, leaf->len);
1279 	btrfs_set_root_last_snapshot(&root->root_item, 0);
1280 	btrfs_set_root_dirid(&root->root_item, 0);
1281 	if (is_fstree(objectid))
1282 		uuid_le_gen(&uuid);
1283 	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1284 	root->root_item.drop_level = 0;
1285 
1286 	key.objectid = objectid;
1287 	key.type = BTRFS_ROOT_ITEM_KEY;
1288 	key.offset = 0;
1289 	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1290 	if (ret)
1291 		goto fail;
1292 
1293 	btrfs_tree_unlock(leaf);
1294 
1295 	return root;
1296 
1297 fail:
1298 	if (leaf) {
1299 		btrfs_tree_unlock(leaf);
1300 		free_extent_buffer(root->commit_root);
1301 		free_extent_buffer(leaf);
1302 	}
1303 	kfree(root);
1304 
1305 	return ERR_PTR(ret);
1306 }
1307 
1308 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1309 					 struct btrfs_fs_info *fs_info)
1310 {
1311 	struct btrfs_root *root;
1312 	struct extent_buffer *leaf;
1313 
1314 	root = btrfs_alloc_root(fs_info, GFP_NOFS);
1315 	if (!root)
1316 		return ERR_PTR(-ENOMEM);
1317 
1318 	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1319 
1320 	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1321 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1322 	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1323 
1324 	/*
1325 	 * DON'T set REF_COWS for log trees
1326 	 *
1327 	 * log trees do not get reference counted because they go away
1328 	 * before a real commit is actually done.  They do store pointers
1329 	 * to file data extents, and those reference counts still get
1330 	 * updated (along with back refs to the log tree).
1331 	 */
1332 
1333 	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
1334 			NULL, 0, 0, 0);
1335 	if (IS_ERR(leaf)) {
1336 		kfree(root);
1337 		return ERR_CAST(leaf);
1338 	}
1339 
1340 	root->node = leaf;
1341 
1342 	btrfs_mark_buffer_dirty(root->node);
1343 	btrfs_tree_unlock(root->node);
1344 	return root;
1345 }
1346 
1347 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1348 			     struct btrfs_fs_info *fs_info)
1349 {
1350 	struct btrfs_root *log_root;
1351 
1352 	log_root = alloc_log_tree(trans, fs_info);
1353 	if (IS_ERR(log_root))
1354 		return PTR_ERR(log_root);
1355 	WARN_ON(fs_info->log_root_tree);
1356 	fs_info->log_root_tree = log_root;
1357 	return 0;
1358 }
1359 
1360 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1361 		       struct btrfs_root *root)
1362 {
1363 	struct btrfs_fs_info *fs_info = root->fs_info;
1364 	struct btrfs_root *log_root;
1365 	struct btrfs_inode_item *inode_item;
1366 
1367 	log_root = alloc_log_tree(trans, fs_info);
1368 	if (IS_ERR(log_root))
1369 		return PTR_ERR(log_root);
1370 
1371 	log_root->last_trans = trans->transid;
1372 	log_root->root_key.offset = root->root_key.objectid;
1373 
1374 	inode_item = &log_root->root_item.inode;
1375 	btrfs_set_stack_inode_generation(inode_item, 1);
1376 	btrfs_set_stack_inode_size(inode_item, 3);
1377 	btrfs_set_stack_inode_nlink(inode_item, 1);
1378 	btrfs_set_stack_inode_nbytes(inode_item,
1379 				     fs_info->nodesize);
1380 	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1381 
1382 	btrfs_set_root_node(&log_root->root_item, log_root->node);
1383 
1384 	WARN_ON(root->log_root);
1385 	root->log_root = log_root;
1386 	root->log_transid = 0;
1387 	root->log_transid_committed = -1;
1388 	root->last_log_commit = 0;
1389 	return 0;
1390 }
1391 
1392 static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1393 					       struct btrfs_key *key)
1394 {
1395 	struct btrfs_root *root;
1396 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
1397 	struct btrfs_path *path;
1398 	u64 generation;
1399 	int ret;
1400 	int level;
1401 
1402 	path = btrfs_alloc_path();
1403 	if (!path)
1404 		return ERR_PTR(-ENOMEM);
1405 
1406 	root = btrfs_alloc_root(fs_info, GFP_NOFS);
1407 	if (!root) {
1408 		ret = -ENOMEM;
1409 		goto alloc_fail;
1410 	}
1411 
1412 	__setup_root(root, fs_info, key->objectid);
1413 
1414 	ret = btrfs_find_root(tree_root, key, path,
1415 			      &root->root_item, &root->root_key);
1416 	if (ret) {
1417 		if (ret > 0)
1418 			ret = -ENOENT;
1419 		goto find_fail;
1420 	}
1421 
1422 	generation = btrfs_root_generation(&root->root_item);
1423 	level = btrfs_root_level(&root->root_item);
1424 	root->node = read_tree_block(fs_info,
1425 				     btrfs_root_bytenr(&root->root_item),
1426 				     generation, level, NULL);
1427 	if (IS_ERR(root->node)) {
1428 		ret = PTR_ERR(root->node);
1429 		goto find_fail;
1430 	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1431 		ret = -EIO;
1432 		free_extent_buffer(root->node);
1433 		goto find_fail;
1434 	}
1435 	root->commit_root = btrfs_root_node(root);
1436 out:
1437 	btrfs_free_path(path);
1438 	return root;
1439 
1440 find_fail:
1441 	kfree(root);
1442 alloc_fail:
1443 	root = ERR_PTR(ret);
1444 	goto out;
1445 }
1446 
1447 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1448 				      struct btrfs_key *location)
1449 {
1450 	struct btrfs_root *root;
1451 
1452 	root = btrfs_read_tree_root(tree_root, location);
1453 	if (IS_ERR(root))
1454 		return root;
1455 
1456 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1457 		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
1458 		btrfs_check_and_init_root_item(&root->root_item);
1459 	}
1460 
1461 	return root;
1462 }
1463 
1464 int btrfs_init_fs_root(struct btrfs_root *root)
1465 {
1466 	int ret;
1467 	struct btrfs_subvolume_writers *writers;
1468 
1469 	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1470 	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1471 					GFP_NOFS);
1472 	if (!root->free_ino_pinned || !root->free_ino_ctl) {
1473 		ret = -ENOMEM;
1474 		goto fail;
1475 	}
1476 
1477 	writers = btrfs_alloc_subvolume_writers();
1478 	if (IS_ERR(writers)) {
1479 		ret = PTR_ERR(writers);
1480 		goto fail;
1481 	}
1482 	root->subv_writers = writers;
1483 
1484 	btrfs_init_free_ino_ctl(root);
1485 	spin_lock_init(&root->ino_cache_lock);
1486 	init_waitqueue_head(&root->ino_cache_wait);
1487 
1488 	ret = get_anon_bdev(&root->anon_dev);
1489 	if (ret)
1490 		goto fail;
1491 
1492 	mutex_lock(&root->objectid_mutex);
1493 	ret = btrfs_find_highest_objectid(root,
1494 					&root->highest_objectid);
1495 	if (ret) {
1496 		mutex_unlock(&root->objectid_mutex);
1497 		goto fail;
1498 	}
1499 
1500 	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
1501 
1502 	mutex_unlock(&root->objectid_mutex);
1503 
1504 	return 0;
1505 fail:
1506 	/* The caller is responsible for calling btrfs_free_fs_root */
1507 	return ret;
1508 }
1509 
1510 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1511 					u64 root_id)
1512 {
1513 	struct btrfs_root *root;
1514 
1515 	spin_lock(&fs_info->fs_roots_radix_lock);
1516 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1517 				 (unsigned long)root_id);
1518 	spin_unlock(&fs_info->fs_roots_radix_lock);
1519 	return root;
1520 }
1521 
1522 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1523 			 struct btrfs_root *root)
1524 {
1525 	int ret;
1526 
1527 	ret = radix_tree_preload(GFP_NOFS);
1528 	if (ret)
1529 		return ret;
1530 
1531 	spin_lock(&fs_info->fs_roots_radix_lock);
1532 	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1533 				(unsigned long)root->root_key.objectid,
1534 				root);
1535 	if (ret == 0)
1536 		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1537 	spin_unlock(&fs_info->fs_roots_radix_lock);
1538 	radix_tree_preload_end();
1539 
1540 	return ret;
1541 }
1542 
1543 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1544 				     struct btrfs_key *location,
1545 				     bool check_ref)
1546 {
1547 	struct btrfs_root *root;
1548 	struct btrfs_path *path;
1549 	struct btrfs_key key;
1550 	int ret;
1551 
1552 	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1553 		return fs_info->tree_root;
1554 	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1555 		return fs_info->extent_root;
1556 	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1557 		return fs_info->chunk_root;
1558 	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1559 		return fs_info->dev_root;
1560 	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1561 		return fs_info->csum_root;
1562 	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1563 		return fs_info->quota_root ? fs_info->quota_root :
1564 					     ERR_PTR(-ENOENT);
1565 	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
1566 		return fs_info->uuid_root ? fs_info->uuid_root :
1567 					    ERR_PTR(-ENOENT);
1568 	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
1569 		return fs_info->free_space_root ? fs_info->free_space_root :
1570 						  ERR_PTR(-ENOENT);
1571 again:
1572 	root = btrfs_lookup_fs_root(fs_info, location->objectid);
1573 	if (root) {
1574 		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
1575 			return ERR_PTR(-ENOENT);
1576 		return root;
1577 	}
1578 
1579 	root = btrfs_read_fs_root(fs_info->tree_root, location);
1580 	if (IS_ERR(root))
1581 		return root;
1582 
1583 	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1584 		ret = -ENOENT;
1585 		goto fail;
1586 	}
1587 
1588 	ret = btrfs_init_fs_root(root);
1589 	if (ret)
1590 		goto fail;
1591 
1592 	path = btrfs_alloc_path();
1593 	if (!path) {
1594 		ret = -ENOMEM;
1595 		goto fail;
1596 	}
1597 	key.objectid = BTRFS_ORPHAN_OBJECTID;
1598 	key.type = BTRFS_ORPHAN_ITEM_KEY;
1599 	key.offset = location->objectid;
1600 
1601 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1602 	btrfs_free_path(path);
1603 	if (ret < 0)
1604 		goto fail;
1605 	if (ret == 0)
1606 		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1607 
1608 	ret = btrfs_insert_fs_root(fs_info, root);
1609 	if (ret) {
1610 		if (ret == -EEXIST) {
1611 			btrfs_free_fs_root(root);
1612 			goto again;
1613 		}
1614 		goto fail;
1615 	}
1616 	return root;
1617 fail:
1618 	btrfs_free_fs_root(root);
1619 	return ERR_PTR(ret);
1620 }
1621 
1622 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1623 {
1624 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1625 	int ret = 0;
1626 	struct btrfs_device *device;
1627 	struct backing_dev_info *bdi;
1628 
1629 	rcu_read_lock();
1630 	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1631 		if (!device->bdev)
1632 			continue;
1633 		bdi = device->bdev->bd_bdi;
1634 		if (bdi_congested(bdi, bdi_bits)) {
1635 			ret = 1;
1636 			break;
1637 		}
1638 	}
1639 	rcu_read_unlock();
1640 	return ret;
1641 }
1642 
1643 /*
1644  * called by the kthread helper functions to finally call the bio end_io
1645  * functions.  This is where read checksum verification actually happens
1646  */
1647 static void end_workqueue_fn(struct btrfs_work *work)
1648 {
1649 	struct bio *bio;
1650 	struct btrfs_end_io_wq *end_io_wq;
1651 
1652 	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
1653 	bio = end_io_wq->bio;
1654 
1655 	bio->bi_status = end_io_wq->status;
1656 	bio->bi_private = end_io_wq->private;
1657 	bio->bi_end_io = end_io_wq->end_io;
1658 	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
1659 	bio_endio(bio);
1660 }
1661 
1662 static int cleaner_kthread(void *arg)
1663 {
1664 	struct btrfs_root *root = arg;
1665 	struct btrfs_fs_info *fs_info = root->fs_info;
1666 	int again;
1667 
1668 	while (1) {
1669 		again = 0;
1670 
1671 		/* Make the cleaner go to sleep early. */
1672 		if (btrfs_need_cleaner_sleep(fs_info))
1673 			goto sleep;
1674 
1675 		/*
1676 		 * Do not do anything if we might cause open_ctree() to block
1677 		 * before we have finished mounting the filesystem.
1678 		 */
1679 		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1680 			goto sleep;
1681 
1682 		if (!mutex_trylock(&fs_info->cleaner_mutex))
1683 			goto sleep;
1684 
1685 		/*
1686 		 * Recheck here: the status of the fs may have changed between
1687 		 * the check above and taking the cleaner_mutex.
1688 		 */
1689 		if (btrfs_need_cleaner_sleep(fs_info)) {
1690 			mutex_unlock(&fs_info->cleaner_mutex);
1691 			goto sleep;
1692 		}
1693 
1694 		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
1695 		btrfs_run_delayed_iputs(fs_info);
1696 		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
1697 
1698 		again = btrfs_clean_one_deleted_snapshot(root);
1699 		mutex_unlock(&fs_info->cleaner_mutex);
1700 
1701 		/*
1702 		 * The defragger has dealt with the R/O remount and umount,
1703 		 * so we needn't do anything special here.
1704 		 */
1705 		btrfs_run_defrag_inodes(fs_info);
1706 
1707 		/*
1708 		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1709 		 * with relocation (btrfs_relocate_chunk) and relocation
1710 		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1711 		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1712 		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1713 		 * unused block groups.
1714 		 */
1715 		btrfs_delete_unused_bgs(fs_info);
1716 sleep:
1717 		if (kthread_should_park())
1718 			kthread_parkme();
1719 		if (kthread_should_stop())
1720 			return 0;
1721 		if (!again) {
1722 			set_current_state(TASK_INTERRUPTIBLE);
1723 			schedule();
1724 			__set_current_state(TASK_RUNNING);
1725 		}
1726 	}
1727 }
1728 
1729 static int transaction_kthread(void *arg)
1730 {
1731 	struct btrfs_root *root = arg;
1732 	struct btrfs_fs_info *fs_info = root->fs_info;
1733 	struct btrfs_trans_handle *trans;
1734 	struct btrfs_transaction *cur;
1735 	u64 transid;
1736 	time64_t now;
1737 	unsigned long delay;
1738 	bool cannot_commit;
1739 
1740 	do {
1741 		cannot_commit = false;
1742 		delay = HZ * fs_info->commit_interval;
1743 		mutex_lock(&fs_info->transaction_kthread_mutex);
1744 
1745 		spin_lock(&fs_info->trans_lock);
1746 		cur = fs_info->running_transaction;
1747 		if (!cur) {
1748 			spin_unlock(&fs_info->trans_lock);
1749 			goto sleep;
1750 		}
1751 
1752 		now = ktime_get_seconds();
1753 		if (cur->state < TRANS_STATE_BLOCKED &&
1754 		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
1755 		    (now < cur->start_time ||
1756 		     now - cur->start_time < fs_info->commit_interval)) {
1757 			spin_unlock(&fs_info->trans_lock);
1758 			delay = HZ * 5;
1759 			goto sleep;
1760 		}
1761 		transid = cur->transid;
1762 		spin_unlock(&fs_info->trans_lock);
1763 
1764 		/* If the file system is aborted, this will always fail. */
1765 		trans = btrfs_attach_transaction(root);
1766 		if (IS_ERR(trans)) {
1767 			if (PTR_ERR(trans) != -ENOENT)
1768 				cannot_commit = true;
1769 			goto sleep;
1770 		}
1771 		if (transid == trans->transid) {
1772 			btrfs_commit_transaction(trans);
1773 		} else {
1774 			btrfs_end_transaction(trans);
1775 		}
1776 sleep:
1777 		wake_up_process(fs_info->cleaner_kthread);
1778 		mutex_unlock(&fs_info->transaction_kthread_mutex);
1779 
1780 		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1781 				      &fs_info->fs_state)))
1782 			btrfs_cleanup_transaction(fs_info);
1783 		if (!kthread_should_stop() &&
1784 				(!btrfs_transaction_blocked(fs_info) ||
1785 				 cannot_commit))
1786 			schedule_timeout_interruptible(delay);
1787 	} while (!kthread_should_stop());
1788 	return 0;
1789 }
1790 
1791 /*
1792  * this will find the highest generation in the array of
1793  * root backups.  The index of the newest entry is returned,
1794  * or -1 if we can't find anything.
1795  *
1796  * We check to make sure the array is valid by comparing the
1797  * generation of the latest root in the array with the generation
1798  * in the super block.  If they don't match we pitch it.
1799  */
1800 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1801 {
1802 	u64 cur;
1803 	int newest_index = -1;
1804 	struct btrfs_root_backup *root_backup;
1805 	int i;
1806 
1807 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1808 		root_backup = info->super_copy->super_roots + i;
1809 		cur = btrfs_backup_tree_root_gen(root_backup);
1810 		if (cur == newest_gen)
1811 			newest_index = i;
1812 	}
1813 
1814 	/* check to see if we actually wrapped around */
1815 	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1816 		root_backup = info->super_copy->super_roots;
1817 		cur = btrfs_backup_tree_root_gen(root_backup);
1818 		if (cur == newest_gen)
1819 			newest_index = 0;
1820 	}
1821 	return newest_index;
1822 }
1823 
1824 
1825 /*
1826  * find the oldest backup so we know where to store new entries
1827  * in the backup array.  This will set the backup_root_index
1828  * field in the fs_info struct
1829  */
1830 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1831 				     u64 newest_gen)
1832 {
1833 	int newest_index = -1;
1834 
1835 	newest_index = find_newest_super_backup(info, newest_gen);
1836 	/* if there was garbage in there, just move along */
1837 	if (newest_index == -1) {
1838 		info->backup_root_index = 0;
1839 	} else {
1840 		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1841 	}
1842 }
1843 
1844 /*
1845  * copy all the root pointers into the super backup array.
1846  * this will bump the backup pointer by one when it is
1847  * done
1848  */
1849 static void backup_super_roots(struct btrfs_fs_info *info)
1850 {
1851 	int next_backup;
1852 	struct btrfs_root_backup *root_backup;
1853 	int last_backup;
1854 
1855 	next_backup = info->backup_root_index;
1856 	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1857 		BTRFS_NUM_BACKUP_ROOTS;
1858 
1859 	/*
1860 	 * Just overwrite the last backup if we're at the same generation;
1861 	 * this happens only at umount.
1862 	 */
1863 	root_backup = info->super_for_commit->super_roots + last_backup;
1864 	if (btrfs_backup_tree_root_gen(root_backup) ==
1865 	    btrfs_header_generation(info->tree_root->node))
1866 		next_backup = last_backup;
1867 
1868 	root_backup = info->super_for_commit->super_roots + next_backup;
1869 
1870 	/*
1871 	 * make sure all of our padding and empty slots get zero filled
1872 	 * regardless of which ones we use today
1873 	 */
1874 	memset(root_backup, 0, sizeof(*root_backup));
1875 
1876 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1877 
1878 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1879 	btrfs_set_backup_tree_root_gen(root_backup,
1880 			       btrfs_header_generation(info->tree_root->node));
1881 
1882 	btrfs_set_backup_tree_root_level(root_backup,
1883 			       btrfs_header_level(info->tree_root->node));
1884 
1885 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1886 	btrfs_set_backup_chunk_root_gen(root_backup,
1887 			       btrfs_header_generation(info->chunk_root->node));
1888 	btrfs_set_backup_chunk_root_level(root_backup,
1889 			       btrfs_header_level(info->chunk_root->node));
1890 
1891 	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1892 	btrfs_set_backup_extent_root_gen(root_backup,
1893 			       btrfs_header_generation(info->extent_root->node));
1894 	btrfs_set_backup_extent_root_level(root_backup,
1895 			       btrfs_header_level(info->extent_root->node));
1896 
1897 	/*
1898 	 * we might commit during log recovery, which happens before we set
1899 	 * the fs_root.  Make sure it is valid before we fill it in.
1900 	 */
1901 	if (info->fs_root && info->fs_root->node) {
1902 		btrfs_set_backup_fs_root(root_backup,
1903 					 info->fs_root->node->start);
1904 		btrfs_set_backup_fs_root_gen(root_backup,
1905 			       btrfs_header_generation(info->fs_root->node));
1906 		btrfs_set_backup_fs_root_level(root_backup,
1907 			       btrfs_header_level(info->fs_root->node));
1908 	}
1909 
1910 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1911 	btrfs_set_backup_dev_root_gen(root_backup,
1912 			       btrfs_header_generation(info->dev_root->node));
1913 	btrfs_set_backup_dev_root_level(root_backup,
1914 				       btrfs_header_level(info->dev_root->node));
1915 
1916 	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1917 	btrfs_set_backup_csum_root_gen(root_backup,
1918 			       btrfs_header_generation(info->csum_root->node));
1919 	btrfs_set_backup_csum_root_level(root_backup,
1920 			       btrfs_header_level(info->csum_root->node));
1921 
1922 	btrfs_set_backup_total_bytes(root_backup,
1923 			     btrfs_super_total_bytes(info->super_copy));
1924 	btrfs_set_backup_bytes_used(root_backup,
1925 			     btrfs_super_bytes_used(info->super_copy));
1926 	btrfs_set_backup_num_devices(root_backup,
1927 			     btrfs_super_num_devices(info->super_copy));
1928 
1929 	/*
1930 	 * if we don't copy this out to the super_copy, it won't get remembered
1931 	 * for the next commit
1932 	 */
1933 	memcpy(&info->super_copy->super_roots,
1934 	       &info->super_for_commit->super_roots,
1935 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1936 }
1937 
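/*
 * Editor's note: an illustrative sketch (invented name) of the slot
 * choice backup_super_roots() makes above: if the last backup already
 * records the current tree root generation, which happens at umount,
 * it is overwritten in place instead of consuming a fresh slot.
 */
static inline int sketch_pick_backup_slot(u64 last_gen, u64 cur_gen,
					  int next, int last)
{
	return last_gen == cur_gen ? last : next;
}
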
1938 /*
1939  * this copies info out of the root backup array and back into
1940  * the in-memory super block.  It is meant to help iterate through
1941  * the array, so you send it the number of backups you've already
1942  * tried and the last backup index you used.
1943  *
1944  * this returns -1 when it has tried all the backups
1945  */
1946 static noinline int next_root_backup(struct btrfs_fs_info *info,
1947 				     struct btrfs_super_block *super,
1948 				     int *num_backups_tried, int *backup_index)
1949 {
1950 	struct btrfs_root_backup *root_backup;
1951 	int newest = *backup_index;
1952 
1953 	if (*num_backups_tried == 0) {
1954 		u64 gen = btrfs_super_generation(super);
1955 
1956 		newest = find_newest_super_backup(info, gen);
1957 		if (newest == -1)
1958 			return -1;
1959 
1960 		*backup_index = newest;
1961 		*num_backups_tried = 1;
1962 	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1963 		/* we've tried all the backups, all done */
1964 		return -1;
1965 	} else {
1966 		/* jump to the next oldest backup */
1967 		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1968 			BTRFS_NUM_BACKUP_ROOTS;
1969 		*backup_index = newest;
1970 		*num_backups_tried += 1;
1971 	}
1972 	root_backup = super->super_roots + newest;
1973 
1974 	btrfs_set_super_generation(super,
1975 				   btrfs_backup_tree_root_gen(root_backup));
1976 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1977 	btrfs_set_super_root_level(super,
1978 				   btrfs_backup_tree_root_level(root_backup));
1979 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1980 
1981 	/*
1982 	 * FIXME: the total bytes and num_devices need to match or we should
1983 	 * require a fsck
1984 	 */
1985 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1986 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1987 	return 0;
1988 }
1989 
1990 /* helper to cleanup workers */
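/*
 * Editor's note: a compact sketch (not the kernel function) of the
 * iteration protocol next_root_backup() implements above: start from the
 * newest backup, then walk toward older ones, and give up once every
 * slot has been tried.
 */
static inline int sketch_next_backup_slot(int *idx, int *tried, int newest,
					  int nr_slots)
{
	if (*tried == 0)
		*idx = newest;		/* first try: the newest backup */
	else if (*tried == nr_slots)
		return -1;		/* all backups exhausted */
	else
		*idx = (*idx + nr_slots - 1) % nr_slots; /* next oldest */
	*tried += 1;
	return *idx;
}
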
1991 /* helper to clean up workers */
1992 {
1993 	btrfs_destroy_workqueue(fs_info->fixup_workers);
1994 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1995 	btrfs_destroy_workqueue(fs_info->workers);
1996 	btrfs_destroy_workqueue(fs_info->endio_workers);
1997 	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
1998 	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
1999 	btrfs_destroy_workqueue(fs_info->rmw_workers);
2000 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
2001 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2002 	btrfs_destroy_workqueue(fs_info->submit_workers);
2003 	btrfs_destroy_workqueue(fs_info->delayed_workers);
2004 	btrfs_destroy_workqueue(fs_info->caching_workers);
2005 	btrfs_destroy_workqueue(fs_info->readahead_workers);
2006 	btrfs_destroy_workqueue(fs_info->flush_workers);
2007 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2008 	btrfs_destroy_workqueue(fs_info->extent_workers);
2009 	/*
2010 	 * Now that all other work queues are destroyed, we can safely destroy
2011 	 * the queues used for metadata I/O, since tasks from those other work
2012 	 * queues can do metadata I/O operations.
2013 	 */
2014 	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2015 	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2016 }
2017 
2018 static void free_root_extent_buffers(struct btrfs_root *root)
2019 {
2020 	if (root) {
2021 		free_extent_buffer(root->node);
2022 		free_extent_buffer(root->commit_root);
2023 		root->node = NULL;
2024 		root->commit_root = NULL;
2025 	}
2026 }
2027 
2028 /* helper to clean up tree roots */
2029 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2030 {
2031 	free_root_extent_buffers(info->tree_root);
2032 
2033 	free_root_extent_buffers(info->dev_root);
2034 	free_root_extent_buffers(info->extent_root);
2035 	free_root_extent_buffers(info->csum_root);
2036 	free_root_extent_buffers(info->quota_root);
2037 	free_root_extent_buffers(info->uuid_root);
2038 	if (chunk_root)
2039 		free_root_extent_buffers(info->chunk_root);
2040 	free_root_extent_buffers(info->free_space_root);
2041 }
2042 
2043 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2044 {
2045 	int ret;
2046 	struct btrfs_root *gang[8];
2047 	int i;
2048 
2049 	while (!list_empty(&fs_info->dead_roots)) {
2050 		gang[0] = list_entry(fs_info->dead_roots.next,
2051 				     struct btrfs_root, root_list);
2052 		list_del(&gang[0]->root_list);
2053 
2054 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2055 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2056 		} else {
2057 			free_extent_buffer(gang[0]->node);
2058 			free_extent_buffer(gang[0]->commit_root);
2059 			btrfs_put_fs_root(gang[0]);
2060 		}
2061 	}
2062 
2063 	while (1) {
2064 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2065 					     (void **)gang, 0,
2066 					     ARRAY_SIZE(gang));
2067 		if (!ret)
2068 			break;
2069 		for (i = 0; i < ret; i++)
2070 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2071 	}
2072 
2073 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2074 		btrfs_free_log_root_tree(NULL, fs_info);
2075 		btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2076 	}
2077 }
2078 
2079 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2080 {
2081 	mutex_init(&fs_info->scrub_lock);
2082 	atomic_set(&fs_info->scrubs_running, 0);
2083 	atomic_set(&fs_info->scrub_pause_req, 0);
2084 	atomic_set(&fs_info->scrubs_paused, 0);
2085 	atomic_set(&fs_info->scrub_cancel_req, 0);
2086 	init_waitqueue_head(&fs_info->scrub_pause_wait);
2087 	fs_info->scrub_workers_refcnt = 0;
2088 }
2089 
2090 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2091 {
2092 	spin_lock_init(&fs_info->balance_lock);
2093 	mutex_init(&fs_info->balance_mutex);
2094 	atomic_set(&fs_info->balance_pause_req, 0);
2095 	atomic_set(&fs_info->balance_cancel_req, 0);
2096 	fs_info->balance_ctl = NULL;
2097 	init_waitqueue_head(&fs_info->balance_wait_q);
2098 }
2099 
2100 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2101 {
2102 	struct inode *inode = fs_info->btree_inode;
2103 
2104 	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2105 	set_nlink(inode, 1);
2106 	/*
2107 	 * We set the i_size on the btree inode to the max possible offset
2108 	 * (OFFSET_MAX).  The real end of the address space is determined by
2109 	 * all of the devices in the system.
2110 	 */
2111 	inode->i_size = OFFSET_MAX;
2112 	inode->i_mapping->a_ops = &btree_aops;
2113 
2114 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2115 	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
2116 	BTRFS_I(inode)->io_tree.track_uptodate = 0;
2117 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2118 
2119 	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2120 
2121 	BTRFS_I(inode)->root = fs_info->tree_root;
2122 	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2123 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2124 	btrfs_insert_inode_hash(inode);
2125 }
2126 
2127 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2128 {
2129 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2130 	rwlock_init(&fs_info->dev_replace.lock);
2131 	atomic_set(&fs_info->dev_replace.blocking_readers, 0);
2132 	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
2133 	init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
2134 }
2135 
2136 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2137 {
2138 	spin_lock_init(&fs_info->qgroup_lock);
2139 	mutex_init(&fs_info->qgroup_ioctl_lock);
2140 	fs_info->qgroup_tree = RB_ROOT;
2141 	fs_info->qgroup_op_tree = RB_ROOT;
2142 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2143 	fs_info->qgroup_seq = 1;
2144 	fs_info->qgroup_ulist = NULL;
2145 	fs_info->qgroup_rescan_running = false;
2146 	mutex_init(&fs_info->qgroup_rescan_lock);
2147 }
2148 
2149 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2150 		struct btrfs_fs_devices *fs_devices)
2151 {
2152 	u32 max_active = fs_info->thread_pool_size;
2153 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2154 
2155 	fs_info->workers =
2156 		btrfs_alloc_workqueue(fs_info, "worker",
2157 				      flags | WQ_HIGHPRI, max_active, 16);
2158 
2159 	fs_info->delalloc_workers =
2160 		btrfs_alloc_workqueue(fs_info, "delalloc",
2161 				      flags, max_active, 2);
2162 
2163 	fs_info->flush_workers =
2164 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2165 				      flags, max_active, 0);
2166 
2167 	fs_info->caching_workers =
2168 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2169 
2170 	/*
2171 	 * a higher idle thresh on the submit workers makes it much more
2172 	 * likely that bios will be sent down in a sane order to the
2173 	 * devices
2174 	 */
2175 	fs_info->submit_workers =
2176 		btrfs_alloc_workqueue(fs_info, "submit", flags,
2177 				      min_t(u64, fs_devices->num_devices,
2178 					    max_active), 64);
2179 
2180 	fs_info->fixup_workers =
2181 		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2182 
2183 	/*
2184 	 * endios are largely parallel and should have a very
2185 	 * low idle thresh
2186 	 */
2187 	fs_info->endio_workers =
2188 		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2189 	fs_info->endio_meta_workers =
2190 		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2191 				      max_active, 4);
2192 	fs_info->endio_meta_write_workers =
2193 		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2194 				      max_active, 2);
2195 	fs_info->endio_raid56_workers =
2196 		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2197 				      max_active, 4);
2198 	fs_info->endio_repair_workers =
2199 		btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2200 	fs_info->rmw_workers =
2201 		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2202 	fs_info->endio_write_workers =
2203 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2204 				      max_active, 2);
2205 	fs_info->endio_freespace_worker =
2206 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2207 				      max_active, 0);
2208 	fs_info->delayed_workers =
2209 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2210 				      max_active, 0);
2211 	fs_info->readahead_workers =
2212 		btrfs_alloc_workqueue(fs_info, "readahead", flags,
2213 				      max_active, 2);
2214 	fs_info->qgroup_rescan_workers =
2215 		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2216 	fs_info->extent_workers =
2217 		btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2218 				      min_t(u64, fs_devices->num_devices,
2219 					    max_active), 8);
2220 
2221 	if (!(fs_info->workers && fs_info->delalloc_workers &&
2222 	      fs_info->submit_workers && fs_info->flush_workers &&
2223 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2224 	      fs_info->endio_meta_write_workers &&
2225 	      fs_info->endio_repair_workers &&
2226 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2227 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2228 	      fs_info->caching_workers && fs_info->readahead_workers &&
2229 	      fs_info->fixup_workers && fs_info->delayed_workers &&
2230 	      fs_info->extent_workers &&
2231 	      fs_info->qgroup_rescan_workers)) {
2232 		return -ENOMEM;
2233 	}
2234 
2235 	return 0;
2236 }
2237 
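/*
 * Editor's note: a trivial standalone sketch of the min_t() clamp used
 * for the submit and extent-refs queues above: there is no point in
 * running more active workers than there are devices to feed.
 */
static inline unsigned int sketch_clamp_active(u64 num_devices,
					       unsigned int max_active)
{
	return num_devices < max_active ? (unsigned int)num_devices
					: max_active;
}
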
2238 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2239 			    struct btrfs_fs_devices *fs_devices)
2240 {
2241 	int ret;
2242 	struct btrfs_root *log_tree_root;
2243 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2244 	u64 bytenr = btrfs_super_log_root(disk_super);
2245 	int level = btrfs_super_log_root_level(disk_super);
2246 
2247 	if (fs_devices->rw_devices == 0) {
2248 		btrfs_warn(fs_info, "log replay required on RO media");
2249 		return -EIO;
2250 	}
2251 
2252 	log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2253 	if (!log_tree_root)
2254 		return -ENOMEM;
2255 
2256 	__setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2257 
2258 	log_tree_root->node = read_tree_block(fs_info, bytenr,
2259 					      fs_info->generation + 1,
2260 					      level, NULL);
2261 	if (IS_ERR(log_tree_root->node)) {
2262 		btrfs_warn(fs_info, "failed to read log tree");
2263 		ret = PTR_ERR(log_tree_root->node);
2264 		kfree(log_tree_root);
2265 		return ret;
2266 	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
2267 		btrfs_err(fs_info, "failed to read log tree");
2268 		free_extent_buffer(log_tree_root->node);
2269 		kfree(log_tree_root);
2270 		return -EIO;
2271 	}
2272 	/* returns with log_tree_root freed on success */
2273 	ret = btrfs_recover_log_trees(log_tree_root);
2274 	if (ret) {
2275 		btrfs_handle_fs_error(fs_info, ret,
2276 				      "Failed to recover log tree");
2277 		free_extent_buffer(log_tree_root->node);
2278 		kfree(log_tree_root);
2279 		return ret;
2280 	}
2281 
2282 	if (sb_rdonly(fs_info->sb)) {
2283 		ret = btrfs_commit_super(fs_info);
2284 		if (ret)
2285 			return ret;
2286 	}
2287 
2288 	return 0;
2289 }
2290 
2291 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2292 {
2293 	struct btrfs_root *tree_root = fs_info->tree_root;
2294 	struct btrfs_root *root;
2295 	struct btrfs_key location;
2296 	int ret;
2297 
2298 	BUG_ON(!fs_info->tree_root);
2299 
2300 	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2301 	location.type = BTRFS_ROOT_ITEM_KEY;
2302 	location.offset = 0;
2303 
2304 	root = btrfs_read_tree_root(tree_root, &location);
2305 	if (IS_ERR(root)) {
2306 		ret = PTR_ERR(root);
2307 		goto out;
2308 	}
2309 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2310 	fs_info->extent_root = root;
2311 
2312 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2313 	root = btrfs_read_tree_root(tree_root, &location);
2314 	if (IS_ERR(root)) {
2315 		ret = PTR_ERR(root);
2316 		goto out;
2317 	}
2318 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2319 	fs_info->dev_root = root;
2320 	btrfs_init_devices_late(fs_info);
2321 
2322 	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2323 	root = btrfs_read_tree_root(tree_root, &location);
2324 	if (IS_ERR(root)) {
2325 		ret = PTR_ERR(root);
2326 		goto out;
2327 	}
2328 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2329 	fs_info->csum_root = root;
2330 
2331 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2332 	root = btrfs_read_tree_root(tree_root, &location);
2333 	if (!IS_ERR(root)) {
2334 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2335 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2336 		fs_info->quota_root = root;
2337 	}
2338 
2339 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2340 	root = btrfs_read_tree_root(tree_root, &location);
2341 	if (IS_ERR(root)) {
2342 		ret = PTR_ERR(root);
2343 		if (ret != -ENOENT)
2344 			goto out;
2345 	} else {
2346 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2347 		fs_info->uuid_root = root;
2348 	}
2349 
2350 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2351 		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2352 		root = btrfs_read_tree_root(tree_root, &location);
2353 		if (IS_ERR(root)) {
2354 			ret = PTR_ERR(root);
2355 			goto out;
2356 		}
2357 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2358 		fs_info->free_space_root = root;
2359 	}
2360 
2361 	return 0;
2362 out:
2363 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2364 		   location.objectid, ret);
2365 	return ret;
2366 }
2367 
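/*
 * Editor's note: a sketch (invented helper) of the optional-tree pattern
 * in btrfs_read_roots() above: a missing uuid tree (-ENOENT) is
 * tolerated and simply left unset, while any other read error fails the
 * mount.
 */
static inline int sketch_optional_root_err(int err)
{
	return err == -ENOENT ? 0 : err;
}
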
2368 /*
2369  * Real super block validation
2370  * NOTE: super csum type and incompat features will not be checked here.
2371  *
2372  * @sb:		super block to check
2373  * @mirror_num:	the super block copy number whose bytenr is checked:
2374  * 		0	the primary (1st) sb
2375  * 		1, 2	2nd and 3rd backup copy
2376  * 	       -1	skip bytenr check
2377  */
2378 static int validate_super(struct btrfs_fs_info *fs_info,
2379 			    struct btrfs_super_block *sb, int mirror_num)
2380 {
2381 	u64 nodesize = btrfs_super_nodesize(sb);
2382 	u64 sectorsize = btrfs_super_sectorsize(sb);
2383 	int ret = 0;
2384 
2385 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2386 		btrfs_err(fs_info, "no valid FS found");
2387 		ret = -EINVAL;
2388 	}
2389 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2390 		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2391 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2392 		ret = -EINVAL;
2393 	}
2394 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2395 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2396 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2397 		ret = -EINVAL;
2398 	}
2399 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2400 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2401 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2402 		ret = -EINVAL;
2403 	}
2404 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2405 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2406 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2407 		ret = -EINVAL;
2408 	}
2409 
2410 	/*
2411 	 * Check sectorsize and nodesize first; other checks will need them.
2412 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2413 	 */
2414 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2415 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2416 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2417 		ret = -EINVAL;
2418 	}
2419 	/* Only sectorsize == PAGE_SIZE is supported for now */
2420 	if (sectorsize != PAGE_SIZE) {
2421 		btrfs_err(fs_info,
2422 			"sectorsize %llu not supported yet, only support %lu",
2423 			sectorsize, PAGE_SIZE);
2424 		ret = -EINVAL;
2425 	}
2426 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2427 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2428 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2429 		ret = -EINVAL;
2430 	}
2431 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2432 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2433 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2434 		ret = -EINVAL;
2435 	}
2436 
2437 	/* Root alignment check */
2438 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2439 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2440 			   btrfs_super_root(sb));
2441 		ret = -EINVAL;
2442 	}
2443 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2444 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2445 			   btrfs_super_chunk_root(sb));
2446 		ret = -EINVAL;
2447 	}
2448 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2449 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2450 			   btrfs_super_log_root(sb));
2451 		ret = -EINVAL;
2452 	}
2453 
2454 	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) {
2455 		btrfs_err(fs_info,
2456 			   "dev_item UUID does not match fsid: %pU != %pU",
2457 			   fs_info->fsid, sb->dev_item.fsid);
2458 		ret = -EINVAL;
2459 	}
2460 
2461 	/*
2462 	 * Hint to catch really bogus numbers, bitflips or so; more exact
2463 	 * checks are done later
2464 	 */
2465 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2466 		btrfs_err(fs_info, "bytes_used is too small %llu",
2467 			  btrfs_super_bytes_used(sb));
2468 		ret = -EINVAL;
2469 	}
2470 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2471 		btrfs_err(fs_info, "invalid stripesize %u",
2472 			  btrfs_super_stripesize(sb));
2473 		ret = -EINVAL;
2474 	}
2475 	if (btrfs_super_num_devices(sb) > (1UL << 31))
2476 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2477 			   btrfs_super_num_devices(sb));
2478 	if (btrfs_super_num_devices(sb) == 0) {
2479 		btrfs_err(fs_info, "number of devices is 0");
2480 		ret = -EINVAL;
2481 	}
2482 
2483 	if (mirror_num >= 0 &&
2484 	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2485 		btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2486 			  btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
2487 		ret = -EINVAL;
2488 	}
2489 
2490 	/*
2491 	 * Obvious sys_chunk_array corruptions, it must hold at least one key
2492 	 * and one chunk
2493 	 */
2494 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2495 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2496 			  btrfs_super_sys_array_size(sb),
2497 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2498 		ret = -EINVAL;
2499 	}
2500 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2501 			+ sizeof(struct btrfs_chunk)) {
2502 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2503 			  btrfs_super_sys_array_size(sb),
2504 			  sizeof(struct btrfs_disk_key)
2505 			  + sizeof(struct btrfs_chunk));
2506 		ret = -EINVAL;
2507 	}
2508 
2509 	/*
2510 	 * The generation is a global counter; we'll trust it more than the
2511 	 * others, but it's still possible that it's the one that's wrong.
2512 	 */
2513 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2514 		btrfs_warn(fs_info,
2515 			"suspicious: generation < chunk_root_generation: %llu < %llu",
2516 			btrfs_super_generation(sb),
2517 			btrfs_super_chunk_root_generation(sb));
2518 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2519 	    && btrfs_super_cache_generation(sb) != (u64)-1)
2520 		btrfs_warn(fs_info,
2521 			"suspicious: generation < cache_generation: %llu < %llu",
2522 			btrfs_super_generation(sb),
2523 			btrfs_super_cache_generation(sb));
2524 
2525 	return ret;
2526 }
2527 
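/*
 * Editor's note: a self-contained sketch of the block size sanity rules
 * validate_super() enforces above; the hardcoded 64K stands in for
 * BTRFS_MAX_METADATA_BLOCKSIZE.  Both sizes must be powers of two, the
 * sectorsize at least 4K, and the nodesize between the sectorsize and
 * the metadata limit.
 */
static inline bool sketch_block_sizes_valid(u64 sectorsize, u64 nodesize)
{
	const u64 max_meta = 65536;

	if (sectorsize < 4096 || (sectorsize & (sectorsize - 1)))
		return false;
	if (nodesize < sectorsize || nodesize > max_meta)
		return false;
	return (nodesize & (nodesize - 1)) == 0;
}
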
2528 /*
2529  * Validation of the super block at mount time.
2530  * Some checks already done early at mount time, like the csum type and
2531  * incompat flags, will be skipped.
2532  */
2533 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2534 {
2535 	return validate_super(fs_info, fs_info->super_copy, 0);
2536 }
2537 
2538 /*
2539  * Validation of the super block at write time.
2540  * Some checks, like the bytenr check, will be skipped as their values will
2541  * be overwritten soon.
2542  * Extra checks, like the csum type and incompat flags, will be done here.
2543  */
2544 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2545 				      struct btrfs_super_block *sb)
2546 {
2547 	int ret;
2548 
2549 	ret = validate_super(fs_info, sb, -1);
2550 	if (ret < 0)
2551 		goto out;
2552 	if (btrfs_super_csum_type(sb) != BTRFS_CSUM_TYPE_CRC32) {
2553 		ret = -EUCLEAN;
2554 		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2555 			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2556 		goto out;
2557 	}
2558 	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2559 		ret = -EUCLEAN;
2560 		btrfs_err(fs_info,
2561 		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2562 			  btrfs_super_incompat_flags(sb),
2563 			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2564 		goto out;
2565 	}
2566 out:
2567 	if (ret < 0)
2568 		btrfs_err(fs_info,
2569 		"super block corruption detected before writing it to disk");
2570 	return ret;
2571 }
2572 
2573 int open_ctree(struct super_block *sb,
2574 	       struct btrfs_fs_devices *fs_devices,
2575 	       char *options)
2576 {
2577 	u32 sectorsize;
2578 	u32 nodesize;
2579 	u32 stripesize;
2580 	u64 generation;
2581 	u64 features;
2582 	struct btrfs_key location;
2583 	struct buffer_head *bh;
2584 	struct btrfs_super_block *disk_super;
2585 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2586 	struct btrfs_root *tree_root;
2587 	struct btrfs_root *chunk_root;
2588 	int ret;
2589 	int err = -EINVAL;
2590 	int num_backups_tried = 0;
2591 	int backup_index = 0;
2592 	int clear_free_space_tree = 0;
2593 	int level;
2594 
2595 	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2596 	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2597 	if (!tree_root || !chunk_root) {
2598 		err = -ENOMEM;
2599 		goto fail;
2600 	}
2601 
2602 	ret = init_srcu_struct(&fs_info->subvol_srcu);
2603 	if (ret) {
2604 		err = ret;
2605 		goto fail;
2606 	}
2607 
2608 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2609 	if (ret) {
2610 		err = ret;
2611 		goto fail_srcu;
2612 	}
2613 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2614 					(1 + ilog2(nr_cpu_ids));
2615 
2616 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2617 	if (ret) {
2618 		err = ret;
2619 		goto fail_dirty_metadata_bytes;
2620 	}
2621 
2622 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2623 			GFP_KERNEL);
2624 	if (ret) {
2625 		err = ret;
2626 		goto fail_delalloc_bytes;
2627 	}
2628 
2629 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2630 	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2631 	INIT_LIST_HEAD(&fs_info->trans_list);
2632 	INIT_LIST_HEAD(&fs_info->dead_roots);
2633 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2634 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2635 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2636 	INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
2637 	spin_lock_init(&fs_info->pending_raid_kobjs_lock);
2638 	spin_lock_init(&fs_info->delalloc_root_lock);
2639 	spin_lock_init(&fs_info->trans_lock);
2640 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2641 	spin_lock_init(&fs_info->delayed_iput_lock);
2642 	spin_lock_init(&fs_info->defrag_inodes_lock);
2643 	spin_lock_init(&fs_info->tree_mod_seq_lock);
2644 	spin_lock_init(&fs_info->super_lock);
2645 	spin_lock_init(&fs_info->qgroup_op_lock);
2646 	spin_lock_init(&fs_info->buffer_lock);
2647 	spin_lock_init(&fs_info->unused_bgs_lock);
2648 	rwlock_init(&fs_info->tree_mod_log_lock);
2649 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2650 	mutex_init(&fs_info->delete_unused_bgs_mutex);
2651 	mutex_init(&fs_info->reloc_mutex);
2652 	mutex_init(&fs_info->delalloc_root_mutex);
2653 	mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2654 	seqlock_init(&fs_info->profiles_lock);
2655 
2656 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2657 	INIT_LIST_HEAD(&fs_info->space_info);
2658 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2659 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2660 	btrfs_mapping_init(&fs_info->mapping_tree);
2661 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2662 			     BTRFS_BLOCK_RSV_GLOBAL);
2663 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2664 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2665 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2666 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2667 			     BTRFS_BLOCK_RSV_DELOPS);
2668 	atomic_set(&fs_info->async_delalloc_pages, 0);
2669 	atomic_set(&fs_info->defrag_running, 0);
2670 	atomic_set(&fs_info->qgroup_op_seq, 0);
2671 	atomic_set(&fs_info->reada_works_cnt, 0);
2672 	atomic64_set(&fs_info->tree_mod_seq, 0);
2673 	fs_info->sb = sb;
2674 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2675 	fs_info->metadata_ratio = 0;
2676 	fs_info->defrag_inodes = RB_ROOT;
2677 	atomic64_set(&fs_info->free_chunk_space, 0);
2678 	fs_info->tree_mod_log = RB_ROOT;
2679 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2680 	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2681 	/* readahead state */
2682 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2683 	spin_lock_init(&fs_info->reada_lock);
2684 	btrfs_init_ref_verify(fs_info);
2685 
2686 	fs_info->thread_pool_size = min_t(unsigned long,
2687 					  num_online_cpus() + 2, 8);
2688 
2689 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2690 	spin_lock_init(&fs_info->ordered_root_lock);
2691 
2692 	fs_info->btree_inode = new_inode(sb);
2693 	if (!fs_info->btree_inode) {
2694 		err = -ENOMEM;
2695 		goto fail_bio_counter;
2696 	}
2697 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2698 
2699 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2700 					GFP_KERNEL);
2701 	if (!fs_info->delayed_root) {
2702 		err = -ENOMEM;
2703 		goto fail_iput;
2704 	}
2705 	btrfs_init_delayed_root(fs_info->delayed_root);
2706 
2707 	btrfs_init_scrub(fs_info);
2708 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2709 	fs_info->check_integrity_print_mask = 0;
2710 #endif
2711 	btrfs_init_balance(fs_info);
2712 	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2713 
2714 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2715 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2716 
2717 	btrfs_init_btree_inode(fs_info);
2718 
2719 	spin_lock_init(&fs_info->block_group_cache_lock);
2720 	fs_info->block_group_cache_tree = RB_ROOT;
2721 	fs_info->first_logical_byte = (u64)-1;
2722 
2723 	extent_io_tree_init(&fs_info->freed_extents[0], NULL);
2724 	extent_io_tree_init(&fs_info->freed_extents[1], NULL);
2725 	fs_info->pinned_extents = &fs_info->freed_extents[0];
2726 	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2727 
2728 	mutex_init(&fs_info->ordered_operations_mutex);
2729 	mutex_init(&fs_info->tree_log_mutex);
2730 	mutex_init(&fs_info->chunk_mutex);
2731 	mutex_init(&fs_info->transaction_kthread_mutex);
2732 	mutex_init(&fs_info->cleaner_mutex);
2733 	mutex_init(&fs_info->ro_block_group_mutex);
2734 	init_rwsem(&fs_info->commit_root_sem);
2735 	init_rwsem(&fs_info->cleanup_work_sem);
2736 	init_rwsem(&fs_info->subvol_sem);
2737 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2738 
2739 	btrfs_init_dev_replace_locks(fs_info);
2740 	btrfs_init_qgroup(fs_info);
2741 
2742 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2743 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2744 
2745 	init_waitqueue_head(&fs_info->transaction_throttle);
2746 	init_waitqueue_head(&fs_info->transaction_wait);
2747 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2748 	init_waitqueue_head(&fs_info->async_submit_wait);
2749 
2750 	INIT_LIST_HEAD(&fs_info->pinned_chunks);
2751 
2752 	/* Usable values until the real ones are cached from the superblock */
2753 	fs_info->nodesize = 4096;
2754 	fs_info->sectorsize = 4096;
2755 	fs_info->stripesize = 4096;
2756 
2757 	ret = btrfs_alloc_stripe_hash_table(fs_info);
2758 	if (ret) {
2759 		err = ret;
2760 		goto fail_alloc;
2761 	}
2762 
2763 	__setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2764 
2765 	invalidate_bdev(fs_devices->latest_bdev);
2766 
2767 	/*
2768 	 * Read super block and check the signature bytes only
2769 	 */
2770 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2771 	if (IS_ERR(bh)) {
2772 		err = PTR_ERR(bh);
2773 		goto fail_alloc;
2774 	}
2775 
2776 	/*
2777 	 * We want to check the superblock checksum; the csum type is stored inside.
2778 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2779 	 */
2780 	if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2781 		btrfs_err(fs_info, "superblock checksum mismatch");
2782 		err = -EINVAL;
2783 		brelse(bh);
2784 		goto fail_alloc;
2785 	}
2786 
2787 	/*
2788 	 * super_copy is zeroed at allocation time and we never touch the
2789 	 * following bytes up to INFO_SIZE; the checksum is calculated from
2790 	 * the whole block of INFO_SIZE
2791 	 */
2792 	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2793 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2794 	       sizeof(*fs_info->super_for_commit));
2795 	brelse(bh);
2796 
2797 	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2798 
2799 	ret = btrfs_validate_mount_super(fs_info);
2800 	if (ret) {
2801 		btrfs_err(fs_info, "superblock contains fatal errors");
2802 		err = -EINVAL;
2803 		goto fail_alloc;
2804 	}
2805 
2806 	disk_super = fs_info->super_copy;
2807 	if (!btrfs_super_root(disk_super))
2808 		goto fail_alloc;
2809 
2810 	/* Check the FS state to see whether the FS is broken */
2811 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2812 		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2813 
2814 	/*
2815 	 * run through our array of backup supers and set up
2816 	 * our ring pointer to the oldest one
2817 	 */
2818 	generation = btrfs_super_generation(disk_super);
2819 	find_oldest_super_backup(fs_info, generation);
2820 
2821 	/*
2822 	 * In the long term, we'll store the compression type in the super
2823 	 * block, and it'll be used for per-file compression control.
2824 	 */
2825 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2826 
2827 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2828 	if (ret) {
2829 		err = ret;
2830 		goto fail_alloc;
2831 	}
2832 
2833 	features = btrfs_super_incompat_flags(disk_super) &
2834 		~BTRFS_FEATURE_INCOMPAT_SUPP;
2835 	if (features) {
2836 		btrfs_err(fs_info,
2837 		    "cannot mount because of unsupported optional features (%llx)",
2838 		    features);
2839 		err = -EINVAL;
2840 		goto fail_alloc;
2841 	}
2842 
2843 	features = btrfs_super_incompat_flags(disk_super);
2844 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2845 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2846 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2847 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2848 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2849 
2850 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2851 		btrfs_info(fs_info, "has skinny extents");
2852 
2853 	/*
2854 	 * flag our filesystem as having big metadata blocks if
2855 	 * they are bigger than the page size
2856 	 */
2857 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2858 		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2859 			btrfs_info(fs_info,
2860 				"flagging fs with big metadata feature");
2861 		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2862 	}
2863 
2864 	nodesize = btrfs_super_nodesize(disk_super);
2865 	sectorsize = btrfs_super_sectorsize(disk_super);
2866 	stripesize = sectorsize;
2867 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2868 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2869 
2870 	/* Cache block sizes */
2871 	fs_info->nodesize = nodesize;
2872 	fs_info->sectorsize = sectorsize;
2873 	fs_info->stripesize = stripesize;
2874 
2875 	/*
2876 	 * mixed block groups end up with duplicate but slightly offset
2877 	 * extent buffers for the same range.  This leads to corruption.
2878 	 */
2879 	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2880 	    (sectorsize != nodesize)) {
2881 		btrfs_err(fs_info,
2882 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2883 			nodesize, sectorsize);
2884 		goto fail_alloc;
2885 	}
2886 
2887 	/*
2888 	 * No need to take the lock because there is no other task that will
2889 	 * update the flag.
2890 	 */
2891 	btrfs_set_super_incompat_flags(disk_super, features);
2892 
2893 	features = btrfs_super_compat_ro_flags(disk_super) &
2894 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
2895 	if (!sb_rdonly(sb) && features) {
2896 		btrfs_err(fs_info,
2897 	"cannot mount read-write because of unsupported optional features (%llx)",
2898 		       features);
2899 		err = -EINVAL;
2900 		goto fail_alloc;
2901 	}
2902 
2903 	ret = btrfs_init_workqueues(fs_info, fs_devices);
2904 	if (ret) {
2905 		err = ret;
2906 		goto fail_sb_buffer;
2907 	}
2908 
2909 	sb->s_bdi->congested_fn = btrfs_congested_fn;
2910 	sb->s_bdi->congested_data = fs_info;
2911 	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
2912 	sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE;
2913 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2914 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
2915 
2916 	sb->s_blocksize = sectorsize;
2917 	sb->s_blocksize_bits = blksize_bits(sectorsize);
2918 	memcpy(&sb->s_uuid, fs_info->fsid, BTRFS_FSID_SIZE);
2919 
2920 	mutex_lock(&fs_info->chunk_mutex);
2921 	ret = btrfs_read_sys_array(fs_info);
2922 	mutex_unlock(&fs_info->chunk_mutex);
2923 	if (ret) {
2924 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
2925 		goto fail_sb_buffer;
2926 	}
2927 
2928 	generation = btrfs_super_chunk_root_generation(disk_super);
2929 	level = btrfs_super_chunk_root_level(disk_super);
2930 
2931 	__setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2932 
2933 	chunk_root->node = read_tree_block(fs_info,
2934 					   btrfs_super_chunk_root(disk_super),
2935 					   generation, level, NULL);
2936 	if (IS_ERR(chunk_root->node) ||
2937 	    !extent_buffer_uptodate(chunk_root->node)) {
2938 		btrfs_err(fs_info, "failed to read chunk root");
2939 		if (!IS_ERR(chunk_root->node))
2940 			free_extent_buffer(chunk_root->node);
2941 		chunk_root->node = NULL;
2942 		goto fail_tree_roots;
2943 	}
2944 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2945 	chunk_root->commit_root = btrfs_root_node(chunk_root);
2946 
2947 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2948 	   btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2949 
2950 	ret = btrfs_read_chunk_tree(fs_info);
2951 	if (ret) {
2952 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2953 		goto fail_tree_roots;
2954 	}
2955 
2956 	/*
2957 	 * Keep the devid that is marked to be the target device for the
2958 	 * device replace procedure
2959 	 */
2960 	btrfs_free_extra_devids(fs_devices, 0);
2961 
2962 	if (!fs_devices->latest_bdev) {
2963 		btrfs_err(fs_info, "failed to read devices");
2964 		goto fail_tree_roots;
2965 	}
2966 
2967 retry_root_backup:
2968 	generation = btrfs_super_generation(disk_super);
2969 	level = btrfs_super_root_level(disk_super);
2970 
2971 	tree_root->node = read_tree_block(fs_info,
2972 					  btrfs_super_root(disk_super),
2973 					  generation, level, NULL);
2974 	if (IS_ERR(tree_root->node) ||
2975 	    !extent_buffer_uptodate(tree_root->node)) {
2976 		btrfs_warn(fs_info, "failed to read tree root");
2977 		if (!IS_ERR(tree_root->node))
2978 			free_extent_buffer(tree_root->node);
2979 		tree_root->node = NULL;
2980 		goto recovery_tree_root;
2981 	}
2982 
2983 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2984 	tree_root->commit_root = btrfs_root_node(tree_root);
2985 	btrfs_set_root_refs(&tree_root->root_item, 1);
2986 
2987 	mutex_lock(&tree_root->objectid_mutex);
2988 	ret = btrfs_find_highest_objectid(tree_root,
2989 					&tree_root->highest_objectid);
2990 	if (ret) {
2991 		mutex_unlock(&tree_root->objectid_mutex);
2992 		goto recovery_tree_root;
2993 	}
2994 
2995 	ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2996 
2997 	mutex_unlock(&tree_root->objectid_mutex);
2998 
2999 	ret = btrfs_read_roots(fs_info);
3000 	if (ret)
3001 		goto recovery_tree_root;
3002 
3003 	fs_info->generation = generation;
3004 	fs_info->last_trans_committed = generation;
3005 
3006 	ret = btrfs_verify_dev_extents(fs_info);
3007 	if (ret) {
3008 		btrfs_err(fs_info,
3009 			  "failed to verify dev extents against chunks: %d",
3010 			  ret);
3011 		goto fail_block_groups;
3012 	}
3013 	ret = btrfs_recover_balance(fs_info);
3014 	if (ret) {
3015 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3016 		goto fail_block_groups;
3017 	}
3018 
3019 	ret = btrfs_init_dev_stats(fs_info);
3020 	if (ret) {
3021 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3022 		goto fail_block_groups;
3023 	}
3024 
3025 	ret = btrfs_init_dev_replace(fs_info);
3026 	if (ret) {
3027 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3028 		goto fail_block_groups;
3029 	}
3030 
3031 	btrfs_free_extra_devids(fs_devices, 1);
3032 
3033 	ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
3034 	if (ret) {
3035 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3036 				ret);
3037 		goto fail_block_groups;
3038 	}
3039 
3040 	ret = btrfs_sysfs_add_device(fs_devices);
3041 	if (ret) {
3042 		btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3043 				ret);
3044 		goto fail_fsdev_sysfs;
3045 	}
3046 
3047 	ret = btrfs_sysfs_add_mounted(fs_info);
3048 	if (ret) {
3049 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3050 		goto fail_fsdev_sysfs;
3051 	}
3052 
3053 	ret = btrfs_init_space_info(fs_info);
3054 	if (ret) {
3055 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3056 		goto fail_sysfs;
3057 	}
3058 
3059 	ret = btrfs_read_block_groups(fs_info);
3060 	if (ret) {
3061 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3062 		goto fail_sysfs;
3063 	}
3064 
3065 	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
3066 		btrfs_warn(fs_info,
3067 		"writeable mount is not allowed due to too many missing devices");
3068 		goto fail_sysfs;
3069 	}
3070 
3071 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3072 					       "btrfs-cleaner");
3073 	if (IS_ERR(fs_info->cleaner_kthread))
3074 		goto fail_sysfs;
3075 
3076 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3077 						   tree_root,
3078 						   "btrfs-transaction");
3079 	if (IS_ERR(fs_info->transaction_kthread))
3080 		goto fail_cleaner;
3081 
3082 	if (!btrfs_test_opt(fs_info, NOSSD) &&
3083 	    !fs_info->fs_devices->rotating) {
3084 		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3085 	}
3086 
3087 	/*
3088 	 * Mount does not set all options immediately; we can do it now and do
3089 	 * not have to wait for a transaction commit
3090 	 */
3091 	btrfs_apply_pending_changes(fs_info);
3092 
3093 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3094 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3095 		ret = btrfsic_mount(fs_info, fs_devices,
3096 				    btrfs_test_opt(fs_info,
3097 					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3098 				    1 : 0,
3099 				    fs_info->check_integrity_print_mask);
3100 		if (ret)
3101 			btrfs_warn(fs_info,
3102 				"failed to initialize integrity check module: %d",
3103 				ret);
3104 	}
3105 #endif
3106 	ret = btrfs_read_qgroup_config(fs_info);
3107 	if (ret)
3108 		goto fail_trans_kthread;
3109 
3110 	if (btrfs_build_ref_tree(fs_info))
3111 		btrfs_err(fs_info, "couldn't build ref tree");
3112 
3113 	/* Do not make disk changes in a broken FS or when nologreplay is given */
3114 	if (btrfs_super_log_root(disk_super) != 0 &&
3115 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3116 		ret = btrfs_replay_log(fs_info, fs_devices);
3117 		if (ret) {
3118 			err = ret;
3119 			goto fail_qgroup;
3120 		}
3121 	}
3122 
3123 	ret = btrfs_find_orphan_roots(fs_info);
3124 	if (ret)
3125 		goto fail_qgroup;
3126 
3127 	if (!sb_rdonly(sb)) {
3128 		ret = btrfs_cleanup_fs_roots(fs_info);
3129 		if (ret)
3130 			goto fail_qgroup;
3131 
3132 		mutex_lock(&fs_info->cleaner_mutex);
3133 		ret = btrfs_recover_relocation(tree_root);
3134 		mutex_unlock(&fs_info->cleaner_mutex);
3135 		if (ret < 0) {
3136 			btrfs_warn(fs_info, "failed to recover relocation: %d",
3137 					ret);
3138 			err = -EINVAL;
3139 			goto fail_qgroup;
3140 		}
3141 	}
3142 
3143 	location.objectid = BTRFS_FS_TREE_OBJECTID;
3144 	location.type = BTRFS_ROOT_ITEM_KEY;
3145 	location.offset = 0;
3146 
3147 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3148 	if (IS_ERR(fs_info->fs_root)) {
3149 		err = PTR_ERR(fs_info->fs_root);
3150 		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3151 		goto fail_qgroup;
3152 	}
3153 
3154 	if (sb_rdonly(sb))
3155 		return 0;
3156 
3157 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3158 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3159 		clear_free_space_tree = 1;
3160 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3161 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3162 		btrfs_warn(fs_info, "free space tree is invalid");
3163 		clear_free_space_tree = 1;
3164 	}
3165 
3166 	if (clear_free_space_tree) {
3167 		btrfs_info(fs_info, "clearing free space tree");
3168 		ret = btrfs_clear_free_space_tree(fs_info);
3169 		if (ret) {
3170 			btrfs_warn(fs_info,
3171 				   "failed to clear free space tree: %d", ret);
3172 			close_ctree(fs_info);
3173 			return ret;
3174 		}
3175 	}
3176 
3177 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3178 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3179 		btrfs_info(fs_info, "creating free space tree");
3180 		ret = btrfs_create_free_space_tree(fs_info);
3181 		if (ret) {
3182 			btrfs_warn(fs_info,
3183 				"failed to create free space tree: %d", ret);
3184 			close_ctree(fs_info);
3185 			return ret;
3186 		}
3187 	}
3188 
3189 	down_read(&fs_info->cleanup_work_sem);
3190 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3191 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3192 		up_read(&fs_info->cleanup_work_sem);
3193 		close_ctree(fs_info);
3194 		return ret;
3195 	}
3196 	up_read(&fs_info->cleanup_work_sem);
3197 
3198 	ret = btrfs_resume_balance_async(fs_info);
3199 	if (ret) {
3200 		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3201 		close_ctree(fs_info);
3202 		return ret;
3203 	}
3204 
3205 	ret = btrfs_resume_dev_replace_async(fs_info);
3206 	if (ret) {
3207 		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3208 		close_ctree(fs_info);
3209 		return ret;
3210 	}
3211 
3212 	btrfs_qgroup_rescan_resume(fs_info);
3213 
3214 	if (!fs_info->uuid_root) {
3215 		btrfs_info(fs_info, "creating UUID tree");
3216 		ret = btrfs_create_uuid_tree(fs_info);
3217 		if (ret) {
3218 			btrfs_warn(fs_info,
3219 				"failed to create the UUID tree: %d", ret);
3220 			close_ctree(fs_info);
3221 			return ret;
3222 		}
3223 	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3224 		   fs_info->generation !=
3225 				btrfs_super_uuid_tree_generation(disk_super)) {
3226 		btrfs_info(fs_info, "checking UUID tree");
3227 		ret = btrfs_check_uuid_tree(fs_info);
3228 		if (ret) {
3229 			btrfs_warn(fs_info,
3230 				"failed to check the UUID tree: %d", ret);
3231 			close_ctree(fs_info);
3232 			return ret;
3233 		}
3234 	} else {
3235 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3236 	}
3237 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3238 
3239 	/*
3240 	 * backuproot only affects mount behavior, and if open_ctree succeeded,
3241 	 * there is no need to keep the flag
3242 	 */
3243 	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3244 
3245 	return 0;
3246 
3247 fail_qgroup:
3248 	btrfs_free_qgroup_config(fs_info);
3249 fail_trans_kthread:
3250 	kthread_stop(fs_info->transaction_kthread);
3251 	btrfs_cleanup_transaction(fs_info);
3252 	btrfs_free_fs_roots(fs_info);
3253 fail_cleaner:
3254 	kthread_stop(fs_info->cleaner_kthread);
3255 
3256 	/*
3257 	 * make sure we're done with the btree inode before we stop our
3258 	 * kthreads
3259 	 */
3260 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3261 
3262 fail_sysfs:
3263 	btrfs_sysfs_remove_mounted(fs_info);
3264 
3265 fail_fsdev_sysfs:
3266 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3267 
3268 fail_block_groups:
3269 	btrfs_put_block_group_cache(fs_info);
3270 
3271 fail_tree_roots:
3272 	free_root_pointers(fs_info, 1);
3273 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3274 
3275 fail_sb_buffer:
3276 	btrfs_stop_all_workers(fs_info);
3277 	btrfs_free_block_groups(fs_info);
3278 fail_alloc:
3279 fail_iput:
3280 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3281 
3282 	iput(fs_info->btree_inode);
3283 fail_bio_counter:
3284 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
3285 fail_delalloc_bytes:
3286 	percpu_counter_destroy(&fs_info->delalloc_bytes);
3287 fail_dirty_metadata_bytes:
3288 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3289 fail_srcu:
3290 	cleanup_srcu_struct(&fs_info->subvol_srcu);
3291 fail:
3292 	btrfs_free_stripe_hash_table(fs_info);
3293 	btrfs_close_devices(fs_info->fs_devices);
3294 	return err;
3295 
3296 recovery_tree_root:
3297 	if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3298 		goto fail_tree_roots;
3299 
3300 	free_root_pointers(fs_info, 0);
3301 
3302 	/* don't use the log in recovery mode, it won't be valid */
3303 	/* don't use the log in recovery mode; it won't be valid */
3304 
3305 	/* we can't trust the free space cache either */
3306 	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3307 
3308 	ret = next_root_backup(fs_info, fs_info->super_copy,
3309 			       &num_backups_tried, &backup_index);
3310 	if (ret == -1)
3311 		goto fail_block_groups;
3312 	goto retry_root_backup;
3313 }
3314 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3315 
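/*
 * Editor's note: a heavily compressed sketch (invented callback, not the
 * real control flow) of the USEBACKUPROOT retry loop in open_ctree()
 * above: when the tree root cannot be read, point the in-memory super at
 * the next-oldest backup root and retry, failing once every backup slot
 * has been tried.
 */
static inline int sketch_mount_with_backups(int (*read_root)(int slot),
					    int newest, int nr_slots)
{
	int slot = newest;
	int tried;

	for (tried = 0; tried < nr_slots; tried++) {
		if (read_root(slot) == 0)
			return 0;	/* tree root read, mount proceeds */
		slot = (slot + nr_slots - 1) % nr_slots; /* next oldest */
	}
	return -EINVAL;	/* no usable root among the backups */
}
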
3316 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3317 {
3318 	if (uptodate) {
3319 		set_buffer_uptodate(bh);
3320 	} else {
3321 		struct btrfs_device *device = (struct btrfs_device *)
3322 			bh->b_private;
3323 
3324 		btrfs_warn_rl_in_rcu(device->fs_info,
3325 				"lost page write due to IO error on %s",
3326 				rcu_str_deref(device->name));
3327 		/* Note: we don't set_buffer_write_io_error because we have
3328 		 * our own ways of dealing with the IO errors.
3329 		 */
3330 		clear_buffer_uptodate(bh);
3331 		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3332 	}
3333 	unlock_buffer(bh);
3334 	put_bh(bh);
3335 }
3336 
3337 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3338 			struct buffer_head **bh_ret)
3339 {
3340 	struct buffer_head *bh;
3341 	struct btrfs_super_block *super;
3342 	u64 bytenr;
3343 
3344 	bytenr = btrfs_sb_offset(copy_num);
3345 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3346 		return -EINVAL;
3347 
3348 	bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
3349 	/*
3350 	 * If we fail to read from the underlying devices, as of now
3351 	 * the best option we have is to mark it EIO.
3352 	 */
3353 	if (!bh)
3354 		return -EIO;
3355 
3356 	super = (struct btrfs_super_block *)bh->b_data;
3357 	if (btrfs_super_bytenr(super) != bytenr ||
3358 		    btrfs_super_magic(super) != BTRFS_MAGIC) {
3359 		brelse(bh);
3360 		return -EINVAL;
3361 	}
3362 
3363 	*bh_ret = bh;
3364 	return 0;
3365 }
3366 
3367 
3368 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3369 {
3370 	struct buffer_head *bh;
3371 	struct buffer_head *latest = NULL;
3372 	struct btrfs_super_block *super;
3373 	int i;
3374 	u64 transid = 0;
3375 	int ret = -EINVAL;
3376 
3377 	/* we would like to check all the supers, but that would make
3378 	 * a btrfs mount succeed after a mkfs from a different FS.
3379 	 * So, we need to add a special mount option to scan for
3380 	 * the later supers, using BTRFS_SUPER_MIRROR_MAX instead of 1
3381 	 */
3382 	for (i = 0; i < 1; i++) {
3383 		ret = btrfs_read_dev_one_super(bdev, i, &bh);
3384 		if (ret)
3385 			continue;
3386 
3387 		super = (struct btrfs_super_block *)bh->b_data;
3388 
3389 		if (!latest || btrfs_super_generation(super) > transid) {
3390 			brelse(latest);
3391 			latest = bh;
3392 			transid = btrfs_super_generation(super);
3393 		} else {
3394 			brelse(bh);
3395 		}
3396 	}
3397 
3398 	if (!latest)
3399 		return ERR_PTR(ret);
3400 
3401 	return latest;
3402 }
3403 
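/*
 * Editor's note: a standalone sketch of the "keep the newest" selection
 * in btrfs_read_dev_super() above: among the readable super block
 * copies, the one with the highest generation wins.
 */
static inline int sketch_pick_latest_super(const u64 *generation, int nr)
{
	int i, best = -1;
	u64 best_gen = 0;

	for (i = 0; i < nr; i++) {
		if (best < 0 || generation[i] > best_gen) {
			best = i;
			best_gen = generation[i];
		}
	}
	return best;	/* -1 if no copy was readable */
}
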
3404 /*
3405  * Write superblock @sb to the @device. Do not wait for completion; all the
3406  * buffer heads we write are pinned.
3407  *
3408  * Write @max_mirrors copies of the superblock, where 0 means the default:
3409  * all copies that fit the expected device size at commit time. Note that
3410  * max_mirrors must be the same for the write and wait phases.
3411  *
3412  * Return 0 if at least one copy was submitted successfully, -1 otherwise.
3413  */
3414 static int write_dev_supers(struct btrfs_device *device,
3415 			    struct btrfs_super_block *sb, int max_mirrors)
3416 {
3417 	struct buffer_head *bh;
3418 	int i;
3419 	int ret;
3420 	int errors = 0;
3421 	u32 crc;
3422 	u64 bytenr;
3423 	int op_flags;
3424 
3425 	if (max_mirrors == 0)
3426 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3427 
3428 	for (i = 0; i < max_mirrors; i++) {
3429 		bytenr = btrfs_sb_offset(i);
3430 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3431 		    device->commit_total_bytes)
3432 			break;
3433 
3434 		btrfs_set_super_bytenr(sb, bytenr);
3435 
3436 		crc = ~(u32)0;
3437 		crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc,
3438 				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
3439 		btrfs_csum_final(crc, sb->csum);
3440 
3441 		/* One reference for us, and we leave it for the caller */
3442 		bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
3443 			      BTRFS_SUPER_INFO_SIZE);
3444 		if (!bh) {
3445 			btrfs_err(device->fs_info,
3446 			    "couldn't get super buffer head for bytenr %llu",
3447 			    bytenr);
3448 			errors++;
3449 			continue;
3450 		}
3451 
3452 		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3453 
3454 		/* one reference for submit_bh */
3455 		get_bh(bh);
3456 
3457 		set_buffer_uptodate(bh);
3458 		lock_buffer(bh);
3459 		bh->b_end_io = btrfs_end_buffer_write_sync;
3460 		bh->b_private = device;
3461 
3462 		/*
3463 		 * We FUA the first super.  The others we allow
3464 		 * to go down lazily.
3465 		 */
3466 		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3467 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3468 			op_flags |= REQ_FUA;
3469 		ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3470 		if (ret)
3471 			errors++;
3472 	}
3473 	return errors < i ? 0 : -1;
3474 }
3475 
3476 /*
3477  * Wait for completion of the superblock writes started by write_dev_supers;
3478  * @max_mirrors must be the same as in the write phase.
3479  *
3480  * Return 0 if at least one copy completed up to date, -1 if none did or
3481  * if the primary superblock failed.
3482  */
3483 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3484 {
3485 	struct buffer_head *bh;
3486 	int i;
3487 	int errors = 0;
3488 	bool primary_failed = false;
3489 	u64 bytenr;
3490 
3491 	if (max_mirrors == 0)
3492 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3493 
3494 	for (i = 0; i < max_mirrors; i++) {
3495 		bytenr = btrfs_sb_offset(i);
3496 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3497 		    device->commit_total_bytes)
3498 			break;
3499 
3500 		bh = __find_get_block(device->bdev,
3501 				      bytenr / BTRFS_BDEV_BLOCKSIZE,
3502 				      BTRFS_SUPER_INFO_SIZE);
3503 		if (!bh) {
3504 			errors++;
3505 			if (i == 0)
3506 				primary_failed = true;
3507 			continue;
3508 		}
3509 		wait_on_buffer(bh);
3510 		if (!buffer_uptodate(bh)) {
3511 			errors++;
3512 			if (i == 0)
3513 				primary_failed = true;
3514 		}
3515 
3516 		/* drop our reference */
3517 		brelse(bh);
3518 
3519 		/* drop the reference from the writing run */
3520 		brelse(bh);
3521 	}
3522 
3523 	/* log error, force error return */
3524 	if (primary_failed) {
3525 		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3526 			  device->devid);
3527 		return -1;
3528 	}
3529 
3530 	return errors < i ? 0 : -1;
3531 }
3532 
3533 /*
3534  * Endio for write_dev_flush; this wakes anyone waiting for the
3535  * barrier once it completes.
3536  */
3537 static void btrfs_end_empty_barrier(struct bio *bio)
3538 {
3539 	complete(bio->bi_private);
3540 }
3541 
3542 /*
3543  * Submit a flush request to the device if it supports it. Error handling is
3544  * done in the waiting counterpart.
3545  */
3546 static void write_dev_flush(struct btrfs_device *device)
3547 {
3548 	struct request_queue *q = bdev_get_queue(device->bdev);
3549 	struct bio *bio = device->flush_bio;
3550 
3551 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3552 		return;
3553 
3554 	bio_reset(bio);
3555 	bio->bi_end_io = btrfs_end_empty_barrier;
3556 	bio_set_dev(bio, device->bdev);
3557 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3558 	init_completion(&device->flush_wait);
3559 	bio->bi_private = &device->flush_wait;
3560 
3561 	btrfsic_submit_bio(bio);
3562 	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3563 }
3564 
3565 /*
3566  * If the flush bio has been submitted by write_dev_flush, wait for it.
3567  */
3568 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3569 {
3570 	struct bio *bio = device->flush_bio;
3571 
3572 	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3573 		return BLK_STS_OK;
3574 
3575 	clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3576 	wait_for_completion_io(&device->flush_wait);
3577 
3578 	return bio->bi_status;
3579 }
3580 
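/*
 * Decide whether the flush errors are fatal: return -EIO unless the
 * filesystem would still be mountable read-write in degraded mode with
 * the current set of missing/failed devices.
 */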
3581 static int check_barrier_error(struct btrfs_fs_info *fs_info)
3582 {
3583 	if (!btrfs_check_rw_degradable(fs_info, NULL))
3584 		return -EIO;
3585 	return 0;
3586 }
3587 
3588 /*
3589  * send an empty flush down to each device in parallel,
3590  * then wait for them
3591  */
3592 static int barrier_all_devices(struct btrfs_fs_info *info)
3593 {
3594 	struct list_head *head;
3595 	struct btrfs_device *dev;
3596 	int errors_wait = 0;
3597 	blk_status_t ret;
3598 
3599 	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3600 	/* send down all the barriers */
3601 	head = &info->fs_devices->devices;
3602 	list_for_each_entry(dev, head, dev_list) {
3603 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3604 			continue;
3605 		if (!dev->bdev)
3606 			continue;
3607 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3608 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3609 			continue;
3610 
3611 		write_dev_flush(dev);
3612 		dev->last_flush_error = BLK_STS_OK;
3613 	}
3614 
3615 	/* wait for all the barriers */
3616 	list_for_each_entry(dev, head, dev_list) {
3617 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3618 			continue;
3619 		if (!dev->bdev) {
3620 			errors_wait++;
3621 			continue;
3622 		}
3623 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3624 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3625 			continue;
3626 
3627 		ret = wait_dev_flush(dev);
3628 		if (ret) {
3629 			dev->last_flush_error = ret;
3630 			btrfs_dev_stat_inc_and_print(dev,
3631 					BTRFS_DEV_STAT_FLUSH_ERRS);
3632 			errors_wait++;
3633 		}
3634 	}
3635 
3636 	if (errors_wait) {
3637 		/*
3638 		 * We need the status of all the disks to determine the
3639 		 * overall volume status, so the error checking is pushed
3640 		 * out to a separate pass over the devices.
3641 		 */
3642 		return check_barrier_error(info);
3643 	}
3644 	return 0;
3645 }
3646 
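/*
 * Return how many device failures can be tolerated for the block group
 * profiles in @flags: the minimum of the tolerated_failures values of all
 * RAID profiles present in the mask. For example, RAID1 metadata
 * (tolerated_failures == 1) mixed with single data (tolerated_failures
 * == 0) yields 0.
 */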
3647 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3648 {
3649 	int raid_type;
3650 	int min_tolerated = INT_MAX;
3651 
3652 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3653 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3654 		min_tolerated = min(min_tolerated,
3655 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3656 				    tolerated_failures);
3657 
3658 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3659 		if (raid_type == BTRFS_RAID_SINGLE)
3660 			continue;
3661 		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3662 			continue;
3663 		min_tolerated = min(min_tolerated,
3664 				    btrfs_raid_array[raid_type].
3665 				    tolerated_failures);
3666 	}
3667 
3668 	if (min_tolerated == INT_MAX) {
3669 		pr_warn("BTRFS: unknown raid flag: %llu", flags);
3670 		min_tolerated = 0;
3671 	}
3672 
3673 	return min_tolerated;
3674 }
3675 
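/*
 * Write the superblock to all writeable devices: optionally send flush
 * barriers, stamp the per-device items into the super copy, validate it,
 * then submit the writes and wait for them, tolerating at most
 * num_devices - 1 failed devices. The superblock copies live at fixed
 * offsets (64KiB, 64MiB, 256GiB, cf. btrfs_sb_offset()).
 */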
3676 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3677 {
3678 	struct list_head *head;
3679 	struct btrfs_device *dev;
3680 	struct btrfs_super_block *sb;
3681 	struct btrfs_dev_item *dev_item;
3682 	int ret;
3683 	int do_barriers;
3684 	int max_errors;
3685 	int total_errors = 0;
3686 	u64 flags;
3687 
3688 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3689 
3690 	/*
3691 	 * max_mirrors == 0 indicates we're from commit_transaction,
3692 	 * not from fsync, where the tree roots in fs_info may not yet
3693 	 * be consistent on disk.
3694 	 */
3695 	if (max_mirrors == 0)
3696 		backup_super_roots(fs_info);
3697 
3698 	sb = fs_info->super_for_commit;
3699 	dev_item = &sb->dev_item;
3700 
3701 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3702 	head = &fs_info->fs_devices->devices;
3703 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3704 
3705 	if (do_barriers) {
3706 		ret = barrier_all_devices(fs_info);
3707 		if (ret) {
3708 			mutex_unlock(
3709 				&fs_info->fs_devices->device_list_mutex);
3710 			btrfs_handle_fs_error(fs_info, ret,
3711 					      "errors while submitting device barriers.");
3712 			return ret;
3713 		}
3714 	}
3715 
3716 	list_for_each_entry(dev, head, dev_list) {
3717 		if (!dev->bdev) {
3718 			total_errors++;
3719 			continue;
3720 		}
3721 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3722 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3723 			continue;
3724 
3725 		btrfs_set_stack_device_generation(dev_item, 0);
3726 		btrfs_set_stack_device_type(dev_item, dev->type);
3727 		btrfs_set_stack_device_id(dev_item, dev->devid);
3728 		btrfs_set_stack_device_total_bytes(dev_item,
3729 						   dev->commit_total_bytes);
3730 		btrfs_set_stack_device_bytes_used(dev_item,
3731 						  dev->commit_bytes_used);
3732 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3733 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3734 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3735 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3736 		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE);
3737 
3738 		flags = btrfs_super_flags(sb);
3739 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3740 
3741 		ret = btrfs_validate_write_super(fs_info, sb);
3742 		if (ret < 0) {
3743 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3744 			btrfs_handle_fs_error(fs_info, -EUCLEAN,
3745 				"unexpected superblock corruption detected");
3746 			return -EUCLEAN;
3747 		}
3748 
3749 		ret = write_dev_supers(dev, sb, max_mirrors);
3750 		if (ret)
3751 			total_errors++;
3752 	}
3753 	if (total_errors > max_errors) {
3754 		btrfs_err(fs_info, "%d errors while writing supers",
3755 			  total_errors);
3756 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3757 
3758 		/* FUA is masked off if unsupported and can't be the reason */
3759 		btrfs_handle_fs_error(fs_info, -EIO,
3760 				      "%d errors while writing supers",
3761 				      total_errors);
3762 		return -EIO;
3763 	}
3764 
3765 	total_errors = 0;
3766 	list_for_each_entry(dev, head, dev_list) {
3767 		if (!dev->bdev)
3768 			continue;
3769 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3770 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3771 			continue;
3772 
3773 		ret = wait_dev_supers(dev, max_mirrors);
3774 		if (ret)
3775 			total_errors++;
3776 	}
3777 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3778 	if (total_errors > max_errors) {
3779 		btrfs_handle_fs_error(fs_info, -EIO,
3780 				      "%d errors while writing supers",
3781 				      total_errors);
3782 		return -EIO;
3783 	}
3784 	return 0;
3785 }
3786 
3787 /* Drop a fs root from the radix tree and free it. */
3788 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3789 				  struct btrfs_root *root)
3790 {
3791 	spin_lock(&fs_info->fs_roots_radix_lock);
3792 	radix_tree_delete(&fs_info->fs_roots_radix,
3793 			  (unsigned long)root->root_key.objectid);
3794 	spin_unlock(&fs_info->fs_roots_radix_lock);
3795 
3796 	if (btrfs_root_refs(&root->root_item) == 0)
3797 		synchronize_srcu(&fs_info->subvol_srcu);
3798 
3799 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3800 		btrfs_free_log(NULL, root);
3801 		if (root->reloc_root) {
3802 			free_extent_buffer(root->reloc_root->node);
3803 			free_extent_buffer(root->reloc_root->commit_root);
3804 			btrfs_put_fs_root(root->reloc_root);
3805 			root->reloc_root = NULL;
3806 		}
3807 	}
3808 
3809 	if (root->free_ino_pinned)
3810 		__btrfs_remove_free_space_cache(root->free_ino_pinned);
3811 	if (root->free_ino_ctl)
3812 		__btrfs_remove_free_space_cache(root->free_ino_ctl);
3813 	btrfs_free_fs_root(root);
3814 }
3815 
3816 void btrfs_free_fs_root(struct btrfs_root *root)
3817 {
3818 	iput(root->ino_cache_inode);
3819 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3820 	if (root->anon_dev)
3821 		free_anon_bdev(root->anon_dev);
3822 	if (root->subv_writers)
3823 		btrfs_free_subvolume_writers(root->subv_writers);
3824 	free_extent_buffer(root->node);
3825 	free_extent_buffer(root->commit_root);
3826 	kfree(root->free_ino_ctl);
3827 	kfree(root->free_ino_pinned);
3828 	btrfs_put_fs_root(root);
3829 }
3830 
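/*
 * Run orphan cleanup on every fs root. Roots are found in batches with a
 * gang lookup on the radix tree under SRCU and grabbed first, so they
 * cannot disappear while the blocking cleanup runs outside the lock.
 */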
3831 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3832 {
3833 	u64 root_objectid = 0;
3834 	struct btrfs_root *gang[8];
3835 	int i = 0;
3836 	int err = 0;
3837 	unsigned int ret = 0;
3838 	int index;
3839 
3840 	while (1) {
3841 		index = srcu_read_lock(&fs_info->subvol_srcu);
3842 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3843 					     (void **)gang, root_objectid,
3844 					     ARRAY_SIZE(gang));
3845 		if (!ret) {
3846 			srcu_read_unlock(&fs_info->subvol_srcu, index);
3847 			break;
3848 		}
3849 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
3850 
3851 		for (i = 0; i < ret; i++) {
3852 			/* Avoid grabbing roots in dead_roots */
3853 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3854 				gang[i] = NULL;
3855 				continue;
3856 			}
3857 			/* Grab all the search results for later use */
3858 			gang[i] = btrfs_grab_fs_root(gang[i]);
3859 		}
3860 		srcu_read_unlock(&fs_info->subvol_srcu, index);
3861 
3862 		for (i = 0; i < ret; i++) {
3863 			if (!gang[i])
3864 				continue;
3865 			root_objectid = gang[i]->root_key.objectid;
3866 			err = btrfs_orphan_cleanup(gang[i]);
3867 			if (err)
3868 				break;
3869 			btrfs_put_fs_root(gang[i]);
3870 		}
3871 		root_objectid++;
3872 	}
3873 
3874 	/* release the uncleaned roots due to error */
3875 	for (; i < ret; i++) {
3876 		if (gang[i])
3877 			btrfs_put_fs_root(gang[i]);
3878 	}
3879 	return err;
3880 }
3881 
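/*
 * Flush delayed iputs, wait for any in-flight cleanup work to finish,
 * then join and commit the current transaction.
 */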
3882 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3883 {
3884 	struct btrfs_root *root = fs_info->tree_root;
3885 	struct btrfs_trans_handle *trans;
3886 
3887 	mutex_lock(&fs_info->cleaner_mutex);
3888 	btrfs_run_delayed_iputs(fs_info);
3889 	mutex_unlock(&fs_info->cleaner_mutex);
3890 	wake_up_process(fs_info->cleaner_kthread);
3891 
3892 	/* wait until ongoing cleanup work is done */
3893 	down_write(&fs_info->cleanup_work_sem);
3894 	up_write(&fs_info->cleanup_work_sem);
3895 
3896 	trans = btrfs_join_transaction(root);
3897 	if (IS_ERR(trans))
3898 		return PTR_ERR(trans);
3899 	return btrfs_commit_transaction(trans);
3900 }
3901 
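/*
 * Tear down the filesystem at unmount: park the cleaner, stop background
 * work (qgroup rescan, uuid scan, balance, dev-replace, scrub, defrag),
 * commit the final transaction (or run the error-path cleanup), stop the
 * kthreads and workers, and free all remaining state.
 */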
3902 void close_ctree(struct btrfs_fs_info *fs_info)
3903 {
3904 	int ret;
3905 
3906 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3907 	/*
3908 	 * We don't want the cleaner to start new transactions, add more delayed
3909 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
3910 	 * because that frees the task_struct, and the transaction kthread might
3911 	 * still try to wake up the cleaner.
3912 	 */
3913 	kthread_park(fs_info->cleaner_kthread);
3914 
3915 	/* wait for the qgroup rescan worker to stop */
3916 	btrfs_qgroup_wait_for_completion(fs_info, false);
3917 
3918 	/* wait for the uuid_scan task to finish */
3919 	down(&fs_info->uuid_tree_rescan_sem);
3920 	/* avoid complaints from lockdep et al., set sem back to initial state */
3921 	up(&fs_info->uuid_tree_rescan_sem);
3922 
3923 	/* pause restriper - we want to resume on mount */
3924 	btrfs_pause_balance(fs_info);
3925 
3926 	btrfs_dev_replace_suspend_for_unmount(fs_info);
3927 
3928 	btrfs_scrub_cancel(fs_info);
3929 
3930 	/* wait for any defraggers to finish */
3931 	wait_event(fs_info->transaction_wait,
3932 		   (atomic_read(&fs_info->defrag_running) == 0));
3933 
3934 	/* clear out the rbtree of defraggable inodes */
3935 	btrfs_cleanup_defrag_inodes(fs_info);
3936 
3937 	cancel_work_sync(&fs_info->async_reclaim_work);
3938 
3939 	if (!sb_rdonly(fs_info->sb)) {
3940 		/*
3941 		 * The cleaner kthread is parked, so do one final pass over
3942 		 * unused block groups.
3943 		 */
3944 		btrfs_delete_unused_bgs(fs_info);
3945 
3946 		ret = btrfs_commit_super(fs_info);
3947 		if (ret)
3948 			btrfs_err(fs_info, "commit super ret %d", ret);
3949 	}
3950 
3951 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
3952 	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
3953 		btrfs_error_commit_super(fs_info);
3954 
3955 	kthread_stop(fs_info->transaction_kthread);
3956 	kthread_stop(fs_info->cleaner_kthread);
3957 
3958 	ASSERT(list_empty(&fs_info->delayed_iputs));
3959 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3960 
3961 	btrfs_free_qgroup_config(fs_info);
3962 	ASSERT(list_empty(&fs_info->delalloc_roots));
3963 
3964 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3965 		btrfs_info(fs_info, "at unmount delalloc count %lld",
3966 		       percpu_counter_sum(&fs_info->delalloc_bytes));
3967 	}
3968 
3969 	btrfs_sysfs_remove_mounted(fs_info);
3970 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3971 
3972 	btrfs_free_fs_roots(fs_info);
3973 
3974 	btrfs_put_block_group_cache(fs_info);
3975 
3976 	/*
3977 	 * We must make sure that no read request can be submitted
3978 	 * after we have stopped all the workers.
3979 	 */
3980 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3981 	btrfs_stop_all_workers(fs_info);
3982 
3983 	btrfs_free_block_groups(fs_info);
3984 
3985 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3986 	free_root_pointers(fs_info, 1);
3987 
3988 	iput(fs_info->btree_inode);
3989 
3990 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3991 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
3992 		btrfsic_unmount(fs_info->fs_devices);
3993 #endif
3994 
3995 	btrfs_close_devices(fs_info->fs_devices);
3996 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3997 
3998 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3999 	percpu_counter_destroy(&fs_info->delalloc_bytes);
4000 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
4001 	cleanup_srcu_struct(&fs_info->subvol_srcu);
4002 
4003 	btrfs_free_stripe_hash_table(fs_info);
4004 	btrfs_free_ref_cache(fs_info);
4005 
4006 	while (!list_empty(&fs_info->pinned_chunks)) {
4007 		struct extent_map *em;
4008 
4009 		em = list_first_entry(&fs_info->pinned_chunks,
4010 				      struct extent_map, list);
4011 		list_del_init(&em->list);
4012 		free_extent_map(em);
4013 	}
4014 }
4015 
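/*
 * Check that an extent buffer is up to date and that its generation
 * matches @parent_transid. Returns nonzero when the buffer is good, 0
 * when it is not, and -EAGAIN when @atomic is set and the check would
 * have to block.
 */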
4016 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4017 			  int atomic)
4018 {
4019 	int ret;
4020 	struct inode *btree_inode = buf->pages[0]->mapping->host;
4021 
4022 	ret = extent_buffer_uptodate(buf);
4023 	if (!ret)
4024 		return ret;
4025 
4026 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4027 				    parent_transid, atomic);
4028 	if (ret == -EAGAIN)
4029 		return ret;
4030 	return !ret;
4031 }
4032 
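/*
 * Mark a tree block dirty and account it in the dirty metadata counters.
 * The buffer must be tree-locked and should belong to the currently
 * running transaction (a transid mismatch triggers a warning).
 */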
4033 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4034 {
4035 	struct btrfs_fs_info *fs_info;
4036 	struct btrfs_root *root;
4037 	u64 transid = btrfs_header_generation(buf);
4038 	int was_dirty;
4039 
4040 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4041 	/*
4042 	 * This is a fast path so only do this check if we have sanity tests
4043 	 * enabled.  Normal people shouldn't be marking unmapped buffers
4044 	 * dirty outside of the sanity tests.
4045 	 */
4046 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4047 		return;
4048 #endif
4049 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4050 	fs_info = root->fs_info;
4051 	btrfs_assert_tree_locked(buf);
4052 	if (transid != fs_info->generation)
4053 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4054 			buf->start, transid, fs_info->generation);
4055 	was_dirty = set_extent_buffer_dirty(buf);
4056 	if (!was_dirty)
4057 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4058 					 buf->len,
4059 					 fs_info->dirty_metadata_batch);
4060 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4061 	/*
4062 	 * btrfs_mark_buffer_dirty() can be called with the item pointer
4063 	 * set but the item data not yet updated, so only check item
4064 	 * pointers here, not item data.
4065 	 */
4066 	if (btrfs_header_level(buf) == 0 &&
4067 	    btrfs_check_leaf_relaxed(fs_info, buf)) {
4068 		btrfs_print_leaf(buf);
4069 		ASSERT(0);
4070 	}
4071 #endif
4072 }
4073 
4074 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4075 					int flush_delayed)
4076 {
4077 	/*
4078 	 * It looks as though older kernels can get into trouble with
4079 	 * this code: they end up stuck in balance_dirty_pages() forever.
4080 	 */
4081 	int ret;
4082 
4083 	if (current->flags & PF_MEMALLOC)
4084 		return;
4085 
4086 	if (flush_delayed)
4087 		btrfs_balance_delayed_items(fs_info);
4088 
4089 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4090 				     BTRFS_DIRTY_METADATA_THRESH,
4091 				     fs_info->dirty_metadata_batch);
4092 	if (ret > 0) {
4093 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4094 	}
4095 }
4096 
4097 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4098 {
4099 	__btrfs_btree_balance_dirty(fs_info, 1);
4100 }
4101 
4102 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4103 {
4104 	__btrfs_btree_balance_dirty(fs_info, 0);
4105 }
4106 
4107 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
4108 		      struct btrfs_key *first_key)
4109 {
4110 	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4111 	struct btrfs_fs_info *fs_info = root->fs_info;
4112 
4113 	return btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
4114 					      level, first_key);
4115 }
4116 
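/*
 * Error-path counterpart of btrfs_commit_super(): tear down the remaining
 * transactions instead of committing them, then flush delayed iputs and
 * wait for pending cleanup work.
 */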
4117 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4118 {
4119 	/* cleanup FS via transaction */
4120 	btrfs_cleanup_transaction(fs_info);
4121 
4122 	mutex_lock(&fs_info->cleaner_mutex);
4123 	btrfs_run_delayed_iputs(fs_info);
4124 	mutex_unlock(&fs_info->cleaner_mutex);
4125 
4126 	down_write(&fs_info->cleanup_work_sem);
4127 	up_write(&fs_info->cleanup_work_sem);
4128 }
4129 
4130 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4131 {
4132 	struct btrfs_ordered_extent *ordered;
4133 
4134 	spin_lock(&root->ordered_extent_lock);
4135 	/*
4136 	 * Setting the IOERR bit just short circuits the ordered completion
4137 	 * path, which still makes sure the ordered extent gets cleaned up.
4138 	 */
4139 	list_for_each_entry(ordered, &root->ordered_extents,
4140 			    root_extent_list)
4141 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4142 	spin_unlock(&root->ordered_extent_lock);
4143 }
4144 
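/*
 * Walk all roots that have pending ordered extents and abort those
 * extents with an io error so their completion cleans them up.
 */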
4145 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4146 {
4147 	struct btrfs_root *root;
4148 	struct list_head splice;
4149 
4150 	INIT_LIST_HEAD(&splice);
4151 
4152 	spin_lock(&fs_info->ordered_root_lock);
4153 	list_splice_init(&fs_info->ordered_roots, &splice);
4154 	while (!list_empty(&splice)) {
4155 		root = list_first_entry(&splice, struct btrfs_root,
4156 					ordered_root);
4157 		list_move_tail(&root->ordered_root,
4158 			       &fs_info->ordered_roots);
4159 
4160 		spin_unlock(&fs_info->ordered_root_lock);
4161 		btrfs_destroy_ordered_extents(root);
4162 
4163 		cond_resched();
4164 		spin_lock(&fs_info->ordered_root_lock);
4165 	}
4166 	spin_unlock(&fs_info->ordered_root_lock);
4167 }
4168 
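/*
 * Drop all delayed refs of @trans during error cleanup: walk the head
 * rbtree, detach and put every ref node, and pin the byte range of heads
 * that still had must_insert_reserved set so the space accounting stays
 * consistent.
 */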
4169 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4170 				      struct btrfs_fs_info *fs_info)
4171 {
4172 	struct rb_node *node;
4173 	struct btrfs_delayed_ref_root *delayed_refs;
4174 	struct btrfs_delayed_ref_node *ref;
4175 	int ret = 0;
4176 
4177 	delayed_refs = &trans->delayed_refs;
4178 
4179 	spin_lock(&delayed_refs->lock);
4180 	if (atomic_read(&delayed_refs->num_entries) == 0) {
4181 		spin_unlock(&delayed_refs->lock);
4182 		btrfs_info(fs_info, "delayed_refs has NO entry");
4183 		return ret;
4184 	}
4185 
4186 	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4187 		struct btrfs_delayed_ref_head *head;
4188 		struct rb_node *n;
4189 		bool pin_bytes = false;
4190 
4191 		head = rb_entry(node, struct btrfs_delayed_ref_head,
4192 				href_node);
4193 		if (!mutex_trylock(&head->mutex)) {
4194 			refcount_inc(&head->refs);
4195 			spin_unlock(&delayed_refs->lock);
4196 
4197 			mutex_lock(&head->mutex);
4198 			mutex_unlock(&head->mutex);
4199 			btrfs_put_delayed_ref_head(head);
4200 			spin_lock(&delayed_refs->lock);
4201 			continue;
4202 		}
4203 		spin_lock(&head->lock);
4204 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4205 			ref = rb_entry(n, struct btrfs_delayed_ref_node,
4206 				       ref_node);
4207 			ref->in_tree = 0;
4208 			rb_erase_cached(&ref->ref_node, &head->ref_tree);
4209 			RB_CLEAR_NODE(&ref->ref_node);
4210 			if (!list_empty(&ref->add_list))
4211 				list_del(&ref->add_list);
4212 			atomic_dec(&delayed_refs->num_entries);
4213 			btrfs_put_delayed_ref(ref);
4214 		}
4215 		if (head->must_insert_reserved)
4216 			pin_bytes = true;
4217 		btrfs_free_delayed_extent_op(head->extent_op);
4218 		delayed_refs->num_heads--;
4219 		if (head->processing == 0)
4220 			delayed_refs->num_heads_ready--;
4221 		atomic_dec(&delayed_refs->num_entries);
4222 		rb_erase_cached(&head->href_node, &delayed_refs->href_root);
4223 		RB_CLEAR_NODE(&head->href_node);
4224 		spin_unlock(&head->lock);
4225 		spin_unlock(&delayed_refs->lock);
4226 		mutex_unlock(&head->mutex);
4227 
4228 		if (pin_bytes)
4229 			btrfs_pin_extent(fs_info, head->bytenr,
4230 					 head->num_bytes, 1);
4231 		btrfs_put_delayed_ref_head(head);
4232 		cond_resched();
4233 		spin_lock(&delayed_refs->lock);
4234 	}
4235 
4236 	spin_unlock(&delayed_refs->lock);
4237 
4238 	return ret;
4239 }
4240 
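/*
 * During error cleanup, unlink every inode from the root's delalloc list
 * and invalidate its pages so no dirty data survives the teardown.
 */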
4241 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4242 {
4243 	struct btrfs_inode *btrfs_inode;
4244 	struct list_head splice;
4245 
4246 	INIT_LIST_HEAD(&splice);
4247 
4248 	spin_lock(&root->delalloc_lock);
4249 	list_splice_init(&root->delalloc_inodes, &splice);
4250 
4251 	while (!list_empty(&splice)) {
4252 		struct inode *inode = NULL;
4253 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4254 					       delalloc_inodes);
4255 		__btrfs_del_delalloc_inode(root, btrfs_inode);
4256 		spin_unlock(&root->delalloc_lock);
4257 
4258 		/*
4259 		 * Make sure we get a live inode and that it won't disappear
4260 		 * in the meantime.
4261 		 */
4262 		inode = igrab(&btrfs_inode->vfs_inode);
4263 		if (inode) {
4264 			invalidate_inode_pages2(inode->i_mapping);
4265 			iput(inode);
4266 		}
4267 		spin_lock(&root->delalloc_lock);
4268 	}
4269 	spin_unlock(&root->delalloc_lock);
4270 }
4271 
4272 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4273 {
4274 	struct btrfs_root *root;
4275 	struct list_head splice;
4276 
4277 	INIT_LIST_HEAD(&splice);
4278 
4279 	spin_lock(&fs_info->delalloc_root_lock);
4280 	list_splice_init(&fs_info->delalloc_roots, &splice);
4281 	while (!list_empty(&splice)) {
4282 		root = list_first_entry(&splice, struct btrfs_root,
4283 					 delalloc_root);
4284 		root = btrfs_grab_fs_root(root);
4285 		BUG_ON(!root);
4286 		spin_unlock(&fs_info->delalloc_root_lock);
4287 
4288 		btrfs_destroy_delalloc_inodes(root);
4289 		btrfs_put_fs_root(root);
4290 
4291 		spin_lock(&fs_info->delalloc_root_lock);
4292 	}
4293 	spin_unlock(&fs_info->delalloc_root_lock);
4294 }
4295 
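/*
 * Error-path counterpart of writing out dirty tree blocks: clear @mark
 * from @dirty_pages, wait for any writeback, and drop the dirty bit of
 * every extent buffer found in the marked ranges, releasing the stale
 * buffers.
 */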
4296 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4297 					struct extent_io_tree *dirty_pages,
4298 					int mark)
4299 {
4300 	int ret;
4301 	struct extent_buffer *eb;
4302 	u64 start = 0;
4303 	u64 end;
4304 
4305 	while (1) {
4306 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4307 					    mark, NULL);
4308 		if (ret)
4309 			break;
4310 
4311 		clear_extent_bits(dirty_pages, start, end, mark);
4312 		while (start <= end) {
4313 			eb = find_extent_buffer(fs_info, start);
4314 			start += fs_info->nodesize;
4315 			if (!eb)
4316 				continue;
4317 			wait_on_extent_buffer_writeback(eb);
4318 
4319 			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4320 					       &eb->bflags))
4321 				clear_extent_buffer_dirty(eb);
4322 			free_extent_buffer_stale(eb);
4323 		}
4324 	}
4325 
4326 	return ret;
4327 }
4328 
4329 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4330 				       struct extent_io_tree *pinned_extents)
4331 {
4332 	struct extent_io_tree *unpin;
4333 	u64 start;
4334 	u64 end;
4335 	int ret;
4336 	bool loop = true;
4337 
4338 	unpin = pinned_extents;
4339 again:
4340 	while (1) {
4341 		/*
4342 		 * The btrfs_finish_extent_commit() may get the same range as
4343 		 * ours between find_first_extent_bit and clear_extent_dirty.
4344 		 * Hence, hold the unused_bg_unpin_mutex to avoid double
4345 		 * unpinning the same extent range.
4346 		 */
4347 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4348 		ret = find_first_extent_bit(unpin, 0, &start, &end,
4349 					    EXTENT_DIRTY, NULL);
4350 		if (ret) {
4351 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4352 			break;
4353 		}
4354 
4355 		clear_extent_dirty(unpin, start, end);
4356 		btrfs_error_unpin_extent_range(fs_info, start, end);
4357 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4358 		cond_resched();
4359 	}
4360 
4361 	if (loop) {
4362 		if (unpin == &fs_info->freed_extents[0])
4363 			unpin = &fs_info->freed_extents[1];
4364 		else
4365 			unpin = &fs_info->freed_extents[0];
4366 		loop = false;
4367 		goto again;
4368 	}
4369 
4370 	return 0;
4371 }
4372 
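/*
 * Invalidate the free space cache inode of a block group that was under
 * io and drop the block group reference that was held for that io.
 */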
4373 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4374 {
4375 	struct inode *inode;
4376 
4377 	inode = cache->io_ctl.inode;
4378 	if (inode) {
4379 		invalidate_inode_pages2(inode->i_mapping);
4380 		BTRFS_I(inode)->generation = 0;
4381 		cache->io_ctl.inode = NULL;
4382 		iput(inode);
4383 	}
4384 	btrfs_put_block_group(cache);
4385 }
4386 
4387 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4388 			     struct btrfs_fs_info *fs_info)
4389 {
4390 	struct btrfs_block_group_cache *cache;
4391 
4392 	spin_lock(&cur_trans->dirty_bgs_lock);
4393 	while (!list_empty(&cur_trans->dirty_bgs)) {
4394 		cache = list_first_entry(&cur_trans->dirty_bgs,
4395 					 struct btrfs_block_group_cache,
4396 					 dirty_list);
4397 
4398 		if (!list_empty(&cache->io_list)) {
4399 			spin_unlock(&cur_trans->dirty_bgs_lock);
4400 			list_del_init(&cache->io_list);
4401 			btrfs_cleanup_bg_io(cache);
4402 			spin_lock(&cur_trans->dirty_bgs_lock);
4403 		}
4404 
4405 		list_del_init(&cache->dirty_list);
4406 		spin_lock(&cache->lock);
4407 		cache->disk_cache_state = BTRFS_DC_ERROR;
4408 		spin_unlock(&cache->lock);
4409 
4410 		spin_unlock(&cur_trans->dirty_bgs_lock);
4411 		btrfs_put_block_group(cache);
4412 		spin_lock(&cur_trans->dirty_bgs_lock);
4413 	}
4414 	spin_unlock(&cur_trans->dirty_bgs_lock);
4415 
4416 	/*
4417 	 * Refer to the definition of the io_bgs member for details on why
4418 	 * it is safe to use it without any locking.
4419 	 */
4420 	while (!list_empty(&cur_trans->io_bgs)) {
4421 		cache = list_first_entry(&cur_trans->io_bgs,
4422 					 struct btrfs_block_group_cache,
4423 					 io_list);
4424 
4425 		list_del_init(&cache->io_list);
4426 		spin_lock(&cache->lock);
4427 		cache->disk_cache_state = BTRFS_DC_ERROR;
4428 		spin_unlock(&cache->lock);
4429 		btrfs_cleanup_bg_io(cache);
4430 	}
4431 }
4432 
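/*
 * Release everything a single transaction may still hold during error
 * cleanup: dirty block groups, delayed refs, delayed inodes, dirty tree
 * blocks and pinned extents. The transaction state is advanced so that
 * anyone blocked on it is woken up.
 */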
4433 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4434 				   struct btrfs_fs_info *fs_info)
4435 {
4436 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4437 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4438 	ASSERT(list_empty(&cur_trans->io_bgs));
4439 
4440 	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4441 
4442 	cur_trans->state = TRANS_STATE_COMMIT_START;
4443 	wake_up(&fs_info->transaction_blocked_wait);
4444 
4445 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4446 	wake_up(&fs_info->transaction_wait);
4447 
4448 	btrfs_destroy_delayed_inodes(fs_info);
4449 	btrfs_assert_delayed_root_empty(fs_info);
4450 
4451 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4452 				     EXTENT_DIRTY);
4453 	btrfs_destroy_pinned_extent(fs_info,
4454 				    fs_info->pinned_extents);
4455 
4456 	cur_trans->state = TRANS_STATE_COMPLETED;
4457 	wake_up(&cur_trans->commit_wait);
4458 }
4459 
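/*
 * Error cleanup of all transactions on the filesystem: transactions that
 * already started committing are waited for, the rest are torn down via
 * btrfs_cleanup_one_transaction(), and finally the remaining per-fs state
 * (ordered extents, delayed inodes, pinned extents, delalloc inodes) is
 * destroyed.
 */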
4460 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4461 {
4462 	struct btrfs_transaction *t;
4463 
4464 	mutex_lock(&fs_info->transaction_kthread_mutex);
4465 
4466 	spin_lock(&fs_info->trans_lock);
4467 	while (!list_empty(&fs_info->trans_list)) {
4468 		t = list_first_entry(&fs_info->trans_list,
4469 				     struct btrfs_transaction, list);
4470 		if (t->state >= TRANS_STATE_COMMIT_START) {
4471 			refcount_inc(&t->use_count);
4472 			spin_unlock(&fs_info->trans_lock);
4473 			btrfs_wait_for_commit(fs_info, t->transid);
4474 			btrfs_put_transaction(t);
4475 			spin_lock(&fs_info->trans_lock);
4476 			continue;
4477 		}
4478 		if (t == fs_info->running_transaction) {
4479 			t->state = TRANS_STATE_COMMIT_DOING;
4480 			spin_unlock(&fs_info->trans_lock);
4481 			/*
4482 			 * We wait for 0 num_writers since we don't hold a trans
4483 			 * handle open currently for this transaction.
4484 			 */
4485 			wait_event(t->writer_wait,
4486 				   atomic_read(&t->num_writers) == 0);
4487 		} else {
4488 			spin_unlock(&fs_info->trans_lock);
4489 		}
4490 		btrfs_cleanup_one_transaction(t, fs_info);
4491 
4492 		spin_lock(&fs_info->trans_lock);
4493 		if (t == fs_info->running_transaction)
4494 			fs_info->running_transaction = NULL;
4495 		list_del_init(&t->list);
4496 		spin_unlock(&fs_info->trans_lock);
4497 
4498 		btrfs_put_transaction(t);
4499 		trace_btrfs_transaction_commit(fs_info->tree_root);
4500 		spin_lock(&fs_info->trans_lock);
4501 	}
4502 	spin_unlock(&fs_info->trans_lock);
4503 	btrfs_destroy_all_ordered_extents(fs_info);
4504 	btrfs_destroy_delayed_inodes(fs_info);
4505 	btrfs_assert_delayed_root_empty(fs_info);
4506 	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4507 	btrfs_destroy_all_delalloc_inodes(fs_info);
4508 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4509 
4510 	return 0;
4511 }
4512 
4513 static const struct extent_io_ops btree_extent_io_ops = {
4514 	/* mandatory callbacks */
4515 	.submit_bio_hook = btree_submit_bio_hook,
4516 	.readpage_end_io_hook = btree_readpage_end_io_hook,
4517 	.readpage_io_failed_hook = btree_io_failed_hook,
4518 
4519 	/* optional callbacks */
4520 };
4521