/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}
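
/*
 * Illustrative sketch only (the example_* helper is hypothetical, not part
 * of btrfs): this is how a metadata read bio gets wired to the machinery
 * above.  btrfs_bio_wq_end_io() saves the bio's original
 * ->bi_end_io/->bi_private pair in a btrfs_end_io_wq and redirects
 * completion to end_workqueue_bio(), which queues end_workqueue_fn() so
 * checksum verification runs in task context before the saved end_io is
 * finally invoked.
 */
static inline blk_status_t example_attach_metadata_endio(
		struct btrfs_fs_info *fs_info, struct bio *bio)
{
	/* redirect this bio's completion into the endio workqueues */
	return btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_METADATA);
}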

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct btrfs_fs_info *fs_info;
	struct bio *bio;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional; it can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other, so they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warnings in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs updating as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error "BTRFS_MAX_LEVEL changed, update the lockdep keysets below"
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif
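
/*
 * Illustrative example (hypothetical helper, not used by btrfs itself): a
 * level-2 extent buffer owned by the extent tree lands in the lock class
 * that btrfs_init_lockdep() named "btrfs-extent-02".
 */
static inline void example_classify_eb(struct extent_buffer *eb)
{
	btrfs_set_buffer_lockdep_class(BTRFS_EXTENT_TREE_OBJECTID, eb, 2);
}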

/*
 * extents on the btree inode are pretty simple: there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}
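
/*
 * Minimal sketch of the checksum convention above (hypothetical helper,
 * shown for illustration): metadata checksums are crc32c seeded with all
 * ones, and btrfs_csum_final() stores the inverted result little-endian.
 * Only the first 4 bytes of the BTRFS_CSUM_SIZE-byte csum area are used
 * by crc32c.
 */
static inline void example_csum_buffer(const char *data, size_t len,
				       u8 *result)
{
	u32 crc = ~(u32)0;

	crc = btrfs_csum_data(data, crc, len);
	btrfs_csum_final(crc, result);
}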

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size, GFP_NOFS);
		if (!result)
			return -ENOMEM;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range; the unused space is expected
		 * to be filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
				csum_type);
		ret = 1;
	}

	return ret;
}
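
/*
 * Worked example of the layout checked above: the first BTRFS_CSUM_SIZE (32)
 * bytes of the 4096-byte superblock hold the on-disk csum, the crc32c covers
 * bytes [32, BTRFS_SUPER_INFO_SIZE), and the computed little-endian result
 * must match the first four csum bytes.
 */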

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted,
		 * so there is no reason to read the other copies; they won't
		 * be any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}
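
/*
 * Worked example of the retry order above (illustrative numbers): with
 * num_copies == 3 and the initial read served by mirror 1 failing,
 * failed_mirror becomes 1 and the loop retries mirror 2, then mirror 3,
 * skipping the known-bad mirror before giving up.
 */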

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

#define CORRUPT(reason, eb, root, slot)					\
	btrfs_crit(root->fs_info,					\
		   "corrupt %s, %s: block=%llu, root=%llu, slot=%d",	\
		   btrfs_header_level(eb) == 0 ? "leaf" : "node",	\
		   reason, btrfs_header_bytenr(eb), root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	/*
	 * Extent buffers from a relocation tree have an owner field that
	 * corresponds to the subvolume tree they are based on.  So just from
	 * an extent buffer alone we cannot find out the id of the
	 * corresponding subvolume tree, and therefore cannot figure out if
	 * the extent buffer corresponds to the root of the relocation tree
	 * or not.  So skip this check for relocation trees.
	 */
	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
		struct btrfs_root *check_root;

		key.objectid = btrfs_header_owner(leaf);
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		check_root = btrfs_get_fs_root(fs_info, &key, false);
		/*
		 * The only reason we also check NULL here is that during
		 * open_ctree() some roots have not yet been set up.
		 */
		if (!IS_ERR_OR_NULL(check_root)) {
			struct extent_buffer *eb;

			eb = btrfs_root_node(check_root);
			/* if leaf is the root, then it's fine */
			if (leaf != eb) {
				CORRUPT("non-root leaf's nritems is 0",
					leaf, check_root, 0);
				free_extent_buffer(eb);
				return -EIO;
			}
			free_extent_buffer(eb);
		}
		return 0;
	}

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(fs_info)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense.  We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offsets and ends are right; remember that the
		 * item data starts at the end of the leaf and grows towards
		 * the front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent with each other
		 * but all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}
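
/*
 * Worked example of the leaf layout invariants checked above (assuming a
 * 16KiB nodesize, so BTRFS_LEAF_DATA_SIZE == 16283 after the 101-byte
 * header): item 0 with offset 16200 and size 83 ends exactly at 16283, and
 * item 1's data must then end at 16200, since item data grows from the end
 * of the leaf towards the front while the item headers grow from the front.
 */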

static int check_node(struct btrfs_root *root, struct extent_buffer *node)
{
	unsigned long nr = btrfs_header_nritems(node);
	struct btrfs_key key, next_key;
	int slot;
	u64 bytenr;
	int ret = 0;

	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
		btrfs_crit(root->fs_info,
			   "corrupt node: block %llu root %llu nritems %lu",
			   node->start, root->objectid, nr);
		return -EIO;
	}

	for (slot = 0; slot < nr - 1; slot++) {
		bytenr = btrfs_node_blockptr(node, slot);
		btrfs_node_key_to_cpu(node, &key, slot);
		btrfs_node_key_to_cpu(node, &next_key, slot + 1);

		if (!bytenr) {
			CORRUPT("invalid item slot", node, root, slot);
			ret = -EIO;
			goto out;
		}

		if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
			CORRUPT("bad key order", node, root, slot);
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/*
	 * The pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all the other checks below.
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
			     found_start, eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d",
			  (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try to read the other copies of this block and just
	 * return -EIO.
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && check_node(root, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * Our I/O error hook is going to decrement io_pages again,
		 * so we have to make sure it has something to decrement.
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, -EIO);
	return -EIO;	/* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->thread_pool_size,
				    info->fs_devices->open_devices);
	return 256 * limit;
}
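
/*
 * Worked example for the limit above (illustrative numbers): with a thread
 * pool of 8 but only 4 open devices, the cap is 256 * min(8, 4) = 1024
 * in-flight async submits.
 */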

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = async->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	async->submit_bio_done(async->private_data, async->bio, async->mirror_num,
			       async->bio_flags, async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_hook_t *submit_bio_start,
				 extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->fs_info = fs_info;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->status = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	      atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t __btree_submit_bio_start(void *private_data, struct bio *bio,
					     int mirror_num, unsigned long bio_flags,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just checksum the bio here; the done
	 * hook below takes care of btrfs_map_bio.
	 */
	return btree_csum_one_bio(bio);
}

static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
					    int mirror_num, unsigned long bio_flags,
					    u64 bio_offset)
{
	struct inode *inode = private_data;
	blk_status_t ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	return ret;
}

static int check_async_write(unsigned long bio_flags)
{
	if (bio_flags & EXTENT_BIO_TREE_LOG)
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(bio_flags);
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here;
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem-specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;
}

void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshoted, 0);
	atomic64_set(&root->qgroup_meta_rsv, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
		gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer_fsid(leaf, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer_fsid(root->node, fs_info->fsid);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* the caller is responsible for calling free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}
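
/*
 * Illustrative sketch (hypothetical helper mirroring how callers use
 * btrfs_get_fs_root() above): look up the data relocation root, reading it
 * from disk and caching it in fs_roots_radix on first use.
 */
static inline struct btrfs_root *example_get_dreloc_root(
		struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key = {
		.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};

	return btrfs_get_fs_root(fs_info, &key, true);
}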

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}
1830 
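/*
 * The cleaner kthread loops over the background cleanup work: running
 * delayed iputs, cleaning one deleted snapshot per pass, defragging
 * inodes and deleting unused block groups, sleeping between passes.
 */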
1831 static int cleaner_kthread(void *arg)
1832 {
1833 	struct btrfs_root *root = arg;
1834 	struct btrfs_fs_info *fs_info = root->fs_info;
1835 	int again;
1836 	struct btrfs_trans_handle *trans;
1837 
1838 	do {
1839 		again = 0;
1840 
1841 		/* Make the cleaner go to sleep early. */
1842 		if (btrfs_need_cleaner_sleep(fs_info))
1843 			goto sleep;
1844 
1845 		/*
1846 		 * Do not do anything if we might cause open_ctree() to block
1847 		 * before we have finished mounting the filesystem.
1848 		 */
1849 		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1850 			goto sleep;
1851 
1852 		if (!mutex_trylock(&fs_info->cleaner_mutex))
1853 			goto sleep;
1854 
1855 		/*
1856 		 * The status of the fs may have changed between the check
1857 		 * and the trylock above, so check again.
1858 		 */
1859 		if (btrfs_need_cleaner_sleep(fs_info)) {
1860 			mutex_unlock(&fs_info->cleaner_mutex);
1861 			goto sleep;
1862 		}
1863 
1864 		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
1865 		btrfs_run_delayed_iputs(fs_info);
1866 		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
1867 
1868 		again = btrfs_clean_one_deleted_snapshot(root);
1869 		mutex_unlock(&fs_info->cleaner_mutex);
1870 
1871 		/*
1872 		 * The defragger has dealt with the R/O remount and umount,
1873 		 * needn't do anything special here.
1874 		 */
1875 		btrfs_run_defrag_inodes(fs_info);
1876 
1877 		/*
1878 		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1879 		 * with relocation (btrfs_relocate_chunk) and relocation
1880 		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1881 		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1882 		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1883 		 * unused block groups.
1884 		 */
1885 		btrfs_delete_unused_bgs(fs_info);
1886 sleep:
1887 		if (!again) {
1888 			set_current_state(TASK_INTERRUPTIBLE);
1889 			if (!kthread_should_stop())
1890 				schedule();
1891 			__set_current_state(TASK_RUNNING);
1892 		}
1893 	} while (!kthread_should_stop());
1894 
1895 	/*
1896 	 * The transaction kthread is stopped before us and wakes us up.
1897 	 * However we might have started a new transaction and COWed some
1898 	 * tree blocks when deleting unused block groups for example. So
1899 	 * make sure we commit the transaction we started to have a clean
1900 	 * shutdown when evicting the btree inode - if it has dirty pages
1901 	 * when we do the final iput() on it, eviction will trigger a
1902 	 * writeback for it which will fail with null pointer dereferences
1903 	 * since work queues and other resources were already released and
1904 	 * destroyed by the time the iput/eviction/writeback is made.
1905 	 */
1906 	trans = btrfs_attach_transaction(root);
1907 	if (IS_ERR(trans)) {
1908 		if (PTR_ERR(trans) != -ENOENT)
1909 			btrfs_err(fs_info,
1910 				  "cleaner transaction attach returned %ld",
1911 				  PTR_ERR(trans));
1912 	} else {
1913 		int ret;
1914 
1915 		ret = btrfs_commit_transaction(trans);
1916 		if (ret)
1917 			btrfs_err(fs_info,
1918 				  "cleaner open transaction commit returned %d",
1919 				  ret);
1920 	}
1921 
1922 	return 0;
1923 }
1924 
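/*
 * The transaction kthread periodically commits the running transaction
 * once it is older than fs_info->commit_interval or has been blocked,
 * and kicks the cleaner kthread after each pass.
 */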
1925 static int transaction_kthread(void *arg)
1926 {
1927 	struct btrfs_root *root = arg;
1928 	struct btrfs_fs_info *fs_info = root->fs_info;
1929 	struct btrfs_trans_handle *trans;
1930 	struct btrfs_transaction *cur;
1931 	u64 transid;
1932 	unsigned long now;
1933 	unsigned long delay;
1934 	bool cannot_commit;
1935 
1936 	do {
1937 		cannot_commit = false;
1938 		delay = HZ * fs_info->commit_interval;
1939 		mutex_lock(&fs_info->transaction_kthread_mutex);
1940 
1941 		spin_lock(&fs_info->trans_lock);
1942 		cur = fs_info->running_transaction;
1943 		if (!cur) {
1944 			spin_unlock(&fs_info->trans_lock);
1945 			goto sleep;
1946 		}
1947 
1948 		now = get_seconds();
1949 		if (cur->state < TRANS_STATE_BLOCKED &&
1950 		    (now < cur->start_time ||
1951 		     now - cur->start_time < fs_info->commit_interval)) {
1952 			spin_unlock(&fs_info->trans_lock);
1953 			delay = HZ * 5;
1954 			goto sleep;
1955 		}
1956 		transid = cur->transid;
1957 		spin_unlock(&fs_info->trans_lock);
1958 
1959 		/* If the file system is aborted, this will always fail. */
1960 		trans = btrfs_attach_transaction(root);
1961 		if (IS_ERR(trans)) {
1962 			if (PTR_ERR(trans) != -ENOENT)
1963 				cannot_commit = true;
1964 			goto sleep;
1965 		}
1966 		if (transid == trans->transid) {
1967 			btrfs_commit_transaction(trans);
1968 		} else {
1969 			btrfs_end_transaction(trans);
1970 		}
1971 sleep:
1972 		wake_up_process(fs_info->cleaner_kthread);
1973 		mutex_unlock(&fs_info->transaction_kthread_mutex);
1974 
1975 		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1976 				      &fs_info->fs_state)))
1977 			btrfs_cleanup_transaction(fs_info);
1978 		set_current_state(TASK_INTERRUPTIBLE);
1979 		if (!kthread_should_stop() &&
1980 				(!btrfs_transaction_blocked(fs_info) ||
1981 				 cannot_commit))
1982 			schedule_timeout(delay);
1983 		__set_current_state(TASK_RUNNING);
1984 	} while (!kthread_should_stop());
1985 	return 0;
1986 }
1987 
1988 /*
1989  * this will find the highest generation in the array of
1990  * root backups.  The index of the newest entry is returned,
1991  * or -1 if we can't find anything.
1992  *
1993  * We check to make sure the array is valid by comparing the
1994  * generation of the latest root in the array with the generation
1995  * in the super block.  If they don't match we pitch it.
1996  */
1997 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1998 {
1999 	u64 cur;
2000 	int newest_index = -1;
2001 	struct btrfs_root_backup *root_backup;
2002 	int i;
2003 
2004 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2005 		root_backup = info->super_copy->super_roots + i;
2006 		cur = btrfs_backup_tree_root_gen(root_backup);
2007 		if (cur == newest_gen)
2008 			newest_index = i;
2009 	}
2010 
2011 	/* check to see if we actually wrapped around */
2012 	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
2013 		root_backup = info->super_copy->super_roots;
2014 		cur = btrfs_backup_tree_root_gen(root_backup);
2015 		if (cur == newest_gen)
2016 			newest_index = 0;
2017 	}
2018 	return newest_index;
2019 }
2020 
2021 
2022 /*
2023  * find the oldest backup so we know where to store new entries
2024  * in the backup array.  This will set the backup_root_index
2025  * field in the fs_info struct
2026  */
2027 static void find_oldest_super_backup(struct btrfs_fs_info *info,
2028 				     u64 newest_gen)
2029 {
2030 	int newest_index;
2031 
2032 	newest_index = find_newest_super_backup(info, newest_gen);
2033 	/* if there was garbage in there, just move along */
2034 	if (newest_index == -1) {
2035 		info->backup_root_index = 0;
2036 	} else {
2037 		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
2038 	}
2039 }
2040 
2041 /*
2042  * copy all the root pointers into the super backup array.
2043  * this will bump the backup pointer by one when it is
2044  * done
2045  */
2046 static void backup_super_roots(struct btrfs_fs_info *info)
2047 {
2048 	int next_backup;
2049 	struct btrfs_root_backup *root_backup;
2050 	int last_backup;
2051 
2052 	next_backup = info->backup_root_index;
2053 	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
2054 		BTRFS_NUM_BACKUP_ROOTS;
2055 
2056 	/*
2057 	 * just overwrite the last backup if we're at the same generation;
2058 	 * this happens only at umount
2059 	 */
2060 	root_backup = info->super_for_commit->super_roots + last_backup;
2061 	if (btrfs_backup_tree_root_gen(root_backup) ==
2062 	    btrfs_header_generation(info->tree_root->node))
2063 		next_backup = last_backup;
2064 
2065 	root_backup = info->super_for_commit->super_roots + next_backup;
2066 
2067 	/*
2068 	 * make sure all of our padding and empty slots get zero filled
2069 	 * regardless of which ones we use today
2070 	 */
2071 	memset(root_backup, 0, sizeof(*root_backup));
2072 
2073 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
2074 
2075 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
2076 	btrfs_set_backup_tree_root_gen(root_backup,
2077 			       btrfs_header_generation(info->tree_root->node));
2078 
2079 	btrfs_set_backup_tree_root_level(root_backup,
2080 			       btrfs_header_level(info->tree_root->node));
2081 
2082 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
2083 	btrfs_set_backup_chunk_root_gen(root_backup,
2084 			       btrfs_header_generation(info->chunk_root->node));
2085 	btrfs_set_backup_chunk_root_level(root_backup,
2086 			       btrfs_header_level(info->chunk_root->node));
2087 
2088 	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
2089 	btrfs_set_backup_extent_root_gen(root_backup,
2090 			       btrfs_header_generation(info->extent_root->node));
2091 	btrfs_set_backup_extent_root_level(root_backup,
2092 			       btrfs_header_level(info->extent_root->node));
2093 
2094 	/*
2095 	 * we might commit during log recovery, which happens before we set
2096 	 * the fs_root.  Make sure it is valid before we fill it in.
2097 	 */
2098 	if (info->fs_root && info->fs_root->node) {
2099 		btrfs_set_backup_fs_root(root_backup,
2100 					 info->fs_root->node->start);
2101 		btrfs_set_backup_fs_root_gen(root_backup,
2102 			       btrfs_header_generation(info->fs_root->node));
2103 		btrfs_set_backup_fs_root_level(root_backup,
2104 			       btrfs_header_level(info->fs_root->node));
2105 	}
2106 
2107 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
2108 	btrfs_set_backup_dev_root_gen(root_backup,
2109 			       btrfs_header_generation(info->dev_root->node));
2110 	btrfs_set_backup_dev_root_level(root_backup,
2111 				       btrfs_header_level(info->dev_root->node));
2112 
2113 	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
2114 	btrfs_set_backup_csum_root_gen(root_backup,
2115 			       btrfs_header_generation(info->csum_root->node));
2116 	btrfs_set_backup_csum_root_level(root_backup,
2117 			       btrfs_header_level(info->csum_root->node));
2118 
2119 	btrfs_set_backup_total_bytes(root_backup,
2120 			     btrfs_super_total_bytes(info->super_copy));
2121 	btrfs_set_backup_bytes_used(root_backup,
2122 			     btrfs_super_bytes_used(info->super_copy));
2123 	btrfs_set_backup_num_devices(root_backup,
2124 			     btrfs_super_num_devices(info->super_copy));
2125 
2126 	/*
2127 	 * if we don't copy this out to the super_copy, it won't get remembered
2128 	 * for the next commit
2129 	 */
2130 	memcpy(&info->super_copy->super_roots,
2131 	       &info->super_for_commit->super_roots,
2132 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
2133 }
2134 
2135 /*
2136  * this copies info out of the root backup array and back into
2137  * the in-memory super block.  It is meant to help iterate through
2138  * the array, so you send it the number of backups you've already
2139  * tried and the last backup index you used.
2140  *
2141  * this returns -1 when it has tried all the backups
2142  */
2143 static noinline int next_root_backup(struct btrfs_fs_info *info,
2144 				     struct btrfs_super_block *super,
2145 				     int *num_backups_tried, int *backup_index)
2146 {
2147 	struct btrfs_root_backup *root_backup;
2148 	int newest = *backup_index;
2149 
2150 	if (*num_backups_tried == 0) {
2151 		u64 gen = btrfs_super_generation(super);
2152 
2153 		newest = find_newest_super_backup(info, gen);
2154 		if (newest == -1)
2155 			return -1;
2156 
2157 		*backup_index = newest;
2158 		*num_backups_tried = 1;
2159 	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
2160 		/* we've tried all the backups, all done */
2161 		return -1;
2162 	} else {
2163 		/* jump to the next oldest backup */
2164 		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
2165 			BTRFS_NUM_BACKUP_ROOTS;
2166 		*backup_index = newest;
2167 		*num_backups_tried += 1;
2168 	}
2169 	root_backup = super->super_roots + newest;
2170 
2171 	btrfs_set_super_generation(super,
2172 				   btrfs_backup_tree_root_gen(root_backup));
2173 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2174 	btrfs_set_super_root_level(super,
2175 				   btrfs_backup_tree_root_level(root_backup));
2176 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2177 
2178 	/*
2179 	 * fixme: the total bytes and num_devices need to match or we should
2180 	 * require a fsck
2181 	 */
2182 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2183 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2184 	return 0;
2185 }
2186 
2187 /* helper to clean up workers */
2188 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2189 {
2190 	btrfs_destroy_workqueue(fs_info->fixup_workers);
2191 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
2192 	btrfs_destroy_workqueue(fs_info->workers);
2193 	btrfs_destroy_workqueue(fs_info->endio_workers);
2194 	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2195 	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2196 	btrfs_destroy_workqueue(fs_info->rmw_workers);
2197 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
2198 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2199 	btrfs_destroy_workqueue(fs_info->submit_workers);
2200 	btrfs_destroy_workqueue(fs_info->delayed_workers);
2201 	btrfs_destroy_workqueue(fs_info->caching_workers);
2202 	btrfs_destroy_workqueue(fs_info->readahead_workers);
2203 	btrfs_destroy_workqueue(fs_info->flush_workers);
2204 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2205 	btrfs_destroy_workqueue(fs_info->extent_workers);
2206 	/*
2207 	 * Now that all other work queues are destroyed, we can safely destroy
2208 	 * the queues used for metadata I/O, since tasks from those other work
2209 	 * queues can do metadata I/O operations.
2210 	 */
2211 	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2212 	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2213 }
2214 
2215 static void free_root_extent_buffers(struct btrfs_root *root)
2216 {
2217 	if (root) {
2218 		free_extent_buffer(root->node);
2219 		free_extent_buffer(root->commit_root);
2220 		root->node = NULL;
2221 		root->commit_root = NULL;
2222 	}
2223 }
2224 
2225 /* helper to clean up tree roots */
2226 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2227 {
2228 	free_root_extent_buffers(info->tree_root);
2229 
2230 	free_root_extent_buffers(info->dev_root);
2231 	free_root_extent_buffers(info->extent_root);
2232 	free_root_extent_buffers(info->csum_root);
2233 	free_root_extent_buffers(info->quota_root);
2234 	free_root_extent_buffers(info->uuid_root);
2235 	if (chunk_root)
2236 		free_root_extent_buffers(info->chunk_root);
2237 	free_root_extent_buffers(info->free_space_root);
2238 }
2239 
2240 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
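/*
 * Free all the fs roots: first everything queued on the dead roots list,
 * then whatever is still left in the fs_roots_radix tree.
 */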
2241 {
2242 	int ret;
2243 	struct btrfs_root *gang[8];
2244 	int i;
2245 
2246 	while (!list_empty(&fs_info->dead_roots)) {
2247 		gang[0] = list_entry(fs_info->dead_roots.next,
2248 				     struct btrfs_root, root_list);
2249 		list_del(&gang[0]->root_list);
2250 
2251 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2252 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2253 		} else {
2254 			free_extent_buffer(gang[0]->node);
2255 			free_extent_buffer(gang[0]->commit_root);
2256 			btrfs_put_fs_root(gang[0]);
2257 		}
2258 	}
2259 
2260 	while (1) {
2261 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2262 					     (void **)gang, 0,
2263 					     ARRAY_SIZE(gang));
2264 		if (!ret)
2265 			break;
2266 		for (i = 0; i < ret; i++)
2267 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2268 	}
2269 
2270 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2271 		btrfs_free_log_root_tree(NULL, fs_info);
2272 		btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2273 	}
2274 }
2275 
2276 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2277 {
2278 	mutex_init(&fs_info->scrub_lock);
2279 	atomic_set(&fs_info->scrubs_running, 0);
2280 	atomic_set(&fs_info->scrub_pause_req, 0);
2281 	atomic_set(&fs_info->scrubs_paused, 0);
2282 	atomic_set(&fs_info->scrub_cancel_req, 0);
2283 	init_waitqueue_head(&fs_info->scrub_pause_wait);
2284 	fs_info->scrub_workers_refcnt = 0;
2285 }
2286 
2287 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2288 {
2289 	spin_lock_init(&fs_info->balance_lock);
2290 	mutex_init(&fs_info->balance_mutex);
2291 	atomic_set(&fs_info->balance_running, 0);
2292 	atomic_set(&fs_info->balance_pause_req, 0);
2293 	atomic_set(&fs_info->balance_cancel_req, 0);
2294 	fs_info->balance_ctl = NULL;
2295 	init_waitqueue_head(&fs_info->balance_wait_q);
2296 }
2297 
2298 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2299 {
2300 	struct inode *inode = fs_info->btree_inode;
2301 
2302 	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2303 	set_nlink(inode, 1);
2304 	/*
2305 	 * we set the i_size on the btree inode to the max possible offset.
2306 	 * the real end of the address space is determined by all of
2307 	 * the devices in the system
2308 	 */
2309 	inode->i_size = OFFSET_MAX;
2310 	inode->i_mapping->a_ops = &btree_aops;
2311 
2312 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2313 	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
2314 	BTRFS_I(inode)->io_tree.track_uptodate = 0;
2315 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2316 
2317 	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2318 
2319 	BTRFS_I(inode)->root = fs_info->tree_root;
2320 	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2321 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2322 	btrfs_insert_inode_hash(inode);
2323 }
2324 
2325 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2326 {
2327 	fs_info->dev_replace.lock_owner = 0;
2328 	atomic_set(&fs_info->dev_replace.nesting_level, 0);
2329 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2330 	rwlock_init(&fs_info->dev_replace.lock);
2331 	atomic_set(&fs_info->dev_replace.read_locks, 0);
2332 	atomic_set(&fs_info->dev_replace.blocking_readers, 0);
2333 	init_waitqueue_head(&fs_info->replace_wait);
2334 	init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
2335 }
2336 
2337 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2338 {
2339 	spin_lock_init(&fs_info->qgroup_lock);
2340 	mutex_init(&fs_info->qgroup_ioctl_lock);
2341 	fs_info->qgroup_tree = RB_ROOT;
2342 	fs_info->qgroup_op_tree = RB_ROOT;
2343 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2344 	fs_info->qgroup_seq = 1;
2345 	fs_info->qgroup_ulist = NULL;
2346 	fs_info->qgroup_rescan_running = false;
2347 	mutex_init(&fs_info->qgroup_rescan_lock);
2348 }
2349 
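/*
 * Allocate all the work queues used for async IO, endio processing and
 * metadata updates.  Returns -ENOMEM if any allocation fails.
 */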
2350 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2351 		struct btrfs_fs_devices *fs_devices)
2352 {
2353 	int max_active = fs_info->thread_pool_size;
2354 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2355 
2356 	fs_info->workers =
2357 		btrfs_alloc_workqueue(fs_info, "worker",
2358 				      flags | WQ_HIGHPRI, max_active, 16);
2359 
2360 	fs_info->delalloc_workers =
2361 		btrfs_alloc_workqueue(fs_info, "delalloc",
2362 				      flags, max_active, 2);
2363 
2364 	fs_info->flush_workers =
2365 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2366 				      flags, max_active, 0);
2367 
2368 	fs_info->caching_workers =
2369 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2370 
2371 	/*
2372 	 * a higher idle thresh on the submit workers makes it much more
2373 	 * likely that bios will be sent down in a sane order to the
2374 	 * devices
2375 	 */
2376 	fs_info->submit_workers =
2377 		btrfs_alloc_workqueue(fs_info, "submit", flags,
2378 				      min_t(u64, fs_devices->num_devices,
2379 					    max_active), 64);
2380 
2381 	fs_info->fixup_workers =
2382 		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2383 
2384 	/*
2385 	 * endios are largely parallel and should have a very
2386 	 * low idle thresh
2387 	 */
2388 	fs_info->endio_workers =
2389 		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2390 	fs_info->endio_meta_workers =
2391 		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2392 				      max_active, 4);
2393 	fs_info->endio_meta_write_workers =
2394 		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2395 				      max_active, 2);
2396 	fs_info->endio_raid56_workers =
2397 		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2398 				      max_active, 4);
2399 	fs_info->endio_repair_workers =
2400 		btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2401 	fs_info->rmw_workers =
2402 		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2403 	fs_info->endio_write_workers =
2404 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2405 				      max_active, 2);
2406 	fs_info->endio_freespace_worker =
2407 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2408 				      max_active, 0);
2409 	fs_info->delayed_workers =
2410 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2411 				      max_active, 0);
2412 	fs_info->readahead_workers =
2413 		btrfs_alloc_workqueue(fs_info, "readahead", flags,
2414 				      max_active, 2);
2415 	fs_info->qgroup_rescan_workers =
2416 		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2417 	fs_info->extent_workers =
2418 		btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2419 				      min_t(u64, fs_devices->num_devices,
2420 					    max_active), 8);
2421 
2422 	if (!(fs_info->workers && fs_info->delalloc_workers &&
2423 	      fs_info->submit_workers && fs_info->flush_workers &&
2424 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2425 	      fs_info->endio_meta_write_workers &&
2426 	      fs_info->endio_repair_workers &&
2427 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2428 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2429 	      fs_info->caching_workers && fs_info->readahead_workers &&
2430 	      fs_info->fixup_workers && fs_info->delayed_workers &&
2431 	      fs_info->extent_workers &&
2432 	      fs_info->qgroup_rescan_workers)) {
2433 		return -ENOMEM;
2434 	}
2435 
2436 	return 0;
2437 }
2438 
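/*
 * Read the tree log root recorded in the super block and replay it.
 * This needs writeable devices; on success btrfs_recover_log_trees
 * frees the log tree root for us.
 */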
2439 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2440 			    struct btrfs_fs_devices *fs_devices)
2441 {
2442 	int ret;
2443 	struct btrfs_root *log_tree_root;
2444 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2445 	u64 bytenr = btrfs_super_log_root(disk_super);
2446 
2447 	if (fs_devices->rw_devices == 0) {
2448 		btrfs_warn(fs_info, "log replay required on RO media");
2449 		return -EIO;
2450 	}
2451 
2452 	log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2453 	if (!log_tree_root)
2454 		return -ENOMEM;
2455 
2456 	__setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2457 
2458 	log_tree_root->node = read_tree_block(fs_info, bytenr,
2459 					      fs_info->generation + 1);
2460 	if (IS_ERR(log_tree_root->node)) {
2461 		btrfs_warn(fs_info, "failed to read log tree");
2462 		ret = PTR_ERR(log_tree_root->node);
2463 		kfree(log_tree_root);
2464 		return ret;
2465 	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
2466 		btrfs_err(fs_info, "failed to read log tree");
2467 		free_extent_buffer(log_tree_root->node);
2468 		kfree(log_tree_root);
2469 		return -EIO;
2470 	}
2471 	/* returns with log_tree_root freed on success */
2472 	ret = btrfs_recover_log_trees(log_tree_root);
2473 	if (ret) {
2474 		btrfs_handle_fs_error(fs_info, ret,
2475 				      "Failed to recover log tree");
2476 		free_extent_buffer(log_tree_root->node);
2477 		kfree(log_tree_root);
2478 		return ret;
2479 	}
2480 
2481 	if (fs_info->sb->s_flags & MS_RDONLY) {
2482 		ret = btrfs_commit_super(fs_info);
2483 		if (ret)
2484 			return ret;
2485 	}
2486 
2487 	return 0;
2488 }
2489 
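/*
 * Read the remaining global trees (extent, dev, csum plus the optional
 * quota, uuid and free space trees) from the tree root and hook them
 * up in fs_info.
 */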
2490 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2491 {
2492 	struct btrfs_root *tree_root = fs_info->tree_root;
2493 	struct btrfs_root *root;
2494 	struct btrfs_key location;
2495 	int ret;
2496 
2497 	BUG_ON(!fs_info->tree_root);
2498 
2499 	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2500 	location.type = BTRFS_ROOT_ITEM_KEY;
2501 	location.offset = 0;
2502 
2503 	root = btrfs_read_tree_root(tree_root, &location);
2504 	if (IS_ERR(root))
2505 		return PTR_ERR(root);
2506 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2507 	fs_info->extent_root = root;
2508 
2509 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2510 	root = btrfs_read_tree_root(tree_root, &location);
2511 	if (IS_ERR(root))
2512 		return PTR_ERR(root);
2513 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2514 	fs_info->dev_root = root;
2515 	btrfs_init_devices_late(fs_info);
2516 
2517 	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2518 	root = btrfs_read_tree_root(tree_root, &location);
2519 	if (IS_ERR(root))
2520 		return PTR_ERR(root);
2521 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2522 	fs_info->csum_root = root;
2523 
2524 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2525 	root = btrfs_read_tree_root(tree_root, &location);
2526 	if (!IS_ERR(root)) {
2527 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2528 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2529 		fs_info->quota_root = root;
2530 	}
2531 
2532 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2533 	root = btrfs_read_tree_root(tree_root, &location);
2534 	if (IS_ERR(root)) {
2535 		ret = PTR_ERR(root);
2536 		if (ret != -ENOENT)
2537 			return ret;
2538 	} else {
2539 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2540 		fs_info->uuid_root = root;
2541 	}
2542 
2543 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2544 		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2545 		root = btrfs_read_tree_root(tree_root, &location);
2546 		if (IS_ERR(root))
2547 			return PTR_ERR(root);
2548 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2549 		fs_info->free_space_root = root;
2550 	}
2551 
2552 	return 0;
2553 }
2554 
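/*
 * Main entry point for mounting: read and validate the super block,
 * set up the in-memory state and work queues, read the chunk and tree
 * roots (falling back to the backup roots if needed and allowed),
 * replay the tree log and start the cleaner and transaction kthreads.
 */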
2555 int open_ctree(struct super_block *sb,
2556 	       struct btrfs_fs_devices *fs_devices,
2557 	       char *options)
2558 {
2559 	u32 sectorsize;
2560 	u32 nodesize;
2561 	u32 stripesize;
2562 	u64 generation;
2563 	u64 features;
2564 	struct btrfs_key location;
2565 	struct buffer_head *bh;
2566 	struct btrfs_super_block *disk_super;
2567 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2568 	struct btrfs_root *tree_root;
2569 	struct btrfs_root *chunk_root;
2570 	int ret;
2571 	int err = -EINVAL;
2572 	int num_backups_tried = 0;
2573 	int backup_index = 0;
2574 	int max_active;
2575 	int clear_free_space_tree = 0;
2576 
2577 	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2578 	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2579 	if (!tree_root || !chunk_root) {
2580 		err = -ENOMEM;
2581 		goto fail;
2582 	}
2583 
2584 	ret = init_srcu_struct(&fs_info->subvol_srcu);
2585 	if (ret) {
2586 		err = ret;
2587 		goto fail;
2588 	}
2589 
2590 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2591 	if (ret) {
2592 		err = ret;
2593 		goto fail_srcu;
2594 	}
2595 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2596 					(1 + ilog2(nr_cpu_ids));
2597 
2598 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2599 	if (ret) {
2600 		err = ret;
2601 		goto fail_dirty_metadata_bytes;
2602 	}
2603 
2604 	ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
2605 	if (ret) {
2606 		err = ret;
2607 		goto fail_delalloc_bytes;
2608 	}
2609 
2610 	fs_info->btree_inode = new_inode(sb);
2611 	if (!fs_info->btree_inode) {
2612 		err = -ENOMEM;
2613 		goto fail_bio_counter;
2614 	}
2615 
2616 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2617 
2618 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2619 	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2620 	INIT_LIST_HEAD(&fs_info->trans_list);
2621 	INIT_LIST_HEAD(&fs_info->dead_roots);
2622 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2623 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2624 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2625 	spin_lock_init(&fs_info->delalloc_root_lock);
2626 	spin_lock_init(&fs_info->trans_lock);
2627 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2628 	spin_lock_init(&fs_info->delayed_iput_lock);
2629 	spin_lock_init(&fs_info->defrag_inodes_lock);
2630 	spin_lock_init(&fs_info->tree_mod_seq_lock);
2631 	spin_lock_init(&fs_info->super_lock);
2632 	spin_lock_init(&fs_info->qgroup_op_lock);
2633 	spin_lock_init(&fs_info->buffer_lock);
2634 	spin_lock_init(&fs_info->unused_bgs_lock);
2635 	rwlock_init(&fs_info->tree_mod_log_lock);
2636 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2637 	mutex_init(&fs_info->delete_unused_bgs_mutex);
2638 	mutex_init(&fs_info->reloc_mutex);
2639 	mutex_init(&fs_info->delalloc_root_mutex);
2640 	mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2641 	seqlock_init(&fs_info->profiles_lock);
2642 
2643 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2644 	INIT_LIST_HEAD(&fs_info->space_info);
2645 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2646 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2647 	btrfs_mapping_init(&fs_info->mapping_tree);
2648 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2649 			     BTRFS_BLOCK_RSV_GLOBAL);
2650 	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2651 			     BTRFS_BLOCK_RSV_DELALLOC);
2652 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2653 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2654 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2655 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2656 			     BTRFS_BLOCK_RSV_DELOPS);
2657 	atomic_set(&fs_info->nr_async_submits, 0);
2658 	atomic_set(&fs_info->async_delalloc_pages, 0);
2659 	atomic_set(&fs_info->async_submit_draining, 0);
2660 	atomic_set(&fs_info->nr_async_bios, 0);
2661 	atomic_set(&fs_info->defrag_running, 0);
2662 	atomic_set(&fs_info->qgroup_op_seq, 0);
2663 	atomic_set(&fs_info->reada_works_cnt, 0);
2664 	atomic64_set(&fs_info->tree_mod_seq, 0);
2665 	fs_info->sb = sb;
2666 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2667 	fs_info->metadata_ratio = 0;
2668 	fs_info->defrag_inodes = RB_ROOT;
2669 	atomic64_set(&fs_info->free_chunk_space, 0);
2670 	fs_info->tree_mod_log = RB_ROOT;
2671 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2672 	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2673 	/* readahead state */
2674 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2675 	spin_lock_init(&fs_info->reada_lock);
2676 
2677 	fs_info->thread_pool_size = min_t(unsigned long,
2678 					  num_online_cpus() + 2, 8);
2679 
2680 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2681 	spin_lock_init(&fs_info->ordered_root_lock);
2682 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2683 					GFP_KERNEL);
2684 	if (!fs_info->delayed_root) {
2685 		err = -ENOMEM;
2686 		goto fail_iput;
2687 	}
2688 	btrfs_init_delayed_root(fs_info->delayed_root);
2689 
2690 	btrfs_init_scrub(fs_info);
2691 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2692 	fs_info->check_integrity_print_mask = 0;
2693 #endif
2694 	btrfs_init_balance(fs_info);
2695 	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2696 
2697 	sb->s_blocksize = 4096;
2698 	sb->s_blocksize_bits = blksize_bits(4096);
2699 
2700 	btrfs_init_btree_inode(fs_info);
2701 
2702 	spin_lock_init(&fs_info->block_group_cache_lock);
2703 	fs_info->block_group_cache_tree = RB_ROOT;
2704 	fs_info->first_logical_byte = (u64)-1;
2705 
2706 	extent_io_tree_init(&fs_info->freed_extents[0], NULL);
2707 	extent_io_tree_init(&fs_info->freed_extents[1], NULL);
2708 	fs_info->pinned_extents = &fs_info->freed_extents[0];
2709 	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2710 
2711 	mutex_init(&fs_info->ordered_operations_mutex);
2712 	mutex_init(&fs_info->tree_log_mutex);
2713 	mutex_init(&fs_info->chunk_mutex);
2714 	mutex_init(&fs_info->transaction_kthread_mutex);
2715 	mutex_init(&fs_info->cleaner_mutex);
2716 	mutex_init(&fs_info->volume_mutex);
2717 	mutex_init(&fs_info->ro_block_group_mutex);
2718 	init_rwsem(&fs_info->commit_root_sem);
2719 	init_rwsem(&fs_info->cleanup_work_sem);
2720 	init_rwsem(&fs_info->subvol_sem);
2721 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2722 
2723 	btrfs_init_dev_replace_locks(fs_info);
2724 	btrfs_init_qgroup(fs_info);
2725 
2726 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2727 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2728 
2729 	init_waitqueue_head(&fs_info->transaction_throttle);
2730 	init_waitqueue_head(&fs_info->transaction_wait);
2731 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2732 	init_waitqueue_head(&fs_info->async_submit_wait);
2733 
2734 	INIT_LIST_HEAD(&fs_info->pinned_chunks);
2735 
2736 	/* Usable values until the real ones are cached from the superblock */
2737 	fs_info->nodesize = 4096;
2738 	fs_info->sectorsize = 4096;
2739 	fs_info->stripesize = 4096;
2740 
2741 	ret = btrfs_alloc_stripe_hash_table(fs_info);
2742 	if (ret) {
2743 		err = ret;
2744 		goto fail_alloc;
2745 	}
2746 
2747 	__setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2748 
2749 	invalidate_bdev(fs_devices->latest_bdev);
2750 
2751 	/*
2752 	 * Read super block and check the signature bytes only
2753 	 */
2754 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2755 	if (IS_ERR(bh)) {
2756 		err = PTR_ERR(bh);
2757 		goto fail_alloc;
2758 	}
2759 
2760 	/*
2761 	 * We want to check the superblock checksum; the csum type is stored
2762 	 * inside.  Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2763 	 */
2764 	if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2765 		btrfs_err(fs_info, "superblock checksum mismatch");
2766 		err = -EINVAL;
2767 		brelse(bh);
2768 		goto fail_alloc;
2769 	}
2770 
2771 	/*
2772 	 * super_copy is zeroed at allocation time and we never touch the
2773 	 * following bytes up to INFO_SIZE; the checksum is calculated from
2774 	 * the whole block of INFO_SIZE
2775 	 */
2776 	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2777 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2778 	       sizeof(*fs_info->super_for_commit));
2779 	brelse(bh);
2780 
2781 	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2782 
2783 	ret = btrfs_check_super_valid(fs_info);
2784 	if (ret) {
2785 		btrfs_err(fs_info, "superblock contains fatal errors");
2786 		err = -EINVAL;
2787 		goto fail_alloc;
2788 	}
2789 
2790 	disk_super = fs_info->super_copy;
2791 	if (!btrfs_super_root(disk_super))
2792 		goto fail_alloc;
2793 
2794 	/* check FS state, whether FS is broken. */
2795 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2796 		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2797 
2798 	/*
2799 	 * run through our array of backup supers and set up
2800 	 * our ring pointer to the oldest one
2801 	 */
2802 	generation = btrfs_super_generation(disk_super);
2803 	find_oldest_super_backup(fs_info, generation);
2804 
2805 	/*
2806 	 * In the long term, we'll store the compression type in the super
2807 	 * block, and it'll be used for per-file compression control.
2808 	 */
2809 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2810 
2811 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2812 	if (ret) {
2813 		err = ret;
2814 		goto fail_alloc;
2815 	}
2816 
2817 	features = btrfs_super_incompat_flags(disk_super) &
2818 		~BTRFS_FEATURE_INCOMPAT_SUPP;
2819 	if (features) {
2820 		btrfs_err(fs_info,
2821 		    "cannot mount because of unsupported optional features (%llx)",
2822 		    features);
2823 		err = -EINVAL;
2824 		goto fail_alloc;
2825 	}
2826 
2827 	features = btrfs_super_incompat_flags(disk_super);
2828 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2829 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2830 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2831 
2832 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2833 		btrfs_info(fs_info, "has skinny extents");
2834 
2835 	/*
2836 	 * flag our filesystem as having big metadata blocks if
2837 	 * they are bigger than the page size
2838 	 */
2839 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2840 		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2841 			btrfs_info(fs_info,
2842 				"flagging fs with big metadata feature");
2843 		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2844 	}
2845 
2846 	nodesize = btrfs_super_nodesize(disk_super);
2847 	sectorsize = btrfs_super_sectorsize(disk_super);
2848 	stripesize = sectorsize;
2849 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2850 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2851 
2852 	/* Cache block sizes */
2853 	fs_info->nodesize = nodesize;
2854 	fs_info->sectorsize = sectorsize;
2855 	fs_info->stripesize = stripesize;
2856 
2857 	/*
2858 	 * mixed block groups end up with duplicate but slightly offset
2859 	 * extent buffers for the same range.  This leads to corruption
2860 	 */
2861 	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2862 	    (sectorsize != nodesize)) {
2863 		btrfs_err(fs_info,
2864 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2865 			nodesize, sectorsize);
2866 		goto fail_alloc;
2867 	}
2868 
2869 	/*
2870 	 * We needn't use the lock because there is no other task that will
2871 	 * update the flag.
2872 	 */
2873 	btrfs_set_super_incompat_flags(disk_super, features);
2874 
2875 	features = btrfs_super_compat_ro_flags(disk_super) &
2876 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
2877 	if (!(sb->s_flags & MS_RDONLY) && features) {
2878 		btrfs_err(fs_info,
2879 	"cannot mount read-write because of unsupported optional features (%llx)",
2880 		       features);
2881 		err = -EINVAL;
2882 		goto fail_alloc;
2883 	}
2884 
2885 	max_active = fs_info->thread_pool_size;
2886 
2887 	ret = btrfs_init_workqueues(fs_info, fs_devices);
2888 	if (ret) {
2889 		err = ret;
2890 		goto fail_sb_buffer;
2891 	}
2892 
2893 	sb->s_bdi->congested_fn = btrfs_congested_fn;
2894 	sb->s_bdi->congested_data = fs_info;
2895 	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
2896 	sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
2897 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2898 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
2899 
2900 	sb->s_blocksize = sectorsize;
2901 	sb->s_blocksize_bits = blksize_bits(sectorsize);
2902 
2903 	mutex_lock(&fs_info->chunk_mutex);
2904 	ret = btrfs_read_sys_array(fs_info);
2905 	mutex_unlock(&fs_info->chunk_mutex);
2906 	if (ret) {
2907 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
2908 		goto fail_sb_buffer;
2909 	}
2910 
2911 	generation = btrfs_super_chunk_root_generation(disk_super);
2912 
2913 	__setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2914 
2915 	chunk_root->node = read_tree_block(fs_info,
2916 					   btrfs_super_chunk_root(disk_super),
2917 					   generation);
2918 	if (IS_ERR(chunk_root->node) ||
2919 	    !extent_buffer_uptodate(chunk_root->node)) {
2920 		btrfs_err(fs_info, "failed to read chunk root");
2921 		if (!IS_ERR(chunk_root->node))
2922 			free_extent_buffer(chunk_root->node);
2923 		chunk_root->node = NULL;
2924 		goto fail_tree_roots;
2925 	}
2926 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2927 	chunk_root->commit_root = btrfs_root_node(chunk_root);
2928 
2929 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2930 	   btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2931 
2932 	ret = btrfs_read_chunk_tree(fs_info);
2933 	if (ret) {
2934 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2935 		goto fail_tree_roots;
2936 	}
2937 
2938 	/*
2939 	 * keep the device that is marked to be the target device for the
2940 	 * dev_replace procedure
2941 	 */
2942 	btrfs_close_extra_devices(fs_devices, 0);
2943 
2944 	if (!fs_devices->latest_bdev) {
2945 		btrfs_err(fs_info, "failed to read devices");
2946 		goto fail_tree_roots;
2947 	}
2948 
2949 retry_root_backup:
2950 	generation = btrfs_super_generation(disk_super);
2951 
2952 	tree_root->node = read_tree_block(fs_info,
2953 					  btrfs_super_root(disk_super),
2954 					  generation);
2955 	if (IS_ERR(tree_root->node) ||
2956 	    !extent_buffer_uptodate(tree_root->node)) {
2957 		btrfs_warn(fs_info, "failed to read tree root");
2958 		if (!IS_ERR(tree_root->node))
2959 			free_extent_buffer(tree_root->node);
2960 		tree_root->node = NULL;
2961 		goto recovery_tree_root;
2962 	}
2963 
2964 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2965 	tree_root->commit_root = btrfs_root_node(tree_root);
2966 	btrfs_set_root_refs(&tree_root->root_item, 1);
2967 
2968 	mutex_lock(&tree_root->objectid_mutex);
2969 	ret = btrfs_find_highest_objectid(tree_root,
2970 					&tree_root->highest_objectid);
2971 	if (ret) {
2972 		mutex_unlock(&tree_root->objectid_mutex);
2973 		goto recovery_tree_root;
2974 	}
2975 
2976 	ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2977 
2978 	mutex_unlock(&tree_root->objectid_mutex);
2979 
2980 	ret = btrfs_read_roots(fs_info);
2981 	if (ret)
2982 		goto recovery_tree_root;
2983 
2984 	fs_info->generation = generation;
2985 	fs_info->last_trans_committed = generation;
2986 
2987 	ret = btrfs_recover_balance(fs_info);
2988 	if (ret) {
2989 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
2990 		goto fail_block_groups;
2991 	}
2992 
2993 	ret = btrfs_init_dev_stats(fs_info);
2994 	if (ret) {
2995 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
2996 		goto fail_block_groups;
2997 	}
2998 
2999 	ret = btrfs_init_dev_replace(fs_info);
3000 	if (ret) {
3001 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3002 		goto fail_block_groups;
3003 	}
3004 
3005 	btrfs_close_extra_devices(fs_devices, 1);
3006 
3007 	ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
3008 	if (ret) {
3009 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3010 				ret);
3011 		goto fail_block_groups;
3012 	}
3013 
3014 	ret = btrfs_sysfs_add_device(fs_devices);
3015 	if (ret) {
3016 		btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3017 				ret);
3018 		goto fail_fsdev_sysfs;
3019 	}
3020 
3021 	ret = btrfs_sysfs_add_mounted(fs_info);
3022 	if (ret) {
3023 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3024 		goto fail_fsdev_sysfs;
3025 	}
3026 
3027 	ret = btrfs_init_space_info(fs_info);
3028 	if (ret) {
3029 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3030 		goto fail_sysfs;
3031 	}
3032 
3033 	ret = btrfs_read_block_groups(fs_info);
3034 	if (ret) {
3035 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3036 		goto fail_sysfs;
3037 	}
3038 	fs_info->num_tolerated_disk_barrier_failures =
3039 		btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3040 	if (fs_info->fs_devices->missing_devices >
3041 	     fs_info->num_tolerated_disk_barrier_failures &&
3042 	    !(sb->s_flags & MS_RDONLY)) {
3043 		btrfs_warn(fs_info,
3044 "missing devices (%llu) exceeds the limit (%d), writeable mount is not allowed",
3045 			fs_info->fs_devices->missing_devices,
3046 			fs_info->num_tolerated_disk_barrier_failures);
3047 		goto fail_sysfs;
3048 	}
3049 
3050 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3051 					       "btrfs-cleaner");
3052 	if (IS_ERR(fs_info->cleaner_kthread))
3053 		goto fail_sysfs;
3054 
3055 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3056 						   tree_root,
3057 						   "btrfs-transaction");
3058 	if (IS_ERR(fs_info->transaction_kthread))
3059 		goto fail_cleaner;
3060 
3061 	if (!btrfs_test_opt(fs_info, SSD) &&
3062 	    !btrfs_test_opt(fs_info, NOSSD) &&
3063 	    !fs_info->fs_devices->rotating) {
3064 		btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
3065 		btrfs_set_opt(fs_info->mount_opt, SSD);
3066 	}
3067 
3068 	/*
3069 	 * Mount does not set all options immediately, so we can do it now
3070 	 * and do not have to wait for a transaction commit
3071 	 */
3072 	btrfs_apply_pending_changes(fs_info);
3073 
3074 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3075 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3076 		ret = btrfsic_mount(fs_info, fs_devices,
3077 				    btrfs_test_opt(fs_info,
3078 					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3079 				    1 : 0,
3080 				    fs_info->check_integrity_print_mask);
3081 		if (ret)
3082 			btrfs_warn(fs_info,
3083 				"failed to initialize integrity check module: %d",
3084 				ret);
3085 	}
3086 #endif
3087 	ret = btrfs_read_qgroup_config(fs_info);
3088 	if (ret)
3089 		goto fail_trans_kthread;
3090 
3091 	/* do not make disk changes in a broken FS or if nologreplay is given */
3092 	if (btrfs_super_log_root(disk_super) != 0 &&
3093 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3094 		ret = btrfs_replay_log(fs_info, fs_devices);
3095 		if (ret) {
3096 			err = ret;
3097 			goto fail_qgroup;
3098 		}
3099 	}
3100 
3101 	ret = btrfs_find_orphan_roots(fs_info);
3102 	if (ret)
3103 		goto fail_qgroup;
3104 
3105 	if (!(sb->s_flags & MS_RDONLY)) {
3106 		ret = btrfs_cleanup_fs_roots(fs_info);
3107 		if (ret)
3108 			goto fail_qgroup;
3109 
3110 		mutex_lock(&fs_info->cleaner_mutex);
3111 		ret = btrfs_recover_relocation(tree_root);
3112 		mutex_unlock(&fs_info->cleaner_mutex);
3113 		if (ret < 0) {
3114 			btrfs_warn(fs_info, "failed to recover relocation: %d",
3115 					ret);
3116 			err = -EINVAL;
3117 			goto fail_qgroup;
3118 		}
3119 	}
3120 
3121 	location.objectid = BTRFS_FS_TREE_OBJECTID;
3122 	location.type = BTRFS_ROOT_ITEM_KEY;
3123 	location.offset = 0;
3124 
3125 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3126 	if (IS_ERR(fs_info->fs_root)) {
3127 		err = PTR_ERR(fs_info->fs_root);
3128 		goto fail_qgroup;
3129 	}
3130 
3131 	if (sb->s_flags & MS_RDONLY)
3132 		return 0;
3133 
3134 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3135 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3136 		clear_free_space_tree = 1;
3137 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3138 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3139 		btrfs_warn(fs_info, "free space tree is invalid");
3140 		clear_free_space_tree = 1;
3141 	}
3142 
3143 	if (clear_free_space_tree) {
3144 		btrfs_info(fs_info, "clearing free space tree");
3145 		ret = btrfs_clear_free_space_tree(fs_info);
3146 		if (ret) {
3147 			btrfs_warn(fs_info,
3148 				   "failed to clear free space tree: %d", ret);
3149 			close_ctree(fs_info);
3150 			return ret;
3151 		}
3152 	}
3153 
3154 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3155 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3156 		btrfs_info(fs_info, "creating free space tree");
3157 		ret = btrfs_create_free_space_tree(fs_info);
3158 		if (ret) {
3159 			btrfs_warn(fs_info,
3160 				"failed to create free space tree: %d", ret);
3161 			close_ctree(fs_info);
3162 			return ret;
3163 		}
3164 	}
3165 
3166 	down_read(&fs_info->cleanup_work_sem);
3167 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3168 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3169 		up_read(&fs_info->cleanup_work_sem);
3170 		close_ctree(fs_info);
3171 		return ret;
3172 	}
3173 	up_read(&fs_info->cleanup_work_sem);
3174 
3175 	ret = btrfs_resume_balance_async(fs_info);
3176 	if (ret) {
3177 		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3178 		close_ctree(fs_info);
3179 		return ret;
3180 	}
3181 
3182 	ret = btrfs_resume_dev_replace_async(fs_info);
3183 	if (ret) {
3184 		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3185 		close_ctree(fs_info);
3186 		return ret;
3187 	}
3188 
3189 	btrfs_qgroup_rescan_resume(fs_info);
3190 
3191 	if (!fs_info->uuid_root) {
3192 		btrfs_info(fs_info, "creating UUID tree");
3193 		ret = btrfs_create_uuid_tree(fs_info);
3194 		if (ret) {
3195 			btrfs_warn(fs_info,
3196 				"failed to create the UUID tree: %d", ret);
3197 			close_ctree(fs_info);
3198 			return ret;
3199 		}
3200 	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3201 		   fs_info->generation !=
3202 				btrfs_super_uuid_tree_generation(disk_super)) {
3203 		btrfs_info(fs_info, "checking UUID tree");
3204 		ret = btrfs_check_uuid_tree(fs_info);
3205 		if (ret) {
3206 			btrfs_warn(fs_info,
3207 				"failed to check the UUID tree: %d", ret);
3208 			close_ctree(fs_info);
3209 			return ret;
3210 		}
3211 	} else {
3212 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3213 	}
3214 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3215 
3216 	/*
3217 	 * backuproot only affects mount behavior, and if open_ctree
3218 	 * succeeded, there is no need to keep the flag
3219 	 */
3220 	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3221 
3222 	return 0;
3223 
3224 fail_qgroup:
3225 	btrfs_free_qgroup_config(fs_info);
3226 fail_trans_kthread:
3227 	kthread_stop(fs_info->transaction_kthread);
3228 	btrfs_cleanup_transaction(fs_info);
3229 	btrfs_free_fs_roots(fs_info);
3230 fail_cleaner:
3231 	kthread_stop(fs_info->cleaner_kthread);
3232 
3233 	/*
3234 	 * make sure we're done with the btree inode before we stop our
3235 	 * kthreads
3236 	 */
3237 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3238 
3239 fail_sysfs:
3240 	btrfs_sysfs_remove_mounted(fs_info);
3241 
3242 fail_fsdev_sysfs:
3243 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3244 
3245 fail_block_groups:
3246 	btrfs_put_block_group_cache(fs_info);
3247 
3248 fail_tree_roots:
3249 	free_root_pointers(fs_info, 1);
3250 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3251 
3252 fail_sb_buffer:
3253 	btrfs_stop_all_workers(fs_info);
3254 	btrfs_free_block_groups(fs_info);
3255 fail_alloc:
3256 fail_iput:
3257 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3258 
3259 	iput(fs_info->btree_inode);
3260 fail_bio_counter:
3261 	percpu_counter_destroy(&fs_info->bio_counter);
3262 fail_delalloc_bytes:
3263 	percpu_counter_destroy(&fs_info->delalloc_bytes);
3264 fail_dirty_metadata_bytes:
3265 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3266 fail_srcu:
3267 	cleanup_srcu_struct(&fs_info->subvol_srcu);
3268 fail:
3269 	btrfs_free_stripe_hash_table(fs_info);
3270 	btrfs_close_devices(fs_info->fs_devices);
3271 	return err;
3272 
3273 recovery_tree_root:
3274 	if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3275 		goto fail_tree_roots;
3276 
3277 	free_root_pointers(fs_info, 0);
3278 
3279 	/* don't use the log in recovery mode, it won't be valid */
3280 	btrfs_set_super_log_root(disk_super, 0);
3281 
3282 	/* we can't trust the free space cache either */
3283 	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3284 
3285 	ret = next_root_backup(fs_info, fs_info->super_copy,
3286 			       &num_backups_tried, &backup_index);
3287 	if (ret == -1)
3288 		goto fail_block_groups;
3289 	goto retry_root_backup;
3290 }
3291 
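/*
 * endio for super block buffer heads.  On a write error we warn and
 * bump the device's write error stats instead of using the generic
 * buffer write_io_error handling.
 */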
3292 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3293 {
3294 	if (uptodate) {
3295 		set_buffer_uptodate(bh);
3296 	} else {
3297 		struct btrfs_device *device = (struct btrfs_device *)
3298 			bh->b_private;
3299 
3300 		btrfs_warn_rl_in_rcu(device->fs_info,
3301 				"lost page write due to IO error on %s",
3302 					  rcu_str_deref(device->name));
3303 		/* note, we don't set_buffer_write_io_error because we have
3304 		 * our own ways of dealing with the IO errors
3305 		 */
3306 		clear_buffer_uptodate(bh);
3307 		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3308 	}
3309 	unlock_buffer(bh);
3310 	put_bh(bh);
3311 }
3312 
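/*
 * Read one copy of the super block (selected by copy_num) from the
 * device and do basic sanity checks on its bytenr and magic.
 */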
3313 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3314 			struct buffer_head **bh_ret)
3315 {
3316 	struct buffer_head *bh;
3317 	struct btrfs_super_block *super;
3318 	u64 bytenr;
3319 
3320 	bytenr = btrfs_sb_offset(copy_num);
3321 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3322 		return -EINVAL;
3323 
3324 	bh = __bread(bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE);
3325 	/*
3326 	 * If we fail to read from the underlying devices, as of now
3327 	 * the best option we have is to mark it EIO.
3328 	 */
3329 	if (!bh)
3330 		return -EIO;
3331 
3332 	super = (struct btrfs_super_block *)bh->b_data;
3333 	if (btrfs_super_bytenr(super) != bytenr ||
3334 		    btrfs_super_magic(super) != BTRFS_MAGIC) {
3335 		brelse(bh);
3336 		return -EINVAL;
3337 	}
3338 
3339 	*bh_ret = bh;
3340 	return 0;
3341 }
3342 
3343 
3344 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3345 {
3346 	struct buffer_head *bh;
3347 	struct buffer_head *latest = NULL;
3348 	struct btrfs_super_block *super;
3349 	int i;
3350 	u64 transid = 0;
3351 	int ret = -EINVAL;
3352 
3353 	/* we would like to check all the supers, but that would make
3354 	 * a btrfs mount succeed after a mkfs from a different FS.
3355 	 * So, to scan for later supers (using BTRFS_SUPER_MIRROR_MAX
3356 	 * instead) we would need to add a special mount option.
3357 	 */
3358 	for (i = 0; i < 1; i++) {
3359 		ret = btrfs_read_dev_one_super(bdev, i, &bh);
3360 		if (ret)
3361 			continue;
3362 
3363 		super = (struct btrfs_super_block *)bh->b_data;
3364 
3365 		if (!latest || btrfs_super_generation(super) > transid) {
3366 			brelse(latest);
3367 			latest = bh;
3368 			transid = btrfs_super_generation(super);
3369 		} else {
3370 			brelse(bh);
3371 		}
3372 	}
3373 
3374 	if (!latest)
3375 		return ERR_PTR(ret);
3376 
3377 	return latest;
3378 }
3379 
3380 /*
3381  * this should be called twice, once with wait == 0 and
3382  * once with wait == 1.  When wait == 0 is done, all the buffer heads
3383  * we write are pinned.
3384  *
3385  * They are released when wait == 1 is done.
3386  * max_mirrors must be the same for both runs, and it indicates how
3387  * many supers on this one device should be written.
3388  *
3389  * max_mirrors == 0 means to write them all.
3390  */
3391 static int write_dev_supers(struct btrfs_device *device,
3392 			    struct btrfs_super_block *sb,
3393 			    int wait, int max_mirrors)
3394 {
3395 	struct buffer_head *bh;
3396 	int i;
3397 	int ret;
3398 	int errors = 0;
3399 	u32 crc;
3400 	u64 bytenr;
3401 
3402 	if (max_mirrors == 0)
3403 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3404 
3405 	for (i = 0; i < max_mirrors; i++) {
3406 		bytenr = btrfs_sb_offset(i);
3407 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3408 		    device->commit_total_bytes)
3409 			break;
3410 
3411 		if (wait) {
3412 			bh = __find_get_block(device->bdev, bytenr / 4096,
3413 					      BTRFS_SUPER_INFO_SIZE);
3414 			if (!bh) {
3415 				errors++;
3416 				continue;
3417 			}
3418 			wait_on_buffer(bh);
3419 			if (!buffer_uptodate(bh))
3420 				errors++;
3421 
3422 			/* drop our reference */
3423 			brelse(bh);
3424 
3425 			/* drop the reference from the wait == 0 run */
3426 			brelse(bh);
3427 			continue;
3428 		} else {
3429 			btrfs_set_super_bytenr(sb, bytenr);
3430 
3431 			crc = ~(u32)0;
3432 			crc = btrfs_csum_data((const char *)sb +
3433 					      BTRFS_CSUM_SIZE, crc,
3434 					      BTRFS_SUPER_INFO_SIZE -
3435 					      BTRFS_CSUM_SIZE);
3436 			btrfs_csum_final(crc, sb->csum);
3437 
3438 			/*
3439 			 * one reference for us, and we leave it for the
3440 			 * caller
3441 			 */
3442 			bh = __getblk(device->bdev, bytenr / 4096,
3443 				      BTRFS_SUPER_INFO_SIZE);
3444 			if (!bh) {
3445 				btrfs_err(device->fs_info,
3446 				    "couldn't get super buffer head for bytenr %llu",
3447 				    bytenr);
3448 				errors++;
3449 				continue;
3450 			}
3451 
3452 			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3453 
3454 			/* one reference for submit_bh */
3455 			get_bh(bh);
3456 
3457 			set_buffer_uptodate(bh);
3458 			lock_buffer(bh);
3459 			bh->b_end_io = btrfs_end_buffer_write_sync;
3460 			bh->b_private = device;
3461 		}
3462 
3463 		/*
3464 		 * we fua the first super.  The others we allow
3465 		 * to go down lazily.
3466 		 */
3467 		if (i == 0) {
3468 			ret = btrfsic_submit_bh(REQ_OP_WRITE,
3469 						REQ_SYNC | REQ_FUA, bh);
3470 		} else {
3471 			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
3472 		}
3473 		if (ret)
3474 			errors++;
3475 	}
3476 	return errors < i ? 0 : -1;
3477 }
3478 
3479 /*
3480  * endio for write_dev_flush; this will wake anyone waiting
3481  * for the barrier when it is done
3482  */
3483 static void btrfs_end_empty_barrier(struct bio *bio)
3484 {
3485 	complete(bio->bi_private);
3486 }
3487 
3488 /*
3489  * Submit a flush request to the device if it supports it. Error handling is
3490  * done in the waiting counterpart.
3491  */
3492 static void write_dev_flush(struct btrfs_device *device)
3493 {
3494 	struct request_queue *q = bdev_get_queue(device->bdev);
3495 	struct bio *bio = device->flush_bio;
3496 
3497 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3498 		return;
3499 
3500 	bio_reset(bio);
3501 	bio->bi_end_io = btrfs_end_empty_barrier;
3502 	bio->bi_bdev = device->bdev;
3503 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3504 	init_completion(&device->flush_wait);
3505 	bio->bi_private = &device->flush_wait;
3506 
3507 	submit_bio(bio);
3508 	device->flush_bio_sent = 1;
3509 }
3510 
3511 /*
3512  * If the flush bio has been submitted by write_dev_flush, wait for it.
3513  */
3514 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3515 {
3516 	struct bio *bio = device->flush_bio;
3517 
3518 	if (!device->flush_bio_sent)
3519 		return 0;
3520 
3521 	device->flush_bio_sent = 0;
3522 	wait_for_completion_io(&device->flush_wait);
3523 
3524 	return bio->bi_status;
3525 }
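
/*
 * write_dev_flush() and wait_dev_flush() are meant to be used as a pair,
 * e.g. (a sketch, mirroring barrier_all_devices() below):
 *
 *	write_dev_flush(dev);		submit the preflush bio
 *	...				kick off flushes on the other devices
 *	ret = wait_dev_flush(dev);	collect the flush status
 */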
3526 
3527 static int check_barrier_error(struct btrfs_fs_devices *fsdevs)
3528 {
3529 	int dev_flush_error = 0;
3530 	struct btrfs_device *dev;
3531 
3532 	list_for_each_entry_rcu(dev, &fsdevs->devices, dev_list) {
3533 		if (!dev->bdev || dev->last_flush_error)
3534 			dev_flush_error++;
3535 	}
3536 
3537 	if (dev_flush_error >
3538 	    fsdevs->fs_info->num_tolerated_disk_barrier_failures)
3539 		return -EIO;
3540 
3541 	return 0;
3542 }
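
/*
 * Worked example for check_barrier_error(): with
 * num_tolerated_disk_barrier_failures == 1, a single device that is
 * missing or failed its flush is tolerated; two such devices push
 * dev_flush_error above the limit and the commit fails with -EIO.
 */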
3543 
3544 /*
3545  * send an empty flush down to each device in parallel,
3546  * then wait for them
3547  */
3548 static int barrier_all_devices(struct btrfs_fs_info *info)
3549 {
3550 	struct list_head *head;
3551 	struct btrfs_device *dev;
3552 	int errors_wait = 0;
3553 	blk_status_t ret;
3554 
3555 	/* send down all the barriers */
3556 	head = &info->fs_devices->devices;
3557 	list_for_each_entry_rcu(dev, head, dev_list) {
3558 		if (dev->missing)
3559 			continue;
3560 		if (!dev->bdev)
3561 			continue;
3562 		if (!dev->in_fs_metadata || !dev->writeable)
3563 			continue;
3564 
3565 		write_dev_flush(dev);
3566 		dev->last_flush_error = 0;
3567 	}
3568 
3569 	/* wait for all the barriers */
3570 	list_for_each_entry_rcu(dev, head, dev_list) {
3571 		if (dev->missing)
3572 			continue;
3573 		if (!dev->bdev) {
3574 			errors_wait++;
3575 			continue;
3576 		}
3577 		if (!dev->in_fs_metadata || !dev->writeable)
3578 			continue;
3579 
3580 		ret = wait_dev_flush(dev);
3581 		if (ret) {
3582 			dev->last_flush_error = ret;
3583 			btrfs_dev_stat_inc_and_print(dev,
3584 					BTRFS_DEV_STAT_FLUSH_ERRS);
3585 			errors_wait++;
3586 		}
3587 	}
3588 
3589 	if (errors_wait) {
3590 		/*
3591 		 * We need the status of all disks to arrive at the
3592 		 * overall volume status, so the error checking is
3593 		 * pushed out to a separate loop.
3594 		 */
3595 		return check_barrier_error(info->fs_devices);
3596 	}
3597 	return 0;
3598 }
3599 
3600 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3601 {
3602 	int raid_type;
3603 	int min_tolerated = INT_MAX;
3604 
3605 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3606 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3607 		min_tolerated = min(min_tolerated,
3608 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3609 				    tolerated_failures);
3610 
3611 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3612 		if (raid_type == BTRFS_RAID_SINGLE)
3613 			continue;
3614 		if (!(flags & btrfs_raid_group[raid_type]))
3615 			continue;
3616 		min_tolerated = min(min_tolerated,
3617 				    btrfs_raid_array[raid_type].
3618 				    tolerated_failures);
3619 	}
3620 
3621 	if (min_tolerated == INT_MAX) {
3622 		pr_warn("BTRFS: unknown raid flag: %llu", flags);
3623 		min_tolerated = 0;
3624 	}
3625 
3626 	return min_tolerated;
3627 }
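
/*
 * Worked example (tolerated_failures values from btrfs_raid_array; a
 * sketch): flags == BTRFS_BLOCK_GROUP_RAID1 yields 1, while a block
 * group that is mid-conversion and carries both the RAID0 and RAID1
 * bits yields min(0, 1) == 0, i.e. no barrier failure can be tolerated.
 */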
3628 
3629 int btrfs_calc_num_tolerated_disk_barrier_failures(
3630 	struct btrfs_fs_info *fs_info)
3631 {
3632 	struct btrfs_ioctl_space_info space;
3633 	struct btrfs_space_info *sinfo;
3634 	u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3635 		       BTRFS_BLOCK_GROUP_SYSTEM,
3636 		       BTRFS_BLOCK_GROUP_METADATA,
3637 		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3638 	int i;
3639 	int c;
3640 	int num_tolerated_disk_barrier_failures =
3641 		(int)fs_info->fs_devices->num_devices;
3642 
3643 	for (i = 0; i < ARRAY_SIZE(types); i++) {
3644 		struct btrfs_space_info *tmp;
3645 
3646 		sinfo = NULL;
3647 		rcu_read_lock();
3648 		list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3649 			if (tmp->flags == types[i]) {
3650 				sinfo = tmp;
3651 				break;
3652 			}
3653 		}
3654 		rcu_read_unlock();
3655 
3656 		if (!sinfo)
3657 			continue;
3658 
3659 		down_read(&sinfo->groups_sem);
3660 		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3661 			u64 flags;
3662 
3663 			if (list_empty(&sinfo->block_groups[c]))
3664 				continue;
3665 
3666 			btrfs_get_block_group_info(&sinfo->block_groups[c],
3667 						   &space);
3668 			if (space.total_bytes == 0 || space.used_bytes == 0)
3669 				continue;
3670 			flags = space.flags;
3671 
3672 			num_tolerated_disk_barrier_failures = min(
3673 				num_tolerated_disk_barrier_failures,
3674 				btrfs_get_num_tolerated_disk_barrier_failures(
3675 					flags));
3676 		}
3677 		up_read(&sinfo->groups_sem);
3678 	}
3679 
3680 	return num_tolerated_disk_barrier_failures;
3681 }
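
/*
 * Worked example (a sketch): a 2-device filesystem with RAID1 metadata
 * and system chunks (tolerated_failures == 1) but RAID0 data
 * (tolerated_failures == 0) starts at num_devices == 2 and ends up with
 * min(2, 1, 0) == 0 tolerated barrier failures, because losing either
 * device loses data.
 */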
3682 
3683 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3684 {
3685 	struct list_head *head;
3686 	struct btrfs_device *dev;
3687 	struct btrfs_super_block *sb;
3688 	struct btrfs_dev_item *dev_item;
3689 	int ret;
3690 	int do_barriers;
3691 	int max_errors;
3692 	int total_errors = 0;
3693 	u64 flags;
3694 
3695 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3696 	backup_super_roots(fs_info);
3697 
3698 	sb = fs_info->super_for_commit;
3699 	dev_item = &sb->dev_item;
3700 
3701 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3702 	head = &fs_info->fs_devices->devices;
3703 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3704 
3705 	if (do_barriers) {
3706 		ret = barrier_all_devices(fs_info);
3707 		if (ret) {
3708 			mutex_unlock(
3709 				&fs_info->fs_devices->device_list_mutex);
3710 			btrfs_handle_fs_error(fs_info, ret,
3711 					      "errors while submitting device barriers.");
3712 			return ret;
3713 		}
3714 	}
3715 
3716 	list_for_each_entry_rcu(dev, head, dev_list) {
3717 		if (!dev->bdev) {
3718 			total_errors++;
3719 			continue;
3720 		}
3721 		if (!dev->in_fs_metadata || !dev->writeable)
3722 			continue;
3723 
3724 		btrfs_set_stack_device_generation(dev_item, 0);
3725 		btrfs_set_stack_device_type(dev_item, dev->type);
3726 		btrfs_set_stack_device_id(dev_item, dev->devid);
3727 		btrfs_set_stack_device_total_bytes(dev_item,
3728 						   dev->commit_total_bytes);
3729 		btrfs_set_stack_device_bytes_used(dev_item,
3730 						  dev->commit_bytes_used);
3731 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3732 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3733 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3734 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3735 		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3736 
3737 		flags = btrfs_super_flags(sb);
3738 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3739 
3740 		ret = write_dev_supers(dev, sb, 0, max_mirrors);
3741 		if (ret)
3742 			total_errors++;
3743 	}
3744 	if (total_errors > max_errors) {
3745 		btrfs_err(fs_info, "%d errors while writing supers",
3746 			  total_errors);
3747 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3748 
3749 		/* FUA is masked off if unsupported and can't be the reason */
3750 		btrfs_handle_fs_error(fs_info, -EIO,
3751 				      "%d errors while writing supers",
3752 				      total_errors);
3753 		return -EIO;
3754 	}
3755 
3756 	total_errors = 0;
3757 	list_for_each_entry_rcu(dev, head, dev_list) {
3758 		if (!dev->bdev)
3759 			continue;
3760 		if (!dev->in_fs_metadata || !dev->writeable)
3761 			continue;
3762 
3763 		ret = write_dev_supers(dev, sb, 1, max_mirrors);
3764 		if (ret)
3765 			total_errors++;
3766 	}
3767 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3768 	if (total_errors > max_errors) {
3769 		btrfs_handle_fs_error(fs_info, -EIO,
3770 				      "%d errors while writing supers",
3771 				      total_errors);
3772 		return -EIO;
3773 	}
3774 	return 0;
3775 }
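
/*
 * Example of the error budget above (a sketch): with 3 devices,
 * max_errors == 2, so the commit only fails with -EIO when writing the
 * supers failed on every single device; as long as one device holds a
 * valid super, the filesystem stays mountable.
 */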
3776 
3777 /* Drop a fs root from the radix tree and free it. */
3778 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3779 				  struct btrfs_root *root)
3780 {
3781 	spin_lock(&fs_info->fs_roots_radix_lock);
3782 	radix_tree_delete(&fs_info->fs_roots_radix,
3783 			  (unsigned long)root->root_key.objectid);
3784 	spin_unlock(&fs_info->fs_roots_radix_lock);
3785 
3786 	if (btrfs_root_refs(&root->root_item) == 0)
3787 		synchronize_srcu(&fs_info->subvol_srcu);
3788 
3789 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3790 		btrfs_free_log(NULL, root);
3791 		if (root->reloc_root) {
3792 			free_extent_buffer(root->reloc_root->node);
3793 			free_extent_buffer(root->reloc_root->commit_root);
3794 			btrfs_put_fs_root(root->reloc_root);
3795 			root->reloc_root = NULL;
3796 		}
3797 	}
3798 
3799 	if (root->free_ino_pinned)
3800 		__btrfs_remove_free_space_cache(root->free_ino_pinned);
3801 	if (root->free_ino_ctl)
3802 		__btrfs_remove_free_space_cache(root->free_ino_ctl);
3803 	free_fs_root(root);
3804 }
3805 
3806 static void free_fs_root(struct btrfs_root *root)
3807 {
3808 	iput(root->ino_cache_inode);
3809 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3810 	btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
3811 	root->orphan_block_rsv = NULL;
3812 	if (root->anon_dev)
3813 		free_anon_bdev(root->anon_dev);
3814 	if (root->subv_writers)
3815 		btrfs_free_subvolume_writers(root->subv_writers);
3816 	free_extent_buffer(root->node);
3817 	free_extent_buffer(root->commit_root);
3818 	kfree(root->free_ino_ctl);
3819 	kfree(root->free_ino_pinned);
3820 	kfree(root->name);
3821 	btrfs_put_fs_root(root);
3822 }
3823 
3824 void btrfs_free_fs_root(struct btrfs_root *root)
3825 {
3826 	free_fs_root(root);
3827 }
3828 
3829 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3830 {
3831 	u64 root_objectid = 0;
3832 	struct btrfs_root *gang[8];
3833 	int i = 0;
3834 	int err = 0;
3835 	unsigned int ret = 0;
3836 	int index;
3837 
3838 	while (1) {
3839 		index = srcu_read_lock(&fs_info->subvol_srcu);
3840 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3841 					     (void **)gang, root_objectid,
3842 					     ARRAY_SIZE(gang));
3843 		if (!ret) {
3844 			srcu_read_unlock(&fs_info->subvol_srcu, index);
3845 			break;
3846 		}
3847 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
3848 
3849 		for (i = 0; i < ret; i++) {
3850 			/* Avoid grabbing roots that are in dead_roots */
3851 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3852 				gang[i] = NULL;
3853 				continue;
3854 			}
3855 			/* grab all the search results for later use */
3856 			gang[i] = btrfs_grab_fs_root(gang[i]);
3857 		}
3858 		srcu_read_unlock(&fs_info->subvol_srcu, index);
3859 
3860 		for (i = 0; i < ret; i++) {
3861 			if (!gang[i])
3862 				continue;
3863 			root_objectid = gang[i]->root_key.objectid;
3864 			err = btrfs_orphan_cleanup(gang[i]);
3865 			if (err)
3866 				break;
3867 			btrfs_put_fs_root(gang[i]);
3868 		}
3869 		root_objectid++;
3870 	}
3871 
3872 	/* release the roots that were left uncleaned due to an error */
3873 	for (; i < ret; i++) {
3874 		if (gang[i])
3875 			btrfs_put_fs_root(gang[i]);
3876 	}
3877 	return err;
3878 }
3879 
3880 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3881 {
3882 	struct btrfs_root *root = fs_info->tree_root;
3883 	struct btrfs_trans_handle *trans;
3884 
3885 	mutex_lock(&fs_info->cleaner_mutex);
3886 	btrfs_run_delayed_iputs(fs_info);
3887 	mutex_unlock(&fs_info->cleaner_mutex);
3888 	wake_up_process(fs_info->cleaner_kthread);
3889 
3890 	/* wait until the ongoing cleanup work is done */
3891 	down_write(&fs_info->cleanup_work_sem);
3892 	up_write(&fs_info->cleanup_work_sem);
3893 
3894 	trans = btrfs_join_transaction(root);
3895 	if (IS_ERR(trans))
3896 		return PTR_ERR(trans);
3897 	return btrfs_commit_transaction(trans);
3898 }
3899 
3900 void close_ctree(struct btrfs_fs_info *fs_info)
3901 {
3902 	struct btrfs_root *root = fs_info->tree_root;
3903 	int ret;
3904 
3905 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3906 
3907 	/* wait for the qgroup rescan worker to stop */
3908 	btrfs_qgroup_wait_for_completion(fs_info, false);
3909 
3910 	/* wait for the uuid_scan task to finish */
3911 	down(&fs_info->uuid_tree_rescan_sem);
3912 	/* avoid complaints from lockdep et al., set sem back to initial state */
3913 	up(&fs_info->uuid_tree_rescan_sem);
3914 
3915 	/* pause restriper - we want to resume on mount */
3916 	btrfs_pause_balance(fs_info);
3917 
3918 	btrfs_dev_replace_suspend_for_unmount(fs_info);
3919 
3920 	btrfs_scrub_cancel(fs_info);
3921 
3922 	/* wait for any defraggers to finish */
3923 	wait_event(fs_info->transaction_wait,
3924 		   (atomic_read(&fs_info->defrag_running) == 0));
3925 
3926 	/* clear out the rbtree of defraggable inodes */
3927 	btrfs_cleanup_defrag_inodes(fs_info);
3928 
3929 	cancel_work_sync(&fs_info->async_reclaim_work);
3930 
3931 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3932 		/*
3933 		 * If the cleaner thread is stopped and there are
3934 		 * block groups queued for removal, the deletion will be
3935 		 * skipped when we quit the cleaner thread.
3936 		 */
3937 		btrfs_delete_unused_bgs(fs_info);
3938 
3939 		ret = btrfs_commit_super(fs_info);
3940 		if (ret)
3941 			btrfs_err(fs_info, "commit super ret %d", ret);
3942 	}
3943 
3944 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3945 		btrfs_error_commit_super(fs_info);
3946 
3947 	kthread_stop(fs_info->transaction_kthread);
3948 	kthread_stop(fs_info->cleaner_kthread);
3949 
3950 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3951 
3952 	btrfs_free_qgroup_config(fs_info);
3953 
3954 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3955 		btrfs_info(fs_info, "at unmount delalloc count %lld",
3956 		       percpu_counter_sum(&fs_info->delalloc_bytes));
3957 	}
3958 
3959 	btrfs_sysfs_remove_mounted(fs_info);
3960 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3961 
3962 	btrfs_free_fs_roots(fs_info);
3963 
3964 	btrfs_put_block_group_cache(fs_info);
3965 
3966 	/*
3967 	 * we must make sure there are no read requests left to
3968 	 * submit after we have stopped all the workers.
3969 	 */
3970 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3971 	btrfs_stop_all_workers(fs_info);
3972 
3973 	btrfs_free_block_groups(fs_info);
3974 
3975 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3976 	free_root_pointers(fs_info, 1);
3977 
3978 	iput(fs_info->btree_inode);
3979 
3980 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3981 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
3982 		btrfsic_unmount(fs_info->fs_devices);
3983 #endif
3984 
3985 	btrfs_close_devices(fs_info->fs_devices);
3986 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3987 
3988 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3989 	percpu_counter_destroy(&fs_info->delalloc_bytes);
3990 	percpu_counter_destroy(&fs_info->bio_counter);
3991 	cleanup_srcu_struct(&fs_info->subvol_srcu);
3992 
3993 	btrfs_free_stripe_hash_table(fs_info);
3994 
3995 	__btrfs_free_block_rsv(root->orphan_block_rsv);
3996 	root->orphan_block_rsv = NULL;
3997 
3998 	mutex_lock(&fs_info->chunk_mutex);
3999 	while (!list_empty(&fs_info->pinned_chunks)) {
4000 		struct extent_map *em;
4001 
4002 		em = list_first_entry(&fs_info->pinned_chunks,
4003 				      struct extent_map, list);
4004 		list_del_init(&em->list);
4005 		free_extent_map(em);
4006 	}
4007 	mutex_unlock(&fs_info->chunk_mutex);
4008 }
4009 
4010 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4011 			  int atomic)
4012 {
4013 	int ret;
4014 	struct inode *btree_inode = buf->pages[0]->mapping->host;
4015 
4016 	ret = extent_buffer_uptodate(buf);
4017 	if (!ret)
4018 		return ret;
4019 
4020 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4021 				    parent_transid, atomic);
4022 	if (ret == -EAGAIN)
4023 		return ret;
4024 	return !ret;
4025 }
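
/*
 * Return convention for btrfs_buffer_uptodate() (derived from the code
 * above): 0 when the buffer is not uptodate or the parent transid does
 * not match, 1 when both checks pass, and -EAGAIN when atomic was set
 * and the check could not be done without blocking.
 */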
4026 
4027 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4028 {
4029 	struct btrfs_fs_info *fs_info;
4030 	struct btrfs_root *root;
4031 	u64 transid = btrfs_header_generation(buf);
4032 	int was_dirty;
4033 
4034 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4035 	/*
4036 	 * This is a fast path so only do this check if we have sanity tests
4037 	 * enabled.  Normal people shouldn't be marking dummy buffers as dirty
4038 	 * outside of the sanity tests.
4039 	 */
4040 	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
4041 		return;
4042 #endif
4043 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4044 	fs_info = root->fs_info;
4045 	btrfs_assert_tree_locked(buf);
4046 	if (transid != fs_info->generation)
4047 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4048 			buf->start, transid, fs_info->generation);
4049 	was_dirty = set_extent_buffer_dirty(buf);
4050 	if (!was_dirty)
4051 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4052 					 buf->len,
4053 					 fs_info->dirty_metadata_batch);
4054 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4055 	if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
4056 		btrfs_print_leaf(fs_info, buf);
4057 		ASSERT(0);
4058 	}
4059 #endif
4060 }
4061 
4062 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4063 					int flush_delayed)
4064 {
4065 	/*
4066 	 * looks as though older kernels can get into trouble with
4067 	 * this code; they end up stuck in balance_dirty_pages forever
4068 	 */
4069 	int ret;
4070 
4071 	if (current->flags & PF_MEMALLOC)
4072 		return;
4073 
4074 	if (flush_delayed)
4075 		btrfs_balance_delayed_items(fs_info);
4076 
4077 	ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4078 				     BTRFS_DIRTY_METADATA_THRESH);
4079 	if (ret > 0) {
4080 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4081 	}
4082 }
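
/*
 * Sketch of the throttling above: callers invoke this after dirtying
 * btree blocks; once the dirty metadata counter crosses
 * BTRFS_DIRTY_METADATA_THRESH, the caller is throttled via
 * balance_dirty_pages_ratelimited() on the btree inode mapping.
 */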
4083 
4084 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4085 {
4086 	__btrfs_btree_balance_dirty(fs_info, 1);
4087 }
4088 
4089 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4090 {
4091 	__btrfs_btree_balance_dirty(fs_info, 0);
4092 }
4093 
4094 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
4095 {
4096 	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4097 	struct btrfs_fs_info *fs_info = root->fs_info;
4098 
4099 	return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
4100 }
4101 
4102 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
4103 {
4104 	struct btrfs_super_block *sb = fs_info->super_copy;
4105 	u64 nodesize = btrfs_super_nodesize(sb);
4106 	u64 sectorsize = btrfs_super_sectorsize(sb);
4107 	int ret = 0;
4108 
4109 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
4110 		btrfs_err(fs_info, "no valid FS found");
4111 		ret = -EINVAL;
4112 	}
4113 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
4114 		btrfs_warn(fs_info, "unrecognized super flag: %llu",
4115 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
4116 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
4117 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
4118 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
4119 		ret = -EINVAL;
4120 	}
4121 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
4122 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
4123 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
4124 		ret = -EINVAL;
4125 	}
4126 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
4127 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
4128 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
4129 		ret = -EINVAL;
4130 	}
4131 
4132 	/*
4133 	 * Check sectorsize and nodesize first; the other checks will need them.
4134 	 * Accept all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
4135 	 */
4136 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
4137 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4138 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
4139 		ret = -EINVAL;
4140 	}
4141 	/* Only sectorsize == PAGE_SIZE is supported yet */
4142 	if (sectorsize != PAGE_SIZE) {
4143 		btrfs_err(fs_info,
4144 			"sectorsize %llu not supported yet, only support %lu",
4145 			sectorsize, PAGE_SIZE);
4146 		ret = -EINVAL;
4147 	}
4148 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
4149 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4150 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
4151 		ret = -EINVAL;
4152 	}
4153 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
4154 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
4155 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
4156 		ret = -EINVAL;
4157 	}
4158 
4159 	/* Root alignment check */
4160 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
4161 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
4162 			   btrfs_super_root(sb));
4163 		ret = -EINVAL;
4164 	}
4165 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
4166 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
4167 			   btrfs_super_chunk_root(sb));
4168 		ret = -EINVAL;
4169 	}
4170 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
4171 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
4172 			   btrfs_super_log_root(sb));
4173 		ret = -EINVAL;
4174 	}
4175 
4176 	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
4177 		btrfs_err(fs_info,
4178 			   "dev_item UUID does not match fsid: %pU != %pU",
4179 			   fs_info->fsid, sb->dev_item.fsid);
4180 		ret = -EINVAL;
4181 	}
4182 
4183 	/*
4184 	 * A hint to catch really bogus numbers, bitflips and the like; more
4185 	 * exact checks are done later
4186 	 */
4187 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
4188 		btrfs_err(fs_info, "bytes_used is too small %llu",
4189 			  btrfs_super_bytes_used(sb));
4190 		ret = -EINVAL;
4191 	}
4192 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
4193 		btrfs_err(fs_info, "invalid stripesize %u",
4194 			  btrfs_super_stripesize(sb));
4195 		ret = -EINVAL;
4196 	}
4197 	if (btrfs_super_num_devices(sb) > (1UL << 31))
4198 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
4199 			   btrfs_super_num_devices(sb));
4200 	if (btrfs_super_num_devices(sb) == 0) {
4201 		btrfs_err(fs_info, "number of devices is 0");
4202 		ret = -EINVAL;
4203 	}
4204 
4205 	if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4206 		btrfs_err(fs_info, "super offset mismatch %llu != %u",
4207 			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4208 		ret = -EINVAL;
4209 	}
4210 
4211 	/*
4212 	 * Catch obvious sys_chunk_array corruptions: it must hold at least
4213 	 * one key and one chunk
4214 	 */
4215 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4216 		btrfs_err(fs_info, "system chunk array too big %u > %u",
4217 			  btrfs_super_sys_array_size(sb),
4218 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4219 		ret = -EINVAL;
4220 	}
4221 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4222 			+ sizeof(struct btrfs_chunk)) {
4223 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
4224 			  btrfs_super_sys_array_size(sb),
4225 			  sizeof(struct btrfs_disk_key)
4226 			  + sizeof(struct btrfs_chunk));
4227 		ret = -EINVAL;
4228 	}
4229 
4230 	/*
4231 	 * The generation is a global counter; we'll trust it more than the
4232 	 * others, but it's still possible that it's the one that's wrong.
4233 	 */
4234 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
4235 		btrfs_warn(fs_info,
4236 			"suspicious: generation < chunk_root_generation: %llu < %llu",
4237 			btrfs_super_generation(sb),
4238 			btrfs_super_chunk_root_generation(sb));
4239 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
4240 	    && btrfs_super_cache_generation(sb) != (u64)-1)
4241 		btrfs_warn(fs_info,
4242 			"suspicious: generation < cache_generation: %llu < %llu",
4243 			btrfs_super_generation(sb),
4244 			btrfs_super_cache_generation(sb));
4245 
4246 	return ret;
4247 }
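
/*
 * For a sense of scale (a sketch of common mkfs.btrfs defaults that pass
 * the checks above): sectorsize == 4096 (== PAGE_SIZE on x86), nodesize
 * == 16384, stripesize a power of two, and bytenr ==
 * BTRFS_SUPER_INFO_OFFSET.
 */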
4248 
4249 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4250 {
4251 	mutex_lock(&fs_info->cleaner_mutex);
4252 	btrfs_run_delayed_iputs(fs_info);
4253 	mutex_unlock(&fs_info->cleaner_mutex);
4254 
4255 	down_write(&fs_info->cleanup_work_sem);
4256 	up_write(&fs_info->cleanup_work_sem);
4257 
4258 	/* cleanup FS via transaction */
4259 	btrfs_cleanup_transaction(fs_info);
4260 }
4261 
4262 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4263 {
4264 	struct btrfs_ordered_extent *ordered;
4265 
4266 	spin_lock(&root->ordered_extent_lock);
4267 	/*
4268 	 * This will just short-circuit the ordered completion code, which will
4269 	 * make sure the ordered extent gets properly cleaned up.
4270 	 */
4271 	list_for_each_entry(ordered, &root->ordered_extents,
4272 			    root_extent_list)
4273 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4274 	spin_unlock(&root->ordered_extent_lock);
4275 }
4276 
4277 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4278 {
4279 	struct btrfs_root *root;
4280 	struct list_head splice;
4281 
4282 	INIT_LIST_HEAD(&splice);
4283 
4284 	spin_lock(&fs_info->ordered_root_lock);
4285 	list_splice_init(&fs_info->ordered_roots, &splice);
4286 	while (!list_empty(&splice)) {
4287 		root = list_first_entry(&splice, struct btrfs_root,
4288 					ordered_root);
4289 		list_move_tail(&root->ordered_root,
4290 			       &fs_info->ordered_roots);
4291 
4292 		spin_unlock(&fs_info->ordered_root_lock);
4293 		btrfs_destroy_ordered_extents(root);
4294 
4295 		cond_resched();
4296 		spin_lock(&fs_info->ordered_root_lock);
4297 	}
4298 	spin_unlock(&fs_info->ordered_root_lock);
4299 }
4300 
4301 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4302 				      struct btrfs_fs_info *fs_info)
4303 {
4304 	struct rb_node *node;
4305 	struct btrfs_delayed_ref_root *delayed_refs;
4306 	struct btrfs_delayed_ref_node *ref;
4307 	int ret = 0;
4308 
4309 	delayed_refs = &trans->delayed_refs;
4310 
4311 	spin_lock(&delayed_refs->lock);
4312 	if (atomic_read(&delayed_refs->num_entries) == 0) {
4313 		spin_unlock(&delayed_refs->lock);
4314 		btrfs_info(fs_info, "delayed_refs has NO entry");
4315 		return ret;
4316 	}
4317 
4318 	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
4319 		struct btrfs_delayed_ref_head *head;
4320 		struct btrfs_delayed_ref_node *tmp;
4321 		bool pin_bytes = false;
4322 
4323 		head = rb_entry(node, struct btrfs_delayed_ref_head,
4324 				href_node);
4325 		if (!mutex_trylock(&head->mutex)) {
4326 			refcount_inc(&head->node.refs);
4327 			spin_unlock(&delayed_refs->lock);
4328 
4329 			mutex_lock(&head->mutex);
4330 			mutex_unlock(&head->mutex);
4331 			btrfs_put_delayed_ref(&head->node);
4332 			spin_lock(&delayed_refs->lock);
4333 			continue;
4334 		}
4335 		spin_lock(&head->lock);
4336 		list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
4337 						 list) {
4338 			ref->in_tree = 0;
4339 			list_del(&ref->list);
4340 			if (!list_empty(&ref->add_list))
4341 				list_del(&ref->add_list);
4342 			atomic_dec(&delayed_refs->num_entries);
4343 			btrfs_put_delayed_ref(ref);
4344 		}
4345 		if (head->must_insert_reserved)
4346 			pin_bytes = true;
4347 		btrfs_free_delayed_extent_op(head->extent_op);
4348 		delayed_refs->num_heads--;
4349 		if (head->processing == 0)
4350 			delayed_refs->num_heads_ready--;
4351 		atomic_dec(&delayed_refs->num_entries);
4352 		head->node.in_tree = 0;
4353 		rb_erase(&head->href_node, &delayed_refs->href_root);
4354 		spin_unlock(&head->lock);
4355 		spin_unlock(&delayed_refs->lock);
4356 		mutex_unlock(&head->mutex);
4357 
4358 		if (pin_bytes)
4359 			btrfs_pin_extent(fs_info, head->node.bytenr,
4360 					 head->node.num_bytes, 1);
4361 		btrfs_put_delayed_ref(&head->node);
4362 		cond_resched();
4363 		spin_lock(&delayed_refs->lock);
4364 	}
4365 
4366 	spin_unlock(&delayed_refs->lock);
4367 
4368 	return ret;
4369 }
4370 
4371 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4372 {
4373 	struct btrfs_inode *btrfs_inode;
4374 	struct list_head splice;
4375 
4376 	INIT_LIST_HEAD(&splice);
4377 
4378 	spin_lock(&root->delalloc_lock);
4379 	list_splice_init(&root->delalloc_inodes, &splice);
4380 
4381 	while (!list_empty(&splice)) {
4382 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4383 					       delalloc_inodes);
4384 
4385 		list_del_init(&btrfs_inode->delalloc_inodes);
4386 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
4387 			  &btrfs_inode->runtime_flags);
4388 		spin_unlock(&root->delalloc_lock);
4389 
4390 		btrfs_invalidate_inodes(btrfs_inode->root);
4391 
4392 		spin_lock(&root->delalloc_lock);
4393 	}
4394 
4395 	spin_unlock(&root->delalloc_lock);
4396 }
4397 
4398 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4399 {
4400 	struct btrfs_root *root;
4401 	struct list_head splice;
4402 
4403 	INIT_LIST_HEAD(&splice);
4404 
4405 	spin_lock(&fs_info->delalloc_root_lock);
4406 	list_splice_init(&fs_info->delalloc_roots, &splice);
4407 	while (!list_empty(&splice)) {
4408 		root = list_first_entry(&splice, struct btrfs_root,
4409 					 delalloc_root);
4410 		list_del_init(&root->delalloc_root);
4411 		root = btrfs_grab_fs_root(root);
4412 		BUG_ON(!root);
4413 		spin_unlock(&fs_info->delalloc_root_lock);
4414 
4415 		btrfs_destroy_delalloc_inodes(root);
4416 		btrfs_put_fs_root(root);
4417 
4418 		spin_lock(&fs_info->delalloc_root_lock);
4419 	}
4420 	spin_unlock(&fs_info->delalloc_root_lock);
4421 }
4422 
4423 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4424 					struct extent_io_tree *dirty_pages,
4425 					int mark)
4426 {
4427 	int ret;
4428 	struct extent_buffer *eb;
4429 	u64 start = 0;
4430 	u64 end;
4431 
4432 	while (1) {
4433 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4434 					    mark, NULL);
4435 		if (ret)
4436 			break;
4437 
4438 		clear_extent_bits(dirty_pages, start, end, mark);
4439 		while (start <= end) {
4440 			eb = find_extent_buffer(fs_info, start);
4441 			start += fs_info->nodesize;
4442 			if (!eb)
4443 				continue;
4444 			wait_on_extent_buffer_writeback(eb);
4445 
4446 			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4447 					       &eb->bflags))
4448 				clear_extent_buffer_dirty(eb);
4449 			free_extent_buffer_stale(eb);
4450 		}
4451 	}
4452 
4453 	return ret;
4454 }
4455 
4456 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4457 				       struct extent_io_tree *pinned_extents)
4458 {
4459 	struct extent_io_tree *unpin;
4460 	u64 start;
4461 	u64 end;
4462 	int ret;
4463 	bool loop = true;
4464 
4465 	unpin = pinned_extents;
4466 again:
4467 	while (1) {
4468 		ret = find_first_extent_bit(unpin, 0, &start, &end,
4469 					    EXTENT_DIRTY, NULL);
4470 		if (ret)
4471 			break;
4472 
4473 		clear_extent_dirty(unpin, start, end);
4474 		btrfs_error_unpin_extent_range(fs_info, start, end);
4475 		cond_resched();
4476 	}
4477 
4478 	if (loop) {
4479 		if (unpin == &fs_info->freed_extents[0])
4480 			unpin = &fs_info->freed_extents[1];
4481 		else
4482 			unpin = &fs_info->freed_extents[0];
4483 		loop = false;
4484 		goto again;
4485 	}
4486 
4487 	return 0;
4488 }
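
/*
 * Note on the two passes above: fs_info->pinned_extents points at one of
 * fs_info->freed_extents[0]/[1] (they are swapped at transaction commit
 * time), so the "loop" dance clears both trees regardless of which one
 * is currently active.
 */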
4489 
4490 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4491 {
4492 	struct inode *inode;
4493 
4494 	inode = cache->io_ctl.inode;
4495 	if (inode) {
4496 		invalidate_inode_pages2(inode->i_mapping);
4497 		BTRFS_I(inode)->generation = 0;
4498 		cache->io_ctl.inode = NULL;
4499 		iput(inode);
4500 	}
4501 	btrfs_put_block_group(cache);
4502 }
4503 
4504 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4505 			     struct btrfs_fs_info *fs_info)
4506 {
4507 	struct btrfs_block_group_cache *cache;
4508 
4509 	spin_lock(&cur_trans->dirty_bgs_lock);
4510 	while (!list_empty(&cur_trans->dirty_bgs)) {
4511 		cache = list_first_entry(&cur_trans->dirty_bgs,
4512 					 struct btrfs_block_group_cache,
4513 					 dirty_list);
4514 		if (!cache) {
4515 			btrfs_err(fs_info, "orphan block group dirty_bgs list");
4516 			spin_unlock(&cur_trans->dirty_bgs_lock);
4517 			return;
4518 		}
4519 
4520 		if (!list_empty(&cache->io_list)) {
4521 			spin_unlock(&cur_trans->dirty_bgs_lock);
4522 			list_del_init(&cache->io_list);
4523 			btrfs_cleanup_bg_io(cache);
4524 			spin_lock(&cur_trans->dirty_bgs_lock);
4525 		}
4526 
4527 		list_del_init(&cache->dirty_list);
4528 		spin_lock(&cache->lock);
4529 		cache->disk_cache_state = BTRFS_DC_ERROR;
4530 		spin_unlock(&cache->lock);
4531 
4532 		spin_unlock(&cur_trans->dirty_bgs_lock);
4533 		btrfs_put_block_group(cache);
4534 		spin_lock(&cur_trans->dirty_bgs_lock);
4535 	}
4536 	spin_unlock(&cur_trans->dirty_bgs_lock);
4537 
4538 	while (!list_empty(&cur_trans->io_bgs)) {
4539 		cache = list_first_entry(&cur_trans->io_bgs,
4540 					 struct btrfs_block_group_cache,
4541 					 io_list);
4542 		if (!cache) {
4543 			btrfs_err(fs_info, "orphan block group on io_bgs list");
4544 			return;
4545 		}
4546 
4547 		list_del_init(&cache->io_list);
4548 		spin_lock(&cache->lock);
4549 		cache->disk_cache_state = BTRFS_DC_ERROR;
4550 		spin_unlock(&cache->lock);
4551 		btrfs_cleanup_bg_io(cache);
4552 	}
4553 }
4554 
4555 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4556 				   struct btrfs_fs_info *fs_info)
4557 {
4558 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4559 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4560 	ASSERT(list_empty(&cur_trans->io_bgs));
4561 
4562 	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4563 
4564 	cur_trans->state = TRANS_STATE_COMMIT_START;
4565 	wake_up(&fs_info->transaction_blocked_wait);
4566 
4567 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4568 	wake_up(&fs_info->transaction_wait);
4569 
4570 	btrfs_destroy_delayed_inodes(fs_info);
4571 	btrfs_assert_delayed_root_empty(fs_info);
4572 
4573 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4574 				     EXTENT_DIRTY);
4575 	btrfs_destroy_pinned_extent(fs_info,
4576 				    fs_info->pinned_extents);
4577 
4578 	cur_trans->state = TRANS_STATE_COMPLETED;
4579 	wake_up(&cur_trans->commit_wait);
4580 }
4581 
4582 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4583 {
4584 	struct btrfs_transaction *t;
4585 
4586 	mutex_lock(&fs_info->transaction_kthread_mutex);
4587 
4588 	spin_lock(&fs_info->trans_lock);
4589 	while (!list_empty(&fs_info->trans_list)) {
4590 		t = list_first_entry(&fs_info->trans_list,
4591 				     struct btrfs_transaction, list);
4592 		if (t->state >= TRANS_STATE_COMMIT_START) {
4593 			refcount_inc(&t->use_count);
4594 			spin_unlock(&fs_info->trans_lock);
4595 			btrfs_wait_for_commit(fs_info, t->transid);
4596 			btrfs_put_transaction(t);
4597 			spin_lock(&fs_info->trans_lock);
4598 			continue;
4599 		}
4600 		if (t == fs_info->running_transaction) {
4601 			t->state = TRANS_STATE_COMMIT_DOING;
4602 			spin_unlock(&fs_info->trans_lock);
4603 			/*
4604 			 * We wait for 0 num_writers since we don't hold a trans
4605 			 * handle open currently for this transaction.
4606 			 */
4607 			wait_event(t->writer_wait,
4608 				   atomic_read(&t->num_writers) == 0);
4609 		} else {
4610 			spin_unlock(&fs_info->trans_lock);
4611 		}
4612 		btrfs_cleanup_one_transaction(t, fs_info);
4613 
4614 		spin_lock(&fs_info->trans_lock);
4615 		if (t == fs_info->running_transaction)
4616 			fs_info->running_transaction = NULL;
4617 		list_del_init(&t->list);
4618 		spin_unlock(&fs_info->trans_lock);
4619 
4620 		btrfs_put_transaction(t);
4621 		trace_btrfs_transaction_commit(fs_info->tree_root);
4622 		spin_lock(&fs_info->trans_lock);
4623 	}
4624 	spin_unlock(&fs_info->trans_lock);
4625 	btrfs_destroy_all_ordered_extents(fs_info);
4626 	btrfs_destroy_delayed_inodes(fs_info);
4627 	btrfs_assert_delayed_root_empty(fs_info);
4628 	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4629 	btrfs_destroy_all_delalloc_inodes(fs_info);
4630 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4631 
4632 	return 0;
4633 }
4634 
4635 static struct btrfs_fs_info *btree_fs_info(void *private_data)
4636 {
4637 	struct inode *inode = private_data;
4638 	return btrfs_sb(inode->i_sb);
4639 }
4640 
4641 static const struct extent_io_ops btree_extent_io_ops = {
4642 	/* mandatory callbacks */
4643 	.submit_bio_hook = btree_submit_bio_hook,
4644 	.readpage_end_io_hook = btree_readpage_end_io_hook,
4645 	/* note we're sharing with inode.c for the merge bio hook */
4646 	.merge_bio_hook = btrfs_merge_bio_hook,
4647 	.readpage_io_failed_hook = btree_io_failed_hook,
4648 	.set_range_writeback = btrfs_set_range_writeback,
4649 	.tree_fs_info = btree_fs_info,
4650 
4651 	/* optional callbacks */
4652 };
4653