xref: /openbmc/linux/fs/btrfs/disk-io.c (revision 3ddc8b84)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/blkdev.h>
8 #include <linux/radix-tree.h>
9 #include <linux/writeback.h>
10 #include <linux/workqueue.h>
11 #include <linux/kthread.h>
12 #include <linux/slab.h>
13 #include <linux/migrate.h>
14 #include <linux/ratelimit.h>
15 #include <linux/uuid.h>
16 #include <linux/semaphore.h>
17 #include <linux/error-injection.h>
18 #include <linux/crc32c.h>
19 #include <linux/sched/mm.h>
20 #include <asm/unaligned.h>
21 #include <crypto/hash.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "btrfs_inode.h"
26 #include "bio.h"
27 #include "print-tree.h"
28 #include "locking.h"
29 #include "tree-log.h"
30 #include "free-space-cache.h"
31 #include "free-space-tree.h"
32 #include "check-integrity.h"
33 #include "rcu-string.h"
34 #include "dev-replace.h"
35 #include "raid56.h"
36 #include "sysfs.h"
37 #include "qgroup.h"
38 #include "compression.h"
39 #include "tree-checker.h"
40 #include "ref-verify.h"
41 #include "block-group.h"
42 #include "discard.h"
43 #include "space-info.h"
44 #include "zoned.h"
45 #include "subpage.h"
46 #include "fs.h"
47 #include "accessors.h"
48 #include "extent-tree.h"
49 #include "root-tree.h"
50 #include "defrag.h"
51 #include "uuid-tree.h"
52 #include "relocation.h"
53 #include "scrub.h"
54 #include "super.h"
55 
56 #define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
57 				 BTRFS_HEADER_FLAG_RELOC |\
58 				 BTRFS_SUPER_FLAG_ERROR |\
59 				 BTRFS_SUPER_FLAG_SEEDING |\
60 				 BTRFS_SUPER_FLAG_METADUMP |\
61 				 BTRFS_SUPER_FLAG_METADUMP_V2)
62 
63 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
64 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
65 
66 static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
67 {
68 	if (fs_info->csum_shash)
69 		crypto_free_shash(fs_info->csum_shash);
70 }
71 
72 /*
73  * Compute the csum of a btree block and store the result in the provided buffer.
74  */
75 static void csum_tree_block(struct extent_buffer *buf, u8 *result)
76 {
77 	struct btrfs_fs_info *fs_info = buf->fs_info;
78 	const int num_pages = num_extent_pages(buf);
79 	const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
80 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
81 	char *kaddr;
82 	int i;
83 
84 	shash->tfm = fs_info->csum_shash;
85 	crypto_shash_init(shash);
86 	kaddr = page_address(buf->pages[0]) + offset_in_page(buf->start);
87 	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
88 			    first_page_part - BTRFS_CSUM_SIZE);
89 
90 	for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
91 		kaddr = page_address(buf->pages[i]);
92 		crypto_shash_update(shash, kaddr, PAGE_SIZE);
93 	}
94 	memset(result, 0, BTRFS_CSUM_SIZE);
95 	crypto_shash_final(shash, result);
96 }
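
/*
 * Example (illustrative): the read side verifies a block by recomputing
 * the csum with csum_tree_block() and comparing it against the csum
 * stored in the block's header, as btrfs_validate_extent_buffer() does
 * further below:
 *
 *	u8 result[BTRFS_CSUM_SIZE];
 *
 *	csum_tree_block(eb, result);
 *	if (memcmp(result, header_csum, fs_info->csum_size) != 0)
 *		ret = -EUCLEAN;
 */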
97 
98 /*
99  * We can't consider a given block up to date unless the transid of the
100  * block matches the transid in the parent node's pointer.  This is how we
101  * detect blocks that either didn't get written at all or got written
102  * in the wrong place.
103  */
104 int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
105 {
106 	if (!extent_buffer_uptodate(eb))
107 		return 0;
108 
109 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
110 		return 1;
111 
112 	if (atomic)
113 		return -EAGAIN;
114 
115 	if (!extent_buffer_uptodate(eb) ||
116 	    btrfs_header_generation(eb) != parent_transid) {
117 		btrfs_err_rl(eb->fs_info,
118 "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
119 			eb->start, eb->read_mirror,
120 			parent_transid, btrfs_header_generation(eb));
121 		clear_extent_buffer_uptodate(eb);
122 		return 0;
123 	}
124 	return 1;
125 }
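
/*
 * Example (illustrative): a typical non-atomic caller passes the
 * generation recorded in the parent node's pointer ("parent_gen" here
 * stands for the caller's value):
 *
 *	ret = btrfs_buffer_uptodate(eb, parent_gen, 0);
 *	if (ret == 0)
 *		... transid mismatch or not uptodate, try another mirror ...
 *
 * With @atomic set, a generation mismatch returns -EAGAIN instead, and
 * the caller must redo the check from a context that can report the
 * failure and clear the uptodate bit.
 */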
126 
127 static bool btrfs_supported_super_csum(u16 csum_type)
128 {
129 	switch (csum_type) {
130 	case BTRFS_CSUM_TYPE_CRC32:
131 	case BTRFS_CSUM_TYPE_XXHASH:
132 	case BTRFS_CSUM_TYPE_SHA256:
133 	case BTRFS_CSUM_TYPE_BLAKE2:
134 		return true;
135 	default:
136 		return false;
137 	}
138 }
139 
140 /*
141  * Return 0 if the superblock checksum type matches the checksum value of that
142  * algorithm. Pass the raw disk superblock data.
143  */
144 int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
145 			   const struct btrfs_super_block *disk_sb)
146 {
147 	char result[BTRFS_CSUM_SIZE];
148 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
149 
150 	shash->tfm = fs_info->csum_shash;
151 
152 	/*
153 	 * The super_block structure does not span the whole
154 	 * BTRFS_SUPER_INFO_SIZE range; we expect that the unused space is
155 	 * filled with zeros and is included in the checksum.
156 	 */
157 	crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
158 			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
159 
160 	if (memcmp(disk_sb->csum, result, fs_info->csum_size))
161 		return 1;
162 
163 	return 0;
164 }
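
/*
 * Example (illustrative): during mount the superblock is read as a raw
 * BTRFS_SUPER_INFO_SIZE buffer and verified before any of its fields are
 * trusted. This assumes fs_info->csum_shash was already allocated from
 * the csum type found in the superblock:
 *
 *	if (btrfs_check_super_csum(fs_info, disk_sb)) {
 *		btrfs_err(fs_info, "superblock checksum mismatch");
 *		return -EINVAL;
 *	}
 */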
165 
166 static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
167 				      int mirror_num)
168 {
169 	struct btrfs_fs_info *fs_info = eb->fs_info;
170 	int i, num_pages = num_extent_pages(eb);
171 	int ret = 0;
172 
173 	if (sb_rdonly(fs_info->sb))
174 		return -EROFS;
175 
176 	for (i = 0; i < num_pages; i++) {
177 		struct page *p = eb->pages[i];
178 		u64 start = max_t(u64, eb->start, page_offset(p));
179 		u64 end = min_t(u64, eb->start + eb->len, page_offset(p) + PAGE_SIZE);
180 		u32 len = end - start;
181 
182 		ret = btrfs_repair_io_failure(fs_info, 0, start, len,
183 				start, p, offset_in_page(start), mirror_num);
184 		if (ret)
185 			break;
186 	}
187 
188 	return ret;
189 }
190 
191 /*
192  * Helper to read a given tree block, doing retries as required when
193  * the checksums don't match and we have alternate mirrors to try.
194  *
195  * @check:		expected tree parentness check, see the comments of the
196  *			structure for details.
197  */
198 int btrfs_read_extent_buffer(struct extent_buffer *eb,
199 			     struct btrfs_tree_parent_check *check)
200 {
201 	struct btrfs_fs_info *fs_info = eb->fs_info;
202 	int failed = 0;
203 	int ret;
204 	int num_copies = 0;
205 	int mirror_num = 0;
206 	int failed_mirror = 0;
207 
208 	ASSERT(check);
209 
210 	while (1) {
211 		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
212 		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
213 		if (!ret)
214 			break;
215 
216 		num_copies = btrfs_num_copies(fs_info,
217 					      eb->start, eb->len);
218 		if (num_copies == 1)
219 			break;
220 
221 		if (!failed_mirror) {
222 			failed = 1;
223 			failed_mirror = eb->read_mirror;
224 		}
225 
226 		mirror_num++;
227 		if (mirror_num == failed_mirror)
228 			mirror_num++;
229 
230 		if (mirror_num > num_copies)
231 			break;
232 	}
233 
234 	if (failed && !ret && failed_mirror)
235 		btrfs_repair_eb_io_failure(eb, failed_mirror);
236 
237 	return ret;
238 }
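
/*
 * Worked example of the retry loop above (illustrative): with two copies
 * (e.g. RAID1 metadata) and the initial read (mirror_num == 0) failing on
 * mirror 1, the loop records failed_mirror = 1 and retries with
 * mirror_num == 2 (1 is skipped as the failed mirror). Once mirror_num
 * exceeds num_copies the loop gives up; if a later mirror succeeded, the
 * failed one is rewritten via btrfs_repair_eb_io_failure().
 */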
239 
240 /*
241  * Checksum a dirty tree block before IO.
242  */
243 blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
244 {
245 	struct extent_buffer *eb = bbio->private;
246 	struct btrfs_fs_info *fs_info = eb->fs_info;
247 	u64 found_start = btrfs_header_bytenr(eb);
248 	u8 result[BTRFS_CSUM_SIZE];
249 	int ret;
250 
251 	/* Btree blocks are always contiguous on disk. */
252 	if (WARN_ON_ONCE(bbio->file_offset != eb->start))
253 		return BLK_STS_IOERR;
254 	if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
255 		return BLK_STS_IOERR;
256 
257 	if (test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags)) {
258 		WARN_ON_ONCE(found_start != 0);
259 		return BLK_STS_OK;
260 	}
261 
262 	if (WARN_ON_ONCE(found_start != eb->start))
263 		return BLK_STS_IOERR;
264 	if (WARN_ON(!btrfs_page_test_uptodate(fs_info, eb->pages[0], eb->start,
265 					      eb->len)))
266 		return BLK_STS_IOERR;
267 
268 	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
269 				    offsetof(struct btrfs_header, fsid),
270 				    BTRFS_FSID_SIZE) == 0);
271 	csum_tree_block(eb, result);
272 
273 	if (btrfs_header_level(eb))
274 		ret = btrfs_check_node(eb);
275 	else
276 		ret = btrfs_check_leaf(eb);
277 
278 	if (ret < 0)
279 		goto error;
280 
281 	/*
282 	 * Also check the generation: the eb that reached here must be newer
283 	 * than the last committed one, or something has gone seriously wrong.
284 	 */
285 	if (unlikely(btrfs_header_generation(eb) <= fs_info->last_trans_committed)) {
286 		ret = -EUCLEAN;
287 		btrfs_err(fs_info,
288 			"block=%llu bad generation, have %llu expect > %llu",
289 			  eb->start, btrfs_header_generation(eb),
290 			  fs_info->last_trans_committed);
291 		goto error;
292 	}
293 	write_extent_buffer(eb, result, 0, fs_info->csum_size);
294 	return BLK_STS_OK;
295 
296 error:
297 	btrfs_print_tree(eb, 0);
298 	btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
299 		  eb->start);
300 	/*
301 	 * Be noisy if this is an extent buffer from a log tree. We don't abort
302 	 * a transaction in case there's a bad log tree extent buffer; we just
303 	 * fall back to a transaction commit. Still we want to know when there is
304 	 * a bad log tree extent buffer, as that may signal a bug somewhere.
305 	 */
306 	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
307 		btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
308 	return errno_to_blk_status(ret);
309 }
310 
311 static bool check_tree_block_fsid(struct extent_buffer *eb)
312 {
313 	struct btrfs_fs_info *fs_info = eb->fs_info;
314 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
315 	u8 fsid[BTRFS_FSID_SIZE];
316 
317 	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
318 			   BTRFS_FSID_SIZE);
319 
320 	/*
321 	 * alloc_fs_devices() copies the fsid into metadata_uuid if the
322 	 * metadata_uuid is unset in the superblock, including for a seed device.
323 	 * So, we can use fs_devices->metadata_uuid.
324 	 */
325 	if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
326 		return false;
327 
328 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
329 		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
330 			return false;
331 
332 	return true;
333 }
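
/*
 * Note (illustrative): when the METADATA_UUID incompat flag is set, tree
 * blocks are stamped with the superblock's metadata_uuid rather than its
 * fsid. alloc_fs_devices() hides that distinction by copying the fsid
 * into fs_devices->metadata_uuid when the flag is absent, which is why
 * the comparison above can always use fs_devices->metadata_uuid.
 */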
334 
335 /* Do basic extent buffer checks at read time */
336 int btrfs_validate_extent_buffer(struct extent_buffer *eb,
337 				 struct btrfs_tree_parent_check *check)
338 {
339 	struct btrfs_fs_info *fs_info = eb->fs_info;
340 	u64 found_start;
341 	const u32 csum_size = fs_info->csum_size;
342 	u8 found_level;
343 	u8 result[BTRFS_CSUM_SIZE];
344 	const u8 *header_csum;
345 	int ret = 0;
346 
347 	ASSERT(check);
348 
349 	found_start = btrfs_header_bytenr(eb);
350 	if (found_start != eb->start) {
351 		btrfs_err_rl(fs_info,
352 			"bad tree block start, mirror %u want %llu have %llu",
353 			     eb->read_mirror, eb->start, found_start);
354 		ret = -EIO;
355 		goto out;
356 	}
357 	if (check_tree_block_fsid(eb)) {
358 		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
359 			     eb->start, eb->read_mirror);
360 		ret = -EIO;
361 		goto out;
362 	}
363 	found_level = btrfs_header_level(eb);
364 	if (found_level >= BTRFS_MAX_LEVEL) {
365 		btrfs_err(fs_info,
366 			"bad tree block level, mirror %u level %d on logical %llu",
367 			eb->read_mirror, btrfs_header_level(eb), eb->start);
368 		ret = -EIO;
369 		goto out;
370 	}
371 
372 	csum_tree_block(eb, result);
373 	header_csum = page_address(eb->pages[0]) +
374 		get_eb_offset_in_page(eb, offsetof(struct btrfs_header, csum));
375 
376 	if (memcmp(result, header_csum, csum_size) != 0) {
377 		btrfs_warn_rl(fs_info,
378 "checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d",
379 			      eb->start, eb->read_mirror,
380 			      CSUM_FMT_VALUE(csum_size, header_csum),
381 			      CSUM_FMT_VALUE(csum_size, result),
382 			      btrfs_header_level(eb));
383 		ret = -EUCLEAN;
384 		goto out;
385 	}
386 
387 	if (found_level != check->level) {
388 		btrfs_err(fs_info,
389 		"level verify failed on logical %llu mirror %u wanted %u found %u",
390 			  eb->start, eb->read_mirror, check->level, found_level);
391 		ret = -EIO;
392 		goto out;
393 	}
394 	if (unlikely(check->transid &&
395 		     btrfs_header_generation(eb) != check->transid)) {
396 		btrfs_err_rl(eb->fs_info,
397 "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
398 				eb->start, eb->read_mirror, check->transid,
399 				btrfs_header_generation(eb));
400 		ret = -EIO;
401 		goto out;
402 	}
403 	if (check->has_first_key) {
404 		struct btrfs_key *expect_key = &check->first_key;
405 		struct btrfs_key found_key;
406 
407 		if (found_level)
408 			btrfs_node_key_to_cpu(eb, &found_key, 0);
409 		else
410 			btrfs_item_key_to_cpu(eb, &found_key, 0);
411 		if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
412 			btrfs_err(fs_info,
413 "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
414 				  eb->start, check->transid,
415 				  expect_key->objectid,
416 				  expect_key->type, expect_key->offset,
417 				  found_key.objectid, found_key.type,
418 				  found_key.offset);
419 			ret = -EUCLEAN;
420 			goto out;
421 		}
422 	}
423 	if (check->owner_root) {
424 		ret = btrfs_check_eb_owner(eb, check->owner_root);
425 		if (ret < 0)
426 			goto out;
427 	}
428 
429 	/*
430 	 * If this is a leaf block and it is corrupt, set the corrupt bit so
431 	 * that we don't try to read the other copies of this block, just
432 	 * return -EIO.
433 	 */
434 	if (found_level == 0 && btrfs_check_leaf(eb)) {
435 		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
436 		ret = -EIO;
437 	}
438 
439 	if (found_level > 0 && btrfs_check_node(eb))
440 		ret = -EIO;
441 
442 	if (ret)
443 		btrfs_err(fs_info,
444 		"read time tree block corruption detected on logical %llu mirror %u",
445 			  eb->start, eb->read_mirror);
446 out:
447 	return ret;
448 }
449 
450 #ifdef CONFIG_MIGRATION
451 static int btree_migrate_folio(struct address_space *mapping,
452 		struct folio *dst, struct folio *src, enum migrate_mode mode)
453 {
454 	/*
455 	 * We can't safely write a btree page from here;
456 	 * we haven't done the locking hook.
457 	 */
458 	if (folio_test_dirty(src))
459 		return -EAGAIN;
460 	/*
461 	 * Buffers may be managed in a filesystem specific way.
462 	 * We must have no buffers or drop them.
463 	 */
464 	if (folio_get_private(src) &&
465 	    !filemap_release_folio(src, GFP_KERNEL))
466 		return -EAGAIN;
467 	return migrate_folio(mapping, dst, src, mode);
468 }
469 #else
470 #define btree_migrate_folio NULL
471 #endif
472 
473 static int btree_writepages(struct address_space *mapping,
474 			    struct writeback_control *wbc)
475 {
476 	struct btrfs_fs_info *fs_info;
477 	int ret;
478 
479 	if (wbc->sync_mode == WB_SYNC_NONE) {
480 
481 		if (wbc->for_kupdate)
482 			return 0;
483 
484 		fs_info = BTRFS_I(mapping->host)->root->fs_info;
485 		/* This is a bit racy, but that's ok. */
486 		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
487 					     BTRFS_DIRTY_METADATA_THRESH,
488 					     fs_info->dirty_metadata_batch);
489 		if (ret < 0)
490 			return 0;
491 	}
492 	return btree_write_cache_pages(mapping, wbc);
493 }
494 
495 static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
496 {
497 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
498 		return false;
499 
500 	return try_release_extent_buffer(&folio->page);
501 }
502 
503 static void btree_invalidate_folio(struct folio *folio, size_t offset,
504 				 size_t length)
505 {
506 	struct extent_io_tree *tree;
507 	tree = &BTRFS_I(folio->mapping->host)->io_tree;
508 	extent_invalidate_folio(tree, folio, offset);
509 	btree_release_folio(folio, GFP_NOFS);
510 	if (folio_get_private(folio)) {
511 		btrfs_warn(BTRFS_I(folio->mapping->host)->root->fs_info,
512 			   "folio private not zero on folio %llu",
513 			   (unsigned long long)folio_pos(folio));
514 		folio_detach_private(folio);
515 	}
516 }
517 
518 #ifdef DEBUG
519 static bool btree_dirty_folio(struct address_space *mapping,
520 		struct folio *folio)
521 {
522 	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
523 	struct btrfs_subpage_info *spi = fs_info->subpage_info;
524 	struct btrfs_subpage *subpage;
525 	struct extent_buffer *eb;
526 	int cur_bit = 0;
527 	u64 page_start = folio_pos(folio);
528 
529 	if (fs_info->sectorsize == PAGE_SIZE) {
530 		eb = folio_get_private(folio);
531 		BUG_ON(!eb);
532 		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
533 		BUG_ON(!atomic_read(&eb->refs));
534 		btrfs_assert_tree_write_locked(eb);
535 		return filemap_dirty_folio(mapping, folio);
536 	}
537 
538 	ASSERT(spi);
539 	subpage = folio_get_private(folio);
540 
541 	for (cur_bit = spi->dirty_offset;
542 	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
543 	     cur_bit++) {
544 		unsigned long flags;
545 		u64 cur;
546 
547 		spin_lock_irqsave(&subpage->lock, flags);
548 		if (!test_bit(cur_bit, subpage->bitmaps)) {
549 			spin_unlock_irqrestore(&subpage->lock, flags);
550 			continue;
551 		}
552 		spin_unlock_irqrestore(&subpage->lock, flags);
553 		cur = page_start + cur_bit * fs_info->sectorsize;
554 
555 		eb = find_extent_buffer(fs_info, cur);
556 		ASSERT(eb);
557 		ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
558 		ASSERT(atomic_read(&eb->refs));
559 		btrfs_assert_tree_write_locked(eb);
560 		free_extent_buffer(eb);
561 
562 		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
563 	}
564 	return filemap_dirty_folio(mapping, folio);
565 }
566 #else
567 #define btree_dirty_folio filemap_dirty_folio
568 #endif
569 
570 static const struct address_space_operations btree_aops = {
571 	.writepages	= btree_writepages,
572 	.release_folio	= btree_release_folio,
573 	.invalidate_folio = btree_invalidate_folio,
574 	.migrate_folio	= btree_migrate_folio,
575 	.dirty_folio	= btree_dirty_folio,
576 };
577 
578 struct extent_buffer *btrfs_find_create_tree_block(
579 						struct btrfs_fs_info *fs_info,
580 						u64 bytenr, u64 owner_root,
581 						int level)
582 {
583 	if (btrfs_is_testing(fs_info))
584 		return alloc_test_extent_buffer(fs_info, bytenr);
585 	return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
586 }
587 
588 /*
589  * Read the tree block at logical address @bytenr and do basic but critical
590  * verification.
591  *
592  * @check:		expected tree parentness check, see comments of the
593  *			structure for details.
594  */
595 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
596 				      struct btrfs_tree_parent_check *check)
597 {
598 	struct extent_buffer *buf = NULL;
599 	int ret;
600 
601 	ASSERT(check);
602 
603 	buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
604 					   check->level);
605 	if (IS_ERR(buf))
606 		return buf;
607 
608 	ret = btrfs_read_extent_buffer(buf, check);
609 	if (ret) {
610 		free_extent_buffer_stale(buf);
611 		return ERR_PTR(ret);
612 	}
613 	if (btrfs_check_eb_owner(buf, check->owner_root)) {
614 		free_extent_buffer_stale(buf);
615 		return ERR_PTR(-EUCLEAN);
616 	}
617 	return buf;
619 }
620 
621 static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
622 			 u64 objectid)
623 {
624 	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
625 
626 	memset(&root->root_key, 0, sizeof(root->root_key));
627 	memset(&root->root_item, 0, sizeof(root->root_item));
628 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
629 	root->fs_info = fs_info;
630 	root->root_key.objectid = objectid;
631 	root->node = NULL;
632 	root->commit_root = NULL;
633 	root->state = 0;
634 	RB_CLEAR_NODE(&root->rb_node);
635 
636 	root->last_trans = 0;
637 	root->free_objectid = 0;
638 	root->nr_delalloc_inodes = 0;
639 	root->nr_ordered_extents = 0;
640 	root->inode_tree = RB_ROOT;
641 	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
642 
643 	btrfs_init_root_block_rsv(root);
644 
645 	INIT_LIST_HEAD(&root->dirty_list);
646 	INIT_LIST_HEAD(&root->root_list);
647 	INIT_LIST_HEAD(&root->delalloc_inodes);
648 	INIT_LIST_HEAD(&root->delalloc_root);
649 	INIT_LIST_HEAD(&root->ordered_extents);
650 	INIT_LIST_HEAD(&root->ordered_root);
651 	INIT_LIST_HEAD(&root->reloc_dirty_list);
652 	INIT_LIST_HEAD(&root->logged_list[0]);
653 	INIT_LIST_HEAD(&root->logged_list[1]);
654 	spin_lock_init(&root->inode_lock);
655 	spin_lock_init(&root->delalloc_lock);
656 	spin_lock_init(&root->ordered_extent_lock);
657 	spin_lock_init(&root->accounting_lock);
658 	spin_lock_init(&root->log_extents_lock[0]);
659 	spin_lock_init(&root->log_extents_lock[1]);
660 	spin_lock_init(&root->qgroup_meta_rsv_lock);
661 	mutex_init(&root->objectid_mutex);
662 	mutex_init(&root->log_mutex);
663 	mutex_init(&root->ordered_extent_mutex);
664 	mutex_init(&root->delalloc_mutex);
665 	init_waitqueue_head(&root->qgroup_flush_wait);
666 	init_waitqueue_head(&root->log_writer_wait);
667 	init_waitqueue_head(&root->log_commit_wait[0]);
668 	init_waitqueue_head(&root->log_commit_wait[1]);
669 	INIT_LIST_HEAD(&root->log_ctxs[0]);
670 	INIT_LIST_HEAD(&root->log_ctxs[1]);
671 	atomic_set(&root->log_commit[0], 0);
672 	atomic_set(&root->log_commit[1], 0);
673 	atomic_set(&root->log_writers, 0);
674 	atomic_set(&root->log_batch, 0);
675 	refcount_set(&root->refs, 1);
676 	atomic_set(&root->snapshot_force_cow, 0);
677 	atomic_set(&root->nr_swapfiles, 0);
678 	root->log_transid = 0;
679 	root->log_transid_committed = -1;
680 	root->last_log_commit = 0;
681 	root->anon_dev = 0;
682 	if (!dummy) {
683 		extent_io_tree_init(fs_info, &root->dirty_log_pages,
684 				    IO_TREE_ROOT_DIRTY_LOG_PAGES);
685 		extent_io_tree_init(fs_info, &root->log_csum_range,
686 				    IO_TREE_LOG_CSUM_RANGE);
687 	}
688 
689 	spin_lock_init(&root->root_item_lock);
690 	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
691 #ifdef CONFIG_BTRFS_DEBUG
692 	INIT_LIST_HEAD(&root->leak_list);
693 	spin_lock(&fs_info->fs_roots_radix_lock);
694 	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
695 	spin_unlock(&fs_info->fs_roots_radix_lock);
696 #endif
697 }
698 
699 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
700 					   u64 objectid, gfp_t flags)
701 {
702 	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
703 	if (root)
704 		__setup_root(root, fs_info, objectid);
705 	return root;
706 }
707 
708 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
709 /* Should only be used by the testing infrastructure */
710 struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
711 {
712 	struct btrfs_root *root;
713 
714 	if (!fs_info)
715 		return ERR_PTR(-EINVAL);
716 
717 	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
718 	if (!root)
719 		return ERR_PTR(-ENOMEM);
720 
721 	/* Reset the dummy block allocation cursor used by the selftests. */
722 	root->alloc_bytenr = 0;
723 
724 	return root;
725 }
726 #endif
727 
728 static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
729 {
730 	const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
731 	const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);
732 
733 	return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
734 }
735 
736 static int global_root_key_cmp(const void *k, const struct rb_node *node)
737 {
738 	const struct btrfs_key *key = k;
739 	const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);
740 
741 	return btrfs_comp_cpu_keys(key, &root->root_key);
742 }
743 
744 int btrfs_global_root_insert(struct btrfs_root *root)
745 {
746 	struct btrfs_fs_info *fs_info = root->fs_info;
747 	struct rb_node *tmp;
748 	int ret = 0;
749 
750 	write_lock(&fs_info->global_root_lock);
751 	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
752 	write_unlock(&fs_info->global_root_lock);
753 
754 	if (tmp) {
755 		ret = -EEXIST;
756 		btrfs_warn(fs_info, "global root %llu %llu already exists",
757 				root->root_key.objectid, root->root_key.offset);
758 	}
759 	return ret;
760 }
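
/*
 * Example (illustrative): with the extent-tree-v2 layout, global roots
 * are keyed by (objectid, BTRFS_ROOT_ITEM_KEY, global_root_id), so a
 * second csum root would be inserted here with a root_key of
 * (BTRFS_CSUM_TREE_OBJECTID, BTRFS_ROOT_ITEM_KEY, 1). rb_find_add()
 * orders the tree with global_root_cmp() and returns the existing node
 * on a duplicate key, which is reported as -EEXIST above.
 */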
761 
762 void btrfs_global_root_delete(struct btrfs_root *root)
763 {
764 	struct btrfs_fs_info *fs_info = root->fs_info;
765 
766 	write_lock(&fs_info->global_root_lock);
767 	rb_erase(&root->rb_node, &fs_info->global_root_tree);
768 	write_unlock(&fs_info->global_root_lock);
769 }
770 
771 struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
772 				     struct btrfs_key *key)
773 {
774 	struct rb_node *node;
775 	struct btrfs_root *root = NULL;
776 
777 	read_lock(&fs_info->global_root_lock);
778 	node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
779 	if (node)
780 		root = container_of(node, struct btrfs_root, rb_node);
781 	read_unlock(&fs_info->global_root_lock);
782 
783 	return root;
784 }
785 
786 static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
787 {
788 	struct btrfs_block_group *block_group;
789 	u64 ret;
790 
791 	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
792 		return 0;
793 
794 	if (bytenr)
795 		block_group = btrfs_lookup_block_group(fs_info, bytenr);
796 	else
797 		block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
798 	ASSERT(block_group);
799 	if (!block_group)
800 		return 0;
801 	ret = block_group->global_root_id;
802 	btrfs_put_block_group(block_group);
803 
804 	return ret;
805 }
806 
807 struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
808 {
809 	struct btrfs_key key = {
810 		.objectid = BTRFS_CSUM_TREE_OBJECTID,
811 		.type = BTRFS_ROOT_ITEM_KEY,
812 		.offset = btrfs_global_root_id(fs_info, bytenr),
813 	};
814 
815 	return btrfs_global_root(fs_info, &key);
816 }
817 
818 struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
819 {
820 	struct btrfs_key key = {
821 		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
822 		.type = BTRFS_ROOT_ITEM_KEY,
823 		.offset = btrfs_global_root_id(fs_info, bytenr),
824 	};
825 
826 	return btrfs_global_root(fs_info, &key);
827 }
828 
829 struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
830 {
831 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
832 		return fs_info->block_group_root;
833 	return btrfs_extent_root(fs_info, 0);
834 }
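
/*
 * Example (illustrative): a caller that needs the checksums for a data
 * extent first resolves the csum root covering that bytenr, then searches
 * it for BTRFS_EXTENT_CSUM_KEY items. Without extent-tree-v2,
 * btrfs_global_root_id() returns 0 and this always yields the single
 * csum root:
 *
 *	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
 */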
835 
836 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
837 				     u64 objectid)
838 {
839 	struct btrfs_fs_info *fs_info = trans->fs_info;
840 	struct extent_buffer *leaf;
841 	struct btrfs_root *tree_root = fs_info->tree_root;
842 	struct btrfs_root *root;
843 	struct btrfs_key key;
844 	unsigned int nofs_flag;
845 	int ret = 0;
846 
847 	/*
848 	 * We're holding a transaction handle, so use a NOFS memory allocation
849 	 * context to avoid deadlock if reclaim happens.
850 	 */
851 	nofs_flag = memalloc_nofs_save();
852 	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
853 	memalloc_nofs_restore(nofs_flag);
854 	if (!root)
855 		return ERR_PTR(-ENOMEM);
856 
857 	root->root_key.objectid = objectid;
858 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
859 	root->root_key.offset = 0;
860 
861 	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
862 				      BTRFS_NESTING_NORMAL);
863 	if (IS_ERR(leaf)) {
864 		ret = PTR_ERR(leaf);
865 		leaf = NULL;
866 		goto fail;
867 	}
868 
869 	root->node = leaf;
870 	btrfs_mark_buffer_dirty(trans, leaf);
871 
872 	root->commit_root = btrfs_root_node(root);
873 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
874 
875 	btrfs_set_root_flags(&root->root_item, 0);
876 	btrfs_set_root_limit(&root->root_item, 0);
877 	btrfs_set_root_bytenr(&root->root_item, leaf->start);
878 	btrfs_set_root_generation(&root->root_item, trans->transid);
879 	btrfs_set_root_level(&root->root_item, 0);
880 	btrfs_set_root_refs(&root->root_item, 1);
881 	btrfs_set_root_used(&root->root_item, leaf->len);
882 	btrfs_set_root_last_snapshot(&root->root_item, 0);
883 	btrfs_set_root_dirid(&root->root_item, 0);
884 	if (is_fstree(objectid))
885 		generate_random_guid(root->root_item.uuid);
886 	else
887 		export_guid(root->root_item.uuid, &guid_null);
888 	btrfs_set_root_drop_level(&root->root_item, 0);
889 
890 	btrfs_tree_unlock(leaf);
891 
892 	key.objectid = objectid;
893 	key.type = BTRFS_ROOT_ITEM_KEY;
894 	key.offset = 0;
895 	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
896 	if (ret)
897 		goto fail;
898 
899 	return root;
900 
901 fail:
902 	btrfs_put_root(root);
903 
904 	return ERR_PTR(ret);
905 }
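
/*
 * Example (illustrative): creating a new global tree inside a running
 * transaction, similar to what the free space tree creation does (error
 * handling trimmed):
 *
 *	struct btrfs_root *free_space_root;
 *
 *	free_space_root = btrfs_create_tree(trans,
 *					    BTRFS_FREE_SPACE_TREE_OBJECTID);
 *	if (IS_ERR(free_space_root))
 *		return PTR_ERR(free_space_root);
 */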
906 
907 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
908 					 struct btrfs_fs_info *fs_info)
909 {
910 	struct btrfs_root *root;
911 
912 	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
913 	if (!root)
914 		return ERR_PTR(-ENOMEM);
915 
916 	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
917 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
918 	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
919 
920 	return root;
921 }
922 
923 int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
924 			      struct btrfs_root *root)
925 {
926 	struct extent_buffer *leaf;
927 
928 	/*
929 	 * DON'T set SHAREABLE bit for log trees.
930 	 *
931 	 * Log trees are not exposed to user space thus can't be snapshotted,
932 	 * and they go away before a real commit is actually done.
933 	 *
934 	 * They do store pointers to file data extents, and those reference
935 	 * counts still get updated (along with back refs to the log tree).
936 	 */
937 
938 	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
939 			NULL, 0, 0, 0, BTRFS_NESTING_NORMAL);
940 	if (IS_ERR(leaf))
941 		return PTR_ERR(leaf);
942 
943 	root->node = leaf;
944 
945 	btrfs_mark_buffer_dirty(trans, root->node);
946 	btrfs_tree_unlock(root->node);
947 
948 	return 0;
949 }
950 
951 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
952 			     struct btrfs_fs_info *fs_info)
953 {
954 	struct btrfs_root *log_root;
955 
956 	log_root = alloc_log_tree(trans, fs_info);
957 	if (IS_ERR(log_root))
958 		return PTR_ERR(log_root);
959 
960 	if (!btrfs_is_zoned(fs_info)) {
961 		int ret = btrfs_alloc_log_tree_node(trans, log_root);
962 
963 		if (ret) {
964 			btrfs_put_root(log_root);
965 			return ret;
966 		}
967 	}
968 
969 	WARN_ON(fs_info->log_root_tree);
970 	fs_info->log_root_tree = log_root;
971 	return 0;
972 }
973 
974 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
975 		       struct btrfs_root *root)
976 {
977 	struct btrfs_fs_info *fs_info = root->fs_info;
978 	struct btrfs_root *log_root;
979 	struct btrfs_inode_item *inode_item;
980 	int ret;
981 
982 	log_root = alloc_log_tree(trans, fs_info);
983 	if (IS_ERR(log_root))
984 		return PTR_ERR(log_root);
985 
986 	ret = btrfs_alloc_log_tree_node(trans, log_root);
987 	if (ret) {
988 		btrfs_put_root(log_root);
989 		return ret;
990 	}
991 
992 	log_root->last_trans = trans->transid;
993 	log_root->root_key.offset = root->root_key.objectid;
994 
995 	inode_item = &log_root->root_item.inode;
996 	btrfs_set_stack_inode_generation(inode_item, 1);
997 	btrfs_set_stack_inode_size(inode_item, 3);
998 	btrfs_set_stack_inode_nlink(inode_item, 1);
999 	btrfs_set_stack_inode_nbytes(inode_item,
1000 				     fs_info->nodesize);
1001 	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1002 
1003 	btrfs_set_root_node(&log_root->root_item, log_root->node);
1004 
1005 	WARN_ON(root->log_root);
1006 	root->log_root = log_root;
1007 	root->log_transid = 0;
1008 	root->log_transid_committed = -1;
1009 	root->last_log_commit = 0;
1010 	return 0;
1011 }
1012 
1013 static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
1014 					      struct btrfs_path *path,
1015 					      struct btrfs_key *key)
1016 {
1017 	struct btrfs_root *root;
1018 	struct btrfs_tree_parent_check check = { 0 };
1019 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
1020 	u64 generation;
1021 	int ret;
1022 	int level;
1023 
1024 	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
1025 	if (!root)
1026 		return ERR_PTR(-ENOMEM);
1027 
1028 	ret = btrfs_find_root(tree_root, key, path,
1029 			      &root->root_item, &root->root_key);
1030 	if (ret) {
1031 		if (ret > 0)
1032 			ret = -ENOENT;
1033 		goto fail;
1034 	}
1035 
1036 	generation = btrfs_root_generation(&root->root_item);
1037 	level = btrfs_root_level(&root->root_item);
1038 	check.level = level;
1039 	check.transid = generation;
1040 	check.owner_root = key->objectid;
1041 	root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
1042 				     &check);
1043 	if (IS_ERR(root->node)) {
1044 		ret = PTR_ERR(root->node);
1045 		root->node = NULL;
1046 		goto fail;
1047 	}
1048 	if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1049 		ret = -EIO;
1050 		goto fail;
1051 	}
1052 
1053 	/*
1054 	 * For a real fs, and for any tree other than log/reloc trees, the
1055 	 * root owner must match the owner of its root node.
1056 	 */
1057 	if (!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state) &&
1058 	    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
1059 	    root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1060 	    root->root_key.objectid != btrfs_header_owner(root->node)) {
1061 		btrfs_crit(fs_info,
1062 "root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
1063 			   root->root_key.objectid, root->node->start,
1064 			   btrfs_header_owner(root->node),
1065 			   root->root_key.objectid);
1066 		ret = -EUCLEAN;
1067 		goto fail;
1068 	}
1069 	root->commit_root = btrfs_root_node(root);
1070 	return root;
1071 fail:
1072 	btrfs_put_root(root);
1073 	return ERR_PTR(ret);
1074 }
1075 
1076 struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1077 					struct btrfs_key *key)
1078 {
1079 	struct btrfs_root *root;
1080 	struct btrfs_path *path;
1081 
1082 	path = btrfs_alloc_path();
1083 	if (!path)
1084 		return ERR_PTR(-ENOMEM);
1085 	root = read_tree_root_path(tree_root, path, key);
1086 	btrfs_free_path(path);
1087 
1088 	return root;
1089 }
1090 
1091 /*
1092  * Initialize subvolume root in-memory structure
1093  *
1094  * @anon_dev:	anonymous device to attach to the root; if zero, allocate a new one
1095  */
1096 static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
1097 {
1098 	int ret;
1099 
1100 	btrfs_drew_lock_init(&root->snapshot_lock);
1101 
1102 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
1103 	    !btrfs_is_data_reloc_root(root) &&
1104 	    is_fstree(root->root_key.objectid)) {
1105 		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
1106 		btrfs_check_and_init_root_item(&root->root_item);
1107 	}
1108 
1109 	/*
1110 	 * Don't assign an anonymous block device to roots that are not exposed
1111 	 * to user space; the id pool is limited to 1M.
1112 	 */
1113 	if (is_fstree(root->root_key.objectid) &&
1114 	    btrfs_root_refs(&root->root_item) > 0) {
1115 		if (!anon_dev) {
1116 			ret = get_anon_bdev(&root->anon_dev);
1117 			if (ret)
1118 				goto fail;
1119 		} else {
1120 			root->anon_dev = anon_dev;
1121 		}
1122 	}
1123 
1124 	mutex_lock(&root->objectid_mutex);
1125 	ret = btrfs_init_root_free_objectid(root);
1126 	if (ret) {
1127 		mutex_unlock(&root->objectid_mutex);
1128 		goto fail;
1129 	}
1130 
1131 	ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
1132 
1133 	mutex_unlock(&root->objectid_mutex);
1134 
1135 	return 0;
1136 fail:
1137 	/* The caller is responsible for calling btrfs_free_fs_root(). */
1138 	return ret;
1139 }
1140 
1141 static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1142 					       u64 root_id)
1143 {
1144 	struct btrfs_root *root;
1145 
1146 	spin_lock(&fs_info->fs_roots_radix_lock);
1147 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1148 				 (unsigned long)root_id);
1149 	root = btrfs_grab_root(root);
1150 	spin_unlock(&fs_info->fs_roots_radix_lock);
1151 	return root;
1152 }
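
/*
 * Note (illustrative): btrfs_grab_root() only succeeds if the root's
 * refcount is still non-zero, so a root being freed concurrently makes
 * this lookup return NULL rather than a dangling pointer; the radix tree
 * lock protects just the lookup itself.
 */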
1153 
1154 static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
1155 						u64 objectid)
1156 {
1157 	struct btrfs_key key = {
1158 		.objectid = objectid,
1159 		.type = BTRFS_ROOT_ITEM_KEY,
1160 		.offset = 0,
1161 	};
1162 
1163 	switch (objectid) {
1164 	case BTRFS_ROOT_TREE_OBJECTID:
1165 		return btrfs_grab_root(fs_info->tree_root);
1166 	case BTRFS_EXTENT_TREE_OBJECTID:
1167 		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1168 	case BTRFS_CHUNK_TREE_OBJECTID:
1169 		return btrfs_grab_root(fs_info->chunk_root);
1170 	case BTRFS_DEV_TREE_OBJECTID:
1171 		return btrfs_grab_root(fs_info->dev_root);
1172 	case BTRFS_CSUM_TREE_OBJECTID:
1173 		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1174 	case BTRFS_QUOTA_TREE_OBJECTID:
1175 		return btrfs_grab_root(fs_info->quota_root);
1176 	case BTRFS_UUID_TREE_OBJECTID:
1177 		return btrfs_grab_root(fs_info->uuid_root);
1178 	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
1179 		return btrfs_grab_root(fs_info->block_group_root);
1180 	case BTRFS_FREE_SPACE_TREE_OBJECTID:
1181 		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1182 	default:
1183 		return NULL;
1184 	}
1185 }
1186 
1187 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1188 			 struct btrfs_root *root)
1189 {
1190 	int ret;
1191 
1192 	ret = radix_tree_preload(GFP_NOFS);
1193 	if (ret)
1194 		return ret;
1195 
1196 	spin_lock(&fs_info->fs_roots_radix_lock);
1197 	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1198 				(unsigned long)root->root_key.objectid,
1199 				root);
1200 	if (ret == 0) {
1201 		btrfs_grab_root(root);
1202 		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1203 	}
1204 	spin_unlock(&fs_info->fs_roots_radix_lock);
1205 	radix_tree_preload_end();
1206 
1207 	return ret;
1208 }
1209 
1210 void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
1211 {
1212 #ifdef CONFIG_BTRFS_DEBUG
1213 	struct btrfs_root *root;
1214 
1215 	while (!list_empty(&fs_info->allocated_roots)) {
1216 		char buf[BTRFS_ROOT_NAME_BUF_LEN];
1217 
1218 		root = list_first_entry(&fs_info->allocated_roots,
1219 					struct btrfs_root, leak_list);
1220 		btrfs_err(fs_info, "leaked root %s refcount %d",
1221 			  btrfs_root_name(&root->root_key, buf),
1222 			  refcount_read(&root->refs));
1223 		while (refcount_read(&root->refs) > 1)
1224 			btrfs_put_root(root);
1225 		btrfs_put_root(root);
1226 	}
1227 #endif
1228 }
1229 
1230 static void free_global_roots(struct btrfs_fs_info *fs_info)
1231 {
1232 	struct btrfs_root *root;
1233 	struct rb_node *node;
1234 
1235 	while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
1236 		root = rb_entry(node, struct btrfs_root, rb_node);
1237 		rb_erase(&root->rb_node, &fs_info->global_root_tree);
1238 		btrfs_put_root(root);
1239 	}
1240 }
1241 
1242 void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
1243 {
1244 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
1245 	percpu_counter_destroy(&fs_info->delalloc_bytes);
1246 	percpu_counter_destroy(&fs_info->ordered_bytes);
1247 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
1248 	btrfs_free_csum_hash(fs_info);
1249 	btrfs_free_stripe_hash_table(fs_info);
1250 	btrfs_free_ref_cache(fs_info);
1251 	kfree(fs_info->balance_ctl);
1252 	kfree(fs_info->delayed_root);
1253 	free_global_roots(fs_info);
1254 	btrfs_put_root(fs_info->tree_root);
1255 	btrfs_put_root(fs_info->chunk_root);
1256 	btrfs_put_root(fs_info->dev_root);
1257 	btrfs_put_root(fs_info->quota_root);
1258 	btrfs_put_root(fs_info->uuid_root);
1259 	btrfs_put_root(fs_info->fs_root);
1260 	btrfs_put_root(fs_info->data_reloc_root);
1261 	btrfs_put_root(fs_info->block_group_root);
1262 	btrfs_check_leaked_roots(fs_info);
1263 	btrfs_extent_buffer_leak_debug_check(fs_info);
1264 	kfree(fs_info->super_copy);
1265 	kfree(fs_info->super_for_commit);
1266 	kfree(fs_info->subpage_info);
1267 	kvfree(fs_info);
1268 }
1269 
1271 /*
1272  * Get an in-memory reference of a root structure.
1273  *
1274  * For essential trees like root/extent tree, we grab it from fs_info directly.
1275  * For subvolume trees, we check the cached filesystem roots first. If not
1276  * found, then read it from disk and add it to cached fs roots.
1277  *
1278  * Caller should release the root by calling btrfs_put_root() after the usage.
1279  *
1280  * NOTE: Reloc and log trees can't be read by this function as they share the
1281  *	 same root objectid.
1282  *
1283  * @objectid:	root id
1284  * @anon_dev:	preallocated anonymous block device number for new roots;
1285  * 		pass 0 for new allocation.
1286  * @check_ref:	whether to check root item references; if true, return -ENOENT
1287  *		for orphan roots
1288  */
1289 static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
1290 					     u64 objectid, dev_t anon_dev,
1291 					     bool check_ref)
1292 {
1293 	struct btrfs_root *root;
1294 	struct btrfs_path *path;
1295 	struct btrfs_key key;
1296 	int ret;
1297 
1298 	root = btrfs_get_global_root(fs_info, objectid);
1299 	if (root)
1300 		return root;
1301 
1302 	/*
1303 	 * If we're called for non-subvolume trees, and the above function didn't
1304 	 * find one, do not try to read it from disk.
1305 	 *
1306 	 * This is namely for free-space-tree and quota tree, which can change
1307 	 * at runtime and should only be grabbed from fs_info.
1308 	 */
1309 	if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
1310 		return ERR_PTR(-ENOENT);
1311 again:
1312 	root = btrfs_lookup_fs_root(fs_info, objectid);
1313 	if (root) {
1314 		/* Shouldn't get preallocated anon_dev for cached roots */
1315 		ASSERT(!anon_dev);
1316 		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1317 			btrfs_put_root(root);
1318 			return ERR_PTR(-ENOENT);
1319 		}
1320 		return root;
1321 	}
1322 
1323 	key.objectid = objectid;
1324 	key.type = BTRFS_ROOT_ITEM_KEY;
1325 	key.offset = (u64)-1;
1326 	root = btrfs_read_tree_root(fs_info->tree_root, &key);
1327 	if (IS_ERR(root))
1328 		return root;
1329 
1330 	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1331 		ret = -ENOENT;
1332 		goto fail;
1333 	}
1334 
1335 	ret = btrfs_init_fs_root(root, anon_dev);
1336 	if (ret)
1337 		goto fail;
1338 
1339 	path = btrfs_alloc_path();
1340 	if (!path) {
1341 		ret = -ENOMEM;
1342 		goto fail;
1343 	}
1344 	key.objectid = BTRFS_ORPHAN_OBJECTID;
1345 	key.type = BTRFS_ORPHAN_ITEM_KEY;
1346 	key.offset = objectid;
1347 
1348 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1349 	btrfs_free_path(path);
1350 	if (ret < 0)
1351 		goto fail;
1352 	if (ret == 0)
1353 		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1354 
1355 	ret = btrfs_insert_fs_root(fs_info, root);
1356 	if (ret) {
1357 		if (ret == -EEXIST) {
1358 			btrfs_put_root(root);
1359 			goto again;
1360 		}
1361 		goto fail;
1362 	}
1363 	return root;
1364 fail:
1365 	/*
1366 	 * If our caller provided us an anonymous device, then it's the caller's
1367 	 * responsibility to free it in case we fail. So we have to set our
1368 	 * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
1369 	 * and once again by our caller.
1370 	 */
1371 	if (anon_dev)
1372 		root->anon_dev = 0;
1373 	btrfs_put_root(root);
1374 	return ERR_PTR(ret);
1375 }
1376 
1377 /*
1378  * Get in-memory reference of a root structure
1379  *
1380  * @objectid:	tree objectid
1381  * @check_ref:	if set, verify that the tree exists and the item has at least
1382  *		one reference
1383  */
1384 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1385 				     u64 objectid, bool check_ref)
1386 {
1387 	return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
1388 }
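
/*
 * Example (illustrative): resolving a subvolume id to its root and
 * dropping the reference when done ("subvol_id" is the caller's value):
 *
 *	struct btrfs_root *root;
 *
 *	root = btrfs_get_fs_root(fs_info, subvol_id, true);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	...
 *	btrfs_put_root(root);
 */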
1389 
1390 /*
1391  * Get in-memory reference of a root structure, created as new, optionally pass
1392  * the anonymous block device id
1393  *
1394  * @objectid:	tree objectid
1395  * @anon_dev:	if zero, allocate a new anonymous block device, otherwise
1396  *		use the passed-in value
1397  */
1398 struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
1399 					 u64 objectid, dev_t anon_dev)
1400 {
1401 	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
1402 }
1403 
1404 /*
1405  * btrfs_get_fs_root_commit_root - return a root for the given objectid
1406  * @fs_info:	the fs_info
1407  * @objectid:	the objectid we need to lookup
1408  *
1409  * This is exclusively used for backref walking, and exists specifically because
1410  * of how qgroups does lookups.  Qgroups will do a backref lookup at delayed ref
1411  * creation time, which means we may have to read the tree_root in order to look
1412  * up a fs root that is not in memory.  If the root is not in memory we will
1413  * read the tree root commit root and look up the fs root from there.  This is a
1414  * temporary root, it will not be inserted into the radix tree as it doesn't
1415  * have the most up-to-date information; it'll simply be discarded once the
1416  * backref code is finished using the root.
1417  */
1418 struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
1419 						 struct btrfs_path *path,
1420 						 u64 objectid)
1421 {
1422 	struct btrfs_root *root;
1423 	struct btrfs_key key;
1424 
1425 	ASSERT(path->search_commit_root && path->skip_locking);
1426 
1427 	/*
1428 	 * This can return -ENOENT if we ask for a root that doesn't exist, but
1429 	 * since this is called via the backref walking code we won't be looking
1430 	 * up a root that doesn't exist, unless there's corruption.  So if root
1431 	 * != NULL just return it.
1432 	 */
1433 	root = btrfs_get_global_root(fs_info, objectid);
1434 	if (root)
1435 		return root;
1436 
1437 	root = btrfs_lookup_fs_root(fs_info, objectid);
1438 	if (root)
1439 		return root;
1440 
1441 	key.objectid = objectid;
1442 	key.type = BTRFS_ROOT_ITEM_KEY;
1443 	key.offset = (u64)-1;
1444 	root = read_tree_root_path(fs_info->tree_root, path, &key);
1445 	btrfs_release_path(path);
1446 
1447 	return root;
1448 }
1449 
1450 static int cleaner_kthread(void *arg)
1451 {
1452 	struct btrfs_fs_info *fs_info = arg;
1453 	int again;
1454 
1455 	while (1) {
1456 		again = 0;
1457 
1458 		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1459 
1460 		/* Make the cleaner go to sleep early. */
1461 		if (btrfs_need_cleaner_sleep(fs_info))
1462 			goto sleep;
1463 
1464 		/*
1465 		 * Do not do anything if we might cause open_ctree() to block
1466 		 * before we have finished mounting the filesystem.
1467 		 */
1468 		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1469 			goto sleep;
1470 
1471 		if (!mutex_trylock(&fs_info->cleaner_mutex))
1472 			goto sleep;
1473 
1474 		/*
1475 		 * Avoid the case where the status of the fs changed between the
1476 		 * check above and the trylock.
1477 		 */
1478 		if (btrfs_need_cleaner_sleep(fs_info)) {
1479 			mutex_unlock(&fs_info->cleaner_mutex);
1480 			goto sleep;
1481 		}
1482 
1483 		if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
1484 			btrfs_sysfs_feature_update(fs_info);
1485 
1486 		btrfs_run_delayed_iputs(fs_info);
1487 
1488 		again = btrfs_clean_one_deleted_snapshot(fs_info);
1489 		mutex_unlock(&fs_info->cleaner_mutex);
1490 
1491 		/*
1492 		 * The defragger has dealt with the R/O remount and umount;
1493 		 * we needn't do anything special here.
1494 		 */
1495 		btrfs_run_defrag_inodes(fs_info);
1496 
1497 		/*
1498 		 * Acquires fs_info->reclaim_bgs_lock to avoid racing
1499 		 * with relocation (btrfs_relocate_chunk) and relocation
1500 		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1501 		 * after acquiring fs_info->reclaim_bgs_lock. So we
1502 		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1503 		 * unused block groups.
1504 		 */
1505 		btrfs_delete_unused_bgs(fs_info);
1506 
1507 		/*
1508 		 * Reclaim block groups in the reclaim_bgs list after we deleted
1509 		 * all unused block_groups. This possibly gives us some more free
1510 		 * space.
1511 		 */
1512 		btrfs_reclaim_bgs(fs_info);
1513 sleep:
1514 		clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1515 		if (kthread_should_park())
1516 			kthread_parkme();
1517 		if (kthread_should_stop())
1518 			return 0;
1519 		if (!again) {
1520 			set_current_state(TASK_INTERRUPTIBLE);
1521 			schedule();
1522 			__set_current_state(TASK_RUNNING);
1523 		}
1524 	}
1525 }
1526 
1527 static int transaction_kthread(void *arg)
1528 {
1529 	struct btrfs_root *root = arg;
1530 	struct btrfs_fs_info *fs_info = root->fs_info;
1531 	struct btrfs_trans_handle *trans;
1532 	struct btrfs_transaction *cur;
1533 	u64 transid;
1534 	time64_t delta;
1535 	unsigned long delay;
1536 	bool cannot_commit;
1537 
1538 	do {
1539 		cannot_commit = false;
1540 		delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
1541 		mutex_lock(&fs_info->transaction_kthread_mutex);
1542 
1543 		spin_lock(&fs_info->trans_lock);
1544 		cur = fs_info->running_transaction;
1545 		if (!cur) {
1546 			spin_unlock(&fs_info->trans_lock);
1547 			goto sleep;
1548 		}
1549 
1550 		delta = ktime_get_seconds() - cur->start_time;
1551 		if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
1552 		    cur->state < TRANS_STATE_COMMIT_PREP &&
1553 		    delta < fs_info->commit_interval) {
1554 			spin_unlock(&fs_info->trans_lock);
1555 			delay -= msecs_to_jiffies((delta - 1) * 1000);
1556 			delay = min(delay,
1557 				    msecs_to_jiffies(fs_info->commit_interval * 1000));
1558 			goto sleep;
1559 		}
1560 		transid = cur->transid;
1561 		spin_unlock(&fs_info->trans_lock);
1562 
1563 		/* If the file system is aborted, this will always fail. */
1564 		trans = btrfs_attach_transaction(root);
1565 		if (IS_ERR(trans)) {
1566 			if (PTR_ERR(trans) != -ENOENT)
1567 				cannot_commit = true;
1568 			goto sleep;
1569 		}
1570 		if (transid == trans->transid) {
1571 			btrfs_commit_transaction(trans);
1572 		} else {
1573 			btrfs_end_transaction(trans);
1574 		}
1575 sleep:
1576 		wake_up_process(fs_info->cleaner_kthread);
1577 		mutex_unlock(&fs_info->transaction_kthread_mutex);
1578 
1579 		if (BTRFS_FS_ERROR(fs_info))
1580 			btrfs_cleanup_transaction(fs_info);
1581 		if (!kthread_should_stop() &&
1582 				(!btrfs_transaction_blocked(fs_info) ||
1583 				 cannot_commit))
1584 			schedule_timeout_interruptible(delay);
1585 	} while (!kthread_should_stop());
1586 	return 0;
1587 }
1588 
1589 /*
1590  * This will find the highest generation in the array of root backups.  The
1591  * index of the newest entry is returned, or -EINVAL if we can't find
1592  * anything.
1593  *
1594  * We check to make sure the array is valid by comparing the
1595  * generation of the latest root in the array with the generation
1596  * in the super block.  If they don't match we pitch it.
1597  */
1598 static int find_newest_super_backup(struct btrfs_fs_info *info)
1599 {
1600 	const u64 newest_gen = btrfs_super_generation(info->super_copy);
1601 	u64 cur;
1602 	struct btrfs_root_backup *root_backup;
1603 	int i;
1604 
1605 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1606 		root_backup = info->super_copy->super_roots + i;
1607 		cur = btrfs_backup_tree_root_gen(root_backup);
1608 		if (cur == newest_gen)
1609 			return i;
1610 	}
1611 
1612 	return -EINVAL;
1613 }
1614 
1615 /*
1616  * Copy all the root pointers into the super backup array.
1617  * This will bump the backup pointer by one when it is
1618  * done.
1619  */
1620 static void backup_super_roots(struct btrfs_fs_info *info)
1621 {
1622 	const int next_backup = info->backup_root_index;
1623 	struct btrfs_root_backup *root_backup;
1624 
1625 	root_backup = info->super_for_commit->super_roots + next_backup;
1626 
1627 	/*
1628 	 * Make sure all of our padding and empty slots get zero filled
1629 	 * regardless of which ones we use today.
1630 	 */
1631 	memset(root_backup, 0, sizeof(*root_backup));
1632 
1633 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1634 
1635 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1636 	btrfs_set_backup_tree_root_gen(root_backup,
1637 			       btrfs_header_generation(info->tree_root->node));
1638 
1639 	btrfs_set_backup_tree_root_level(root_backup,
1640 			       btrfs_header_level(info->tree_root->node));
1641 
1642 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1643 	btrfs_set_backup_chunk_root_gen(root_backup,
1644 			       btrfs_header_generation(info->chunk_root->node));
1645 	btrfs_set_backup_chunk_root_level(root_backup,
1646 			       btrfs_header_level(info->chunk_root->node));
1647 
1648 	if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
1649 		struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
1650 		struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
1651 
1652 		btrfs_set_backup_extent_root(root_backup,
1653 					     extent_root->node->start);
1654 		btrfs_set_backup_extent_root_gen(root_backup,
1655 				btrfs_header_generation(extent_root->node));
1656 		btrfs_set_backup_extent_root_level(root_backup,
1657 					btrfs_header_level(extent_root->node));
1658 
1659 		btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
1660 		btrfs_set_backup_csum_root_gen(root_backup,
1661 					       btrfs_header_generation(csum_root->node));
1662 		btrfs_set_backup_csum_root_level(root_backup,
1663 						 btrfs_header_level(csum_root->node));
1664 	}
1665 
1666 	/*
1667 	 * We might commit during log recovery, which happens before we set
1668 	 * the fs_root.  Make sure it is valid before we fill it in.
1669 	 */
1670 	if (info->fs_root && info->fs_root->node) {
1671 		btrfs_set_backup_fs_root(root_backup,
1672 					 info->fs_root->node->start);
1673 		btrfs_set_backup_fs_root_gen(root_backup,
1674 			       btrfs_header_generation(info->fs_root->node));
1675 		btrfs_set_backup_fs_root_level(root_backup,
1676 			       btrfs_header_level(info->fs_root->node));
1677 	}
1678 
1679 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1680 	btrfs_set_backup_dev_root_gen(root_backup,
1681 			       btrfs_header_generation(info->dev_root->node));
1682 	btrfs_set_backup_dev_root_level(root_backup,
1683 				       btrfs_header_level(info->dev_root->node));
1684 
1685 	btrfs_set_backup_total_bytes(root_backup,
1686 			     btrfs_super_total_bytes(info->super_copy));
1687 	btrfs_set_backup_bytes_used(root_backup,
1688 			     btrfs_super_bytes_used(info->super_copy));
1689 	btrfs_set_backup_num_devices(root_backup,
1690 			     btrfs_super_num_devices(info->super_copy));
1691 
1692 	/*
1693 	 * If we don't copy this out to the super_copy, it won't get remembered
1694 	 * for the next commit
1695 	 */
1696 	memcpy(&info->super_copy->super_roots,
1697 	       &info->super_for_commit->super_roots,
1698 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1699 }
1700 
1701 /*
1702  * read_backup_root - Reads a backup root based on the passed priority. Prio 0
1703  * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
1704  *
1705  * @fs_info:	filesystem whose backup roots need to be read
1706  * @priority:	priority of backup root required
1707  *
1708  * Returns backup root index on success and -EINVAL otherwise.
1709  */
1710 static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
1711 {
1712 	int backup_index = find_newest_super_backup(fs_info);
1713 	struct btrfs_super_block *super = fs_info->super_copy;
1714 	struct btrfs_root_backup *root_backup;
1715 
1716 	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
1717 		if (priority == 0)
1718 			return backup_index;
1719 
1720 		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
1721 		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
1722 	} else {
1723 		return -EINVAL;
1724 	}
1725 
1726 	root_backup = super->super_roots + backup_index;
1727 
1728 	btrfs_set_super_generation(super,
1729 				   btrfs_backup_tree_root_gen(root_backup));
1730 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1731 	btrfs_set_super_root_level(super,
1732 				   btrfs_backup_tree_root_level(root_backup));
1733 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1734 
1735 	/*
1736 	 * FIXME: the total bytes and num_devices need to match, otherwise we
1737 	 * may need a fsck.
1738 	 */
1739 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1740 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1741 
1742 	return backup_index;
1743 }
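
/*
 * Worked example (illustrative): with BTRFS_NUM_BACKUP_ROOTS == 4 and the
 * newest backup at index 2, priority 1 resolves to (2 + 4 - 1) % 4 == 1
 * (the second newest slot) and priority 3 wraps around to
 * (2 + 4 - 3) % 4 == 3, the oldest slot.
 */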
1744 
1745 /* Helper to clean up workers */
1746 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1747 {
1748 	btrfs_destroy_workqueue(fs_info->fixup_workers);
1749 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1750 	btrfs_destroy_workqueue(fs_info->workers);
1751 	if (fs_info->endio_workers)
1752 		destroy_workqueue(fs_info->endio_workers);
1753 	if (fs_info->rmw_workers)
1754 		destroy_workqueue(fs_info->rmw_workers);
1755 	if (fs_info->compressed_write_workers)
1756 		destroy_workqueue(fs_info->compressed_write_workers);
1757 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
1758 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1759 	btrfs_destroy_workqueue(fs_info->delayed_workers);
1760 	btrfs_destroy_workqueue(fs_info->caching_workers);
1761 	btrfs_destroy_workqueue(fs_info->flush_workers);
1762 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
1763 	if (fs_info->discard_ctl.discard_workers)
1764 		destroy_workqueue(fs_info->discard_ctl.discard_workers);
1765 	/*
1766 	 * Now that all other work queues are destroyed, we can safely destroy
1767 	 * the queues used for metadata I/O, since tasks from those other work
1768 	 * queues can do metadata I/O operations.
1769 	 */
1770 	if (fs_info->endio_meta_workers)
1771 		destroy_workqueue(fs_info->endio_meta_workers);
1772 }
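
/*
 * Illustrative sketch, not part of the original source: the ordering rule
 * that the comment in btrfs_stop_all_workers() relies on. If work running
 * on one workqueue can queue work on another, the producer must be
 * destroyed first, since destroy_workqueue() only drains the queue it is
 * given. The names demo_producer_wq/demo_consumer_wq are hypothetical.
 */
static struct workqueue_struct *demo_producer_wq;
static struct workqueue_struct *demo_consumer_wq;

static void demo_teardown(void)
{
	/*
	 * Destroy the producer first so nothing can queue new work on the
	 * consumer; only then can the consumer be drained and destroyed
	 * safely.
	 */
	if (demo_producer_wq)
		destroy_workqueue(demo_producer_wq);
	if (demo_consumer_wq)
		destroy_workqueue(demo_consumer_wq);
}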
1773 
1774 static void free_root_extent_buffers(struct btrfs_root *root)
1775 {
1776 	if (root) {
1777 		free_extent_buffer(root->node);
1778 		free_extent_buffer(root->commit_root);
1779 		root->node = NULL;
1780 		root->commit_root = NULL;
1781 	}
1782 }
1783 
1784 static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
1785 {
1786 	struct btrfs_root *root, *tmp;
1787 
1788 	rbtree_postorder_for_each_entry_safe(root, tmp,
1789 					     &fs_info->global_root_tree,
1790 					     rb_node)
1791 		free_root_extent_buffers(root);
1792 }
1793 
1794 /* helper to clean up tree roots */
1795 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
1796 {
1797 	free_root_extent_buffers(info->tree_root);
1798 
1799 	free_global_root_pointers(info);
1800 	free_root_extent_buffers(info->dev_root);
1801 	free_root_extent_buffers(info->quota_root);
1802 	free_root_extent_buffers(info->uuid_root);
1803 	free_root_extent_buffers(info->fs_root);
1804 	free_root_extent_buffers(info->data_reloc_root);
1805 	free_root_extent_buffers(info->block_group_root);
1806 	if (free_chunk_root)
1807 		free_root_extent_buffers(info->chunk_root);
1808 }
1809 
1810 void btrfs_put_root(struct btrfs_root *root)
1811 {
1812 	if (!root)
1813 		return;
1814 
1815 	if (refcount_dec_and_test(&root->refs)) {
1816 		WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
1817 		WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
1818 		if (root->anon_dev)
1819 			free_anon_bdev(root->anon_dev);
1820 		free_root_extent_buffers(root);
1821 #ifdef CONFIG_BTRFS_DEBUG
1822 		spin_lock(&root->fs_info->fs_roots_radix_lock);
1823 		list_del_init(&root->leak_list);
1824 		spin_unlock(&root->fs_info->fs_roots_radix_lock);
1825 #endif
1826 		kfree(root);
1827 	}
1828 }
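
/*
 * Illustrative sketch, not part of the original source: the refcount_t
 * get/put idiom that btrfs_put_root() follows. The demo_obj type and
 * helpers are hypothetical, assuming the object was allocated with
 * refcount_set(&obj->refs, 1). Exactly one caller (the one whose decrement
 * takes the count to zero) performs the cleanup.
 */
struct demo_obj {
	refcount_t refs;
};

static struct demo_obj *demo_obj_get(struct demo_obj *obj)
{
	if (obj)
		refcount_inc(&obj->refs);	/* take another reference */
	return obj;
}

static void demo_obj_put(struct demo_obj *obj)
{
	if (!obj)
		return;
	/* Last reference dropped: this caller owns the cleanup. */
	if (refcount_dec_and_test(&obj->refs))
		kfree(obj);
}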
1829 
1830 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
1831 {
1832 	int ret;
1833 	struct btrfs_root *gang[8];
1834 	int i;
1835 
1836 	while (!list_empty(&fs_info->dead_roots)) {
1837 		gang[0] = list_entry(fs_info->dead_roots.next,
1838 				     struct btrfs_root, root_list);
1839 		list_del(&gang[0]->root_list);
1840 
1841 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
1842 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
1843 		btrfs_put_root(gang[0]);
1844 	}
1845 
1846 	while (1) {
1847 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1848 					     (void **)gang, 0,
1849 					     ARRAY_SIZE(gang));
1850 		if (!ret)
1851 			break;
1852 		for (i = 0; i < ret; i++)
1853 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
1854 	}
1855 }
1856 
1857 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
1858 {
1859 	mutex_init(&fs_info->scrub_lock);
1860 	atomic_set(&fs_info->scrubs_running, 0);
1861 	atomic_set(&fs_info->scrub_pause_req, 0);
1862 	atomic_set(&fs_info->scrubs_paused, 0);
1863 	atomic_set(&fs_info->scrub_cancel_req, 0);
1864 	init_waitqueue_head(&fs_info->scrub_pause_wait);
1865 	refcount_set(&fs_info->scrub_workers_refcnt, 0);
1866 }
1867 
1868 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
1869 {
1870 	spin_lock_init(&fs_info->balance_lock);
1871 	mutex_init(&fs_info->balance_mutex);
1872 	atomic_set(&fs_info->balance_pause_req, 0);
1873 	atomic_set(&fs_info->balance_cancel_req, 0);
1874 	fs_info->balance_ctl = NULL;
1875 	init_waitqueue_head(&fs_info->balance_wait_q);
1876 	atomic_set(&fs_info->reloc_cancel_req, 0);
1877 }
1878 
1879 static int btrfs_init_btree_inode(struct super_block *sb)
1880 {
1881 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1882 	unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
1883 					      fs_info->tree_root);
1884 	struct inode *inode;
1885 
1886 	inode = new_inode(sb);
1887 	if (!inode)
1888 		return -ENOMEM;
1889 
1890 	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1891 	set_nlink(inode, 1);
1892 	/*
1893 	 * We set the i_size on the btree inode to the largest possible offset.
1894 	 * The real end of the address space is determined by all of the
1895 	 * devices in the system.
1896 	 */
1897 	inode->i_size = OFFSET_MAX;
1898 	inode->i_mapping->a_ops = &btree_aops;
1899 	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
1900 
1901 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
1902 	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
1903 			    IO_TREE_BTREE_INODE_IO);
1904 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
1905 
1906 	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
1907 	BTRFS_I(inode)->location.objectid = BTRFS_BTREE_INODE_OBJECTID;
1908 	BTRFS_I(inode)->location.type = 0;
1909 	BTRFS_I(inode)->location.offset = 0;
1910 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
1911 	__insert_inode_hash(inode, hash);
1912 	fs_info->btree_inode = inode;
1913 
1914 	return 0;
1915 }
1916 
1917 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
1918 {
1919 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
1920 	init_rwsem(&fs_info->dev_replace.rwsem);
1921 	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
1922 }
1923 
1924 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
1925 {
1926 	spin_lock_init(&fs_info->qgroup_lock);
1927 	mutex_init(&fs_info->qgroup_ioctl_lock);
1928 	fs_info->qgroup_tree = RB_ROOT;
1929 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
1930 	fs_info->qgroup_seq = 1;
1931 	fs_info->qgroup_ulist = NULL;
1932 	fs_info->qgroup_rescan_running = false;
1933 	fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
1934 	mutex_init(&fs_info->qgroup_rescan_lock);
1935 }
1936 
1937 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
1938 {
1939 	u32 max_active = fs_info->thread_pool_size;
1940 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
1941 	unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE;
1942 
1943 	fs_info->workers =
1944 		btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
1945 
1946 	fs_info->delalloc_workers =
1947 		btrfs_alloc_workqueue(fs_info, "delalloc",
1948 				      flags, max_active, 2);
1949 
1950 	fs_info->flush_workers =
1951 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
1952 				      flags, max_active, 0);
1953 
1954 	fs_info->caching_workers =
1955 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
1956 
1957 	fs_info->fixup_workers =
1958 		btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags);
1959 
1960 	fs_info->endio_workers =
1961 		alloc_workqueue("btrfs-endio", flags, max_active);
1962 	fs_info->endio_meta_workers =
1963 		alloc_workqueue("btrfs-endio-meta", flags, max_active);
1964 	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
1965 	fs_info->endio_write_workers =
1966 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
1967 				      max_active, 2);
1968 	fs_info->compressed_write_workers =
1969 		alloc_workqueue("btrfs-compressed-write", flags, max_active);
1970 	fs_info->endio_freespace_worker =
1971 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
1972 				      max_active, 0);
1973 	fs_info->delayed_workers =
1974 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
1975 				      max_active, 0);
1976 	fs_info->qgroup_rescan_workers =
1977 		btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
1978 					      ordered_flags);
1979 	fs_info->discard_ctl.discard_workers =
1980 		alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE);
1981 
1982 	if (!(fs_info->workers &&
1983 	      fs_info->delalloc_workers && fs_info->flush_workers &&
1984 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
1985 	      fs_info->compressed_write_workers &&
1986 	      fs_info->endio_write_workers &&
1987 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
1988 	      fs_info->caching_workers && fs_info->fixup_workers &&
1989 	      fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
1990 	      fs_info->discard_ctl.discard_workers)) {
1991 		return -ENOMEM;
1992 	}
1993 
1994 	return 0;
1995 }
1996 
1997 static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
1998 {
1999 	struct crypto_shash *csum_shash;
2000 	const char *csum_driver = btrfs_super_csum_driver(csum_type);
2001 
2002 	csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
2003 
2004 	if (IS_ERR(csum_shash)) {
2005 		btrfs_err(fs_info, "error allocating %s hash for checksum",
2006 			  csum_driver);
2007 		return PTR_ERR(csum_shash);
2008 	}
2009 
2010 	fs_info->csum_shash = csum_shash;
2011 
2012 	/*
2013 	 * Check if the checksum implementation is a fast accelerated one.
2014 	 * As-is this is a bit of a hack and should be replaced once the csum
2015 	 * implementations provide that information themselves.
2016 	 */
2017 	switch (csum_type) {
2018 	case BTRFS_CSUM_TYPE_CRC32:
2019 		if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
2020 			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2021 		break;
2022 	case BTRFS_CSUM_TYPE_XXHASH:
2023 		set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2024 		break;
2025 	default:
2026 		break;
2027 	}
2028 
2029 	btrfs_info(fs_info, "using %s (%s) checksum algorithm",
2030 			btrfs_super_csum_name(csum_type),
2031 			crypto_shash_driver_name(csum_shash));
2032 	return 0;
2033 }
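
/*
 * Illustrative sketch, not part of the original source: a minimal one-shot
 * use of the shash handle that btrfs_init_csum_hash() sets up, assuming a
 * crc32c transform and a caller-provided 4-byte result buffer. The helper
 * name is hypothetical.
 */
static int demo_csum_digest(struct crypto_shash *tfm, const u8 *data,
			    unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	/* init + update + final in one call */
	return crypto_shash_digest(desc, data, len, out);
}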
2034 
2035 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2036 			    struct btrfs_fs_devices *fs_devices)
2037 {
2038 	int ret;
2039 	struct btrfs_tree_parent_check check = { 0 };
2040 	struct btrfs_root *log_tree_root;
2041 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2042 	u64 bytenr = btrfs_super_log_root(disk_super);
2043 	int level = btrfs_super_log_root_level(disk_super);
2044 
2045 	if (fs_devices->rw_devices == 0) {
2046 		btrfs_warn(fs_info, "log replay required on RO media");
2047 		return -EIO;
2048 	}
2049 
2050 	log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2051 					 GFP_KERNEL);
2052 	if (!log_tree_root)
2053 		return -ENOMEM;
2054 
2055 	check.level = level;
2056 	check.transid = fs_info->generation + 1;
2057 	check.owner_root = BTRFS_TREE_LOG_OBJECTID;
2058 	log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
2059 	if (IS_ERR(log_tree_root->node)) {
2060 		btrfs_warn(fs_info, "failed to read log tree");
2061 		ret = PTR_ERR(log_tree_root->node);
2062 		log_tree_root->node = NULL;
2063 		btrfs_put_root(log_tree_root);
2064 		return ret;
2065 	}
2066 	if (!extent_buffer_uptodate(log_tree_root->node)) {
2067 		btrfs_err(fs_info, "failed to read log tree");
2068 		btrfs_put_root(log_tree_root);
2069 		return -EIO;
2070 	}
2071 
2072 	/* returns with log_tree_root freed on success */
2073 	ret = btrfs_recover_log_trees(log_tree_root);
2074 	if (ret) {
2075 		btrfs_handle_fs_error(fs_info, ret,
2076 				      "Failed to recover log tree");
2077 		btrfs_put_root(log_tree_root);
2078 		return ret;
2079 	}
2080 
2081 	if (sb_rdonly(fs_info->sb)) {
2082 		ret = btrfs_commit_super(fs_info);
2083 		if (ret)
2084 			return ret;
2085 	}
2086 
2087 	return 0;
2088 }
2089 
2090 static int load_global_roots_objectid(struct btrfs_root *tree_root,
2091 				      struct btrfs_path *path, u64 objectid,
2092 				      const char *name)
2093 {
2094 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
2095 	struct btrfs_root *root;
2096 	u64 max_global_id = 0;
2097 	int ret;
2098 	struct btrfs_key key = {
2099 		.objectid = objectid,
2100 		.type = BTRFS_ROOT_ITEM_KEY,
2101 		.offset = 0,
2102 	};
2103 	bool found = false;
2104 
2105 	/* If we have IGNOREDATACSUMS skip loading these roots. */
2106 	if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
2107 	    btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
2108 		set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
2109 		return 0;
2110 	}
2111 
2112 	while (1) {
2113 		ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2114 		if (ret < 0)
2115 			break;
2116 
2117 		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2118 			ret = btrfs_next_leaf(tree_root, path);
2119 			if (ret) {
2120 				if (ret > 0)
2121 					ret = 0;
2122 				break;
2123 			}
2124 		}
2125 		ret = 0;
2126 
2127 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2128 		if (key.objectid != objectid)
2129 			break;
2130 		btrfs_release_path(path);
2131 
2132 		/*
2133 		 * Only track this for the extent tree; the number of global
2134 		 * roots is the same for every objectid.
2135 		 */
2136 		if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2137 			max_global_id = max(max_global_id, key.offset);
2138 
2139 		found = true;
2140 		root = read_tree_root_path(tree_root, path, &key);
2141 		if (IS_ERR(root)) {
2142 			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2143 				ret = PTR_ERR(root);
2144 			break;
2145 		}
2146 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2147 		ret = btrfs_global_root_insert(root);
2148 		if (ret) {
2149 			btrfs_put_root(root);
2150 			break;
2151 		}
2152 		key.offset++;
2153 	}
2154 	btrfs_release_path(path);
2155 
2156 	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2157 		fs_info->nr_global_roots = max_global_id + 1;
2158 
2159 	if (!found || ret) {
2160 		if (objectid == BTRFS_CSUM_TREE_OBJECTID)
2161 			set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
2162 
2163 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2164 			ret = ret ? ret : -ENOENT;
2165 		else
2166 			ret = 0;
2167 		btrfs_err(fs_info, "failed to load root %s", name);
2168 	}
2169 	return ret;
2170 }
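
/*
 * Illustrative sketch, not part of the original source: the generic
 * search-slot iteration pattern used by load_global_roots_objectid() above,
 * reduced to counting ROOT_ITEMs with a given objectid. Hypothetical
 * helper; error handling is trimmed to the essentials.
 */
static int demo_count_root_items(struct btrfs_root *tree_root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key = {
		.objectid = objectid,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};
	int count = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
		if (ret < 0)
			break;
		/* Step to the next leaf if the slot points past the last item. */
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(tree_root, path);
			if (ret) {
				/* ret > 0 just means no more leaves */
				ret = ret > 0 ? 0 : ret;
				break;
			}
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		btrfs_release_path(path);
		if (key.objectid != objectid) {
			ret = 0;
			break;
		}
		count++;
		key.offset++;	/* resume the search after this item */
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : count;
}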
2171 
2172 static int load_global_roots(struct btrfs_root *tree_root)
2173 {
2174 	struct btrfs_path *path;
2175 	int ret = 0;
2176 
2177 	path = btrfs_alloc_path();
2178 	if (!path)
2179 		return -ENOMEM;
2180 
2181 	ret = load_global_roots_objectid(tree_root, path,
2182 					 BTRFS_EXTENT_TREE_OBJECTID, "extent");
2183 	if (ret)
2184 		goto out;
2185 	ret = load_global_roots_objectid(tree_root, path,
2186 					 BTRFS_CSUM_TREE_OBJECTID, "csum");
2187 	if (ret)
2188 		goto out;
2189 	if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
2190 		goto out;
2191 	ret = load_global_roots_objectid(tree_root, path,
2192 					 BTRFS_FREE_SPACE_TREE_OBJECTID,
2193 					 "free space");
2194 out:
2195 	btrfs_free_path(path);
2196 	return ret;
2197 }
2198 
2199 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2200 {
2201 	struct btrfs_root *tree_root = fs_info->tree_root;
2202 	struct btrfs_root *root;
2203 	struct btrfs_key location;
2204 	int ret;
2205 
2206 	BUG_ON(!fs_info->tree_root);
2207 
2208 	ret = load_global_roots(tree_root);
2209 	if (ret)
2210 		return ret;
2211 
2212 	location.type = BTRFS_ROOT_ITEM_KEY;
2213 	location.offset = 0;
2214 
2215 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
2216 		location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
2217 		root = btrfs_read_tree_root(tree_root, &location);
2218 		if (IS_ERR(root)) {
2219 			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2220 				ret = PTR_ERR(root);
2221 				goto out;
2222 			}
2223 		} else {
2224 			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2225 			fs_info->block_group_root = root;
2226 		}
2227 	}
2228 
2229 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2230 	root = btrfs_read_tree_root(tree_root, &location);
2231 	if (IS_ERR(root)) {
2232 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2233 			ret = PTR_ERR(root);
2234 			goto out;
2235 		}
2236 	} else {
2237 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2238 		fs_info->dev_root = root;
2239 	}
2240 	/* Initialize fs_info for all devices in any case */
2241 	ret = btrfs_init_devices_late(fs_info);
2242 	if (ret)
2243 		goto out;
2244 
2245 	/*
2246 	 * This tree can share blocks with some other fs tree during relocation
2247 	 * and we need a proper setup by btrfs_get_fs_root
2248 	 */
2249 	root = btrfs_get_fs_root(tree_root->fs_info,
2250 				 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
2251 	if (IS_ERR(root)) {
2252 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2253 			ret = PTR_ERR(root);
2254 			goto out;
2255 		}
2256 	} else {
2257 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2258 		fs_info->data_reloc_root = root;
2259 	}
2260 
2261 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2262 	root = btrfs_read_tree_root(tree_root, &location);
2263 	if (!IS_ERR(root)) {
2264 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2265 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2266 		fs_info->quota_root = root;
2267 	}
2268 
2269 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2270 	root = btrfs_read_tree_root(tree_root, &location);
2271 	if (IS_ERR(root)) {
2272 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2273 			ret = PTR_ERR(root);
2274 			if (ret != -ENOENT)
2275 				goto out;
2276 		}
2277 	} else {
2278 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2279 		fs_info->uuid_root = root;
2280 	}
2281 
2282 	return 0;
2283 out:
2284 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2285 		   location.objectid, ret);
2286 	return ret;
2287 }
2288 
2289 /*
2290  * Real super block validation
2291  * NOTE: super csum type and incompat features will not be checked here.
2292  *
2293  * @sb:		super block to check
2294  * @mirror_num:	the super block number to check its bytenr:
2295  * 		0	the primary (1st) sb
2296  * 		1, 2	2nd and 3rd backup copy
2297  * 	       -1	skip bytenr check
2298  */
2299 int btrfs_validate_super(struct btrfs_fs_info *fs_info,
2300 			 struct btrfs_super_block *sb, int mirror_num)
2301 {
2302 	u64 nodesize = btrfs_super_nodesize(sb);
2303 	u64 sectorsize = btrfs_super_sectorsize(sb);
2304 	int ret = 0;
2305 
2306 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2307 		btrfs_err(fs_info, "no valid FS found");
2308 		ret = -EINVAL;
2309 	}
2310 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2311 		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2312 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2313 		ret = -EINVAL;
2314 	}
2315 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2316 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2317 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2318 		ret = -EINVAL;
2319 	}
2320 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2321 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2322 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2323 		ret = -EINVAL;
2324 	}
2325 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2326 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2327 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2328 		ret = -EINVAL;
2329 	}
2330 
2331 	/*
2332 	 * Check sectorsize and nodesize first, other checks will need them.
2333 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2334 	 */
2335 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2336 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2337 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2338 		ret = -EINVAL;
2339 	}
2340 
2341 	/*
2342 	 * We only support at most two sectorsizes: 4K and PAGE_SIZE.
2343 	 *
2344 	 * We could support a 16K sectorsize with a 64K page size without
2345 	 * problems, but such a sectorsize/page size combination doesn't make
2346 	 * much sense. 4K will be our future standard; PAGE_SIZE has been
2347 	 * supported from the very beginning.
2348 	 */
2349 	if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) {
2350 		btrfs_err(fs_info,
2351 			"sectorsize %llu not yet supported for page size %lu",
2352 			sectorsize, PAGE_SIZE);
2353 		ret = -EINVAL;
2354 	}
2355 
2356 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2357 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2358 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2359 		ret = -EINVAL;
2360 	}
2361 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2362 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2363 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2364 		ret = -EINVAL;
2365 	}
2366 
2367 	/* Root alignment check */
2368 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2369 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2370 			   btrfs_super_root(sb));
2371 		ret = -EINVAL;
2372 	}
2373 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2374 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2375 			   btrfs_super_chunk_root(sb));
2376 		ret = -EINVAL;
2377 	}
2378 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2379 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2380 			   btrfs_super_log_root(sb));
2381 		ret = -EINVAL;
2382 	}
2383 
2384 	if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
2385 		btrfs_err(fs_info,
2386 		"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
2387 			  sb->fsid, fs_info->fs_devices->fsid);
2388 		ret = -EINVAL;
2389 	}
2390 
2391 	if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
2392 		   BTRFS_FSID_SIZE) != 0) {
2393 		btrfs_err(fs_info,
2394 "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
2395 			  btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
2396 		ret = -EINVAL;
2397 	}
2398 
2399 	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2400 		   BTRFS_FSID_SIZE) != 0) {
2401 		btrfs_err(fs_info,
2402 			"dev_item UUID does not match metadata fsid: %pU != %pU",
2403 			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2404 		ret = -EINVAL;
2405 	}
2406 
2407 	/*
2408 	 * Artificial requirement for block-group-tree to force newer features
2409 	 * (free-space-tree, no-holes) so the test matrix is smaller.
2410 	 */
2411 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
2412 	    (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
2413 	     !btrfs_fs_incompat(fs_info, NO_HOLES))) {
2414 		btrfs_err(fs_info,
2415 		"block-group-tree feature requires fres-space-tree and no-holes");
2416 		ret = -EINVAL;
2417 	}
2418 
2419 	/*
2420 	 * Hint to catch really bogus numbers, bitflips or so; more exact
2421 	 * checks are done later.
2422 	 */
2423 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2424 		btrfs_err(fs_info, "bytes_used is too small %llu",
2425 			  btrfs_super_bytes_used(sb));
2426 		ret = -EINVAL;
2427 	}
2428 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2429 		btrfs_err(fs_info, "invalid stripesize %u",
2430 			  btrfs_super_stripesize(sb));
2431 		ret = -EINVAL;
2432 	}
2433 	if (btrfs_super_num_devices(sb) > (1UL << 31))
2434 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2435 			   btrfs_super_num_devices(sb));
2436 	if (btrfs_super_num_devices(sb) == 0) {
2437 		btrfs_err(fs_info, "number of devices is 0");
2438 		ret = -EINVAL;
2439 	}
2440 
2441 	if (mirror_num >= 0 &&
2442 	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2443 		btrfs_err(fs_info, "super offset mismatch %llu != %u",
2444 			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
2445 		ret = -EINVAL;
2446 	}
2447 
2448 	/*
2449 	 * Catch obvious sys_chunk_array corruptions; it must hold at least
2450 	 * one key and one chunk.
2451 	 */
2452 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2453 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2454 			  btrfs_super_sys_array_size(sb),
2455 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2456 		ret = -EINVAL;
2457 	}
2458 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2459 			+ sizeof(struct btrfs_chunk)) {
2460 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2461 			  btrfs_super_sys_array_size(sb),
2462 			  sizeof(struct btrfs_disk_key)
2463 			  + sizeof(struct btrfs_chunk));
2464 		ret = -EINVAL;
2465 	}
2466 
2467 	/*
2468 	 * The generation is a global counter; we'll trust it more than the
2469 	 * others, but it's still possible that it's the one that's wrong.
2470 	 */
2471 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2472 		btrfs_warn(fs_info,
2473 			"suspicious: generation < chunk_root_generation: %llu < %llu",
2474 			btrfs_super_generation(sb),
2475 			btrfs_super_chunk_root_generation(sb));
2476 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2477 	    && btrfs_super_cache_generation(sb) != (u64)-1)
2478 		btrfs_warn(fs_info,
2479 			"suspicious: generation < cache_generation: %llu < %llu",
2480 			btrfs_super_generation(sb),
2481 			btrfs_super_cache_generation(sb));
2482 
2483 	return ret;
2484 }
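
/*
 * Illustrative sketch, not part of the original source: the bytenr check in
 * btrfs_validate_super() compares against btrfs_sb_offset(mirror_num). As
 * an assumption about that progression, the primary copy sits at 64K
 * (BTRFS_SUPER_INFO_OFFSET) and each mirror is shifted up by
 * BTRFS_SUPER_MIRROR_SHIFT bits from a 16K base: mirror 1 at 64M, mirror 2
 * at 256G. A hypothetical restatement:
 */
static inline u64 demo_super_mirror_offset(int mirror)
{
	if (!mirror)
		return BTRFS_SUPER_INFO_OFFSET;	/* 64K primary copy */
	/* 16K shifted left by BTRFS_SUPER_MIRROR_SHIFT bits per mirror. */
	return (u64)SZ_16K << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
}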
2485 
2486 /*
2487  * Validation of super block at mount time.
2488  * Some checks already done early at mount time, like csum type and incompat
2489  * flags, will be skipped.
2490  */
2491 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2492 {
2493 	return btrfs_validate_super(fs_info, fs_info->super_copy, 0);
2494 }
2495 
2496 /*
2497  * Validation of super block at write time.
2498  * Some checks, like the bytenr check, will be skipped as their values will
2499  * be overwritten soon.
2500  * Extra checks like csum type and incompat flags will be done here.
2501  */
2502 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2503 				      struct btrfs_super_block *sb)
2504 {
2505 	int ret;
2506 
2507 	ret = btrfs_validate_super(fs_info, sb, -1);
2508 	if (ret < 0)
2509 		goto out;
2510 	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2511 		ret = -EUCLEAN;
2512 		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2513 			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2514 		goto out;
2515 	}
2516 	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2517 		ret = -EUCLEAN;
2518 		btrfs_err(fs_info,
2519 		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2520 			  btrfs_super_incompat_flags(sb),
2521 			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2522 		goto out;
2523 	}
2524 out:
2525 	if (ret < 0)
2526 		btrfs_err(fs_info,
2527 		"super block corruption detected before writing it to disk");
2528 	return ret;
2529 }
2530 
2531 static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
2532 {
2533 	struct btrfs_tree_parent_check check = {
2534 		.level = level,
2535 		.transid = gen,
2536 		.owner_root = root->root_key.objectid
2537 	};
2538 	int ret = 0;
2539 
2540 	root->node = read_tree_block(root->fs_info, bytenr, &check);
2541 	if (IS_ERR(root->node)) {
2542 		ret = PTR_ERR(root->node);
2543 		root->node = NULL;
2544 		return ret;
2545 	}
2546 	if (!extent_buffer_uptodate(root->node)) {
2547 		free_extent_buffer(root->node);
2548 		root->node = NULL;
2549 		return -EIO;
2550 	}
2551 
2552 	btrfs_set_root_node(&root->root_item, root->node);
2553 	root->commit_root = btrfs_root_node(root);
2554 	btrfs_set_root_refs(&root->root_item, 1);
2555 	return ret;
2556 }
2557 
2558 static int load_important_roots(struct btrfs_fs_info *fs_info)
2559 {
2560 	struct btrfs_super_block *sb = fs_info->super_copy;
2561 	u64 gen, bytenr;
2562 	int level, ret;
2563 
2564 	bytenr = btrfs_super_root(sb);
2565 	gen = btrfs_super_generation(sb);
2566 	level = btrfs_super_root_level(sb);
2567 	ret = load_super_root(fs_info->tree_root, bytenr, gen, level);
2568 	if (ret) {
2569 		btrfs_warn(fs_info, "couldn't read tree root");
2570 		return ret;
2571 	}
2572 	return 0;
2573 }
2574 
2575 static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2576 {
2577 	int backup_index = find_newest_super_backup(fs_info);
2578 	struct btrfs_super_block *sb = fs_info->super_copy;
2579 	struct btrfs_root *tree_root = fs_info->tree_root;
2580 	bool handle_error = false;
2581 	int ret = 0;
2582 	int i;
2583 
2584 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2585 		if (handle_error) {
2586 			if (!IS_ERR(tree_root->node))
2587 				free_extent_buffer(tree_root->node);
2588 			tree_root->node = NULL;
2589 
2590 			if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2591 				break;
2592 
2593 			free_root_pointers(fs_info, false);
2594 
2595 			/*
2596 			 * Don't use the log in recovery mode, it won't be
2597 			 * valid
2598 			 */
2599 			btrfs_set_super_log_root(sb, 0);
2600 
2601 			/* We can't trust the free space cache either */
2602 			btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2603 
2604 			btrfs_warn(fs_info, "try to load backup roots slot %d", i);
2605 			ret = read_backup_root(fs_info, i);
2606 			backup_index = ret;
2607 			if (ret < 0)
2608 				return ret;
2609 		}
2610 
2611 		ret = load_important_roots(fs_info);
2612 		if (ret) {
2613 			handle_error = true;
2614 			continue;
2615 		}
2616 
2617 		/*
2618 		 * No need to hold btrfs_root::objectid_mutex since the fs
2619 		 * hasn't been fully initialised and we are the only user
2620 		 */
2621 		ret = btrfs_init_root_free_objectid(tree_root);
2622 		if (ret < 0) {
2623 			handle_error = true;
2624 			continue;
2625 		}
2626 
2627 		ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
2628 
2629 		ret = btrfs_read_roots(fs_info);
2630 		if (ret < 0) {
2631 			handle_error = true;
2632 			continue;
2633 		}
2634 
2635 		/* All successful */
2636 		fs_info->generation = btrfs_header_generation(tree_root->node);
2637 		fs_info->last_trans_committed = fs_info->generation;
2638 		fs_info->last_reloc_trans = 0;
2639 
2640 		/* Always begin writing backup roots after the one being used */
2641 		if (backup_index < 0) {
2642 			fs_info->backup_root_index = 0;
2643 		} else {
2644 			fs_info->backup_root_index = backup_index + 1;
2645 			fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2646 		}
2647 		break;
2648 	}
2649 
2650 	return ret;
2651 }
2652 
2653 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2654 {
2655 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2656 	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2657 	INIT_LIST_HEAD(&fs_info->trans_list);
2658 	INIT_LIST_HEAD(&fs_info->dead_roots);
2659 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2660 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2661 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2662 	spin_lock_init(&fs_info->delalloc_root_lock);
2663 	spin_lock_init(&fs_info->trans_lock);
2664 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2665 	spin_lock_init(&fs_info->delayed_iput_lock);
2666 	spin_lock_init(&fs_info->defrag_inodes_lock);
2667 	spin_lock_init(&fs_info->super_lock);
2668 	spin_lock_init(&fs_info->buffer_lock);
2669 	spin_lock_init(&fs_info->unused_bgs_lock);
2670 	spin_lock_init(&fs_info->treelog_bg_lock);
2671 	spin_lock_init(&fs_info->zone_active_bgs_lock);
2672 	spin_lock_init(&fs_info->relocation_bg_lock);
2673 	rwlock_init(&fs_info->tree_mod_log_lock);
2674 	rwlock_init(&fs_info->global_root_lock);
2675 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2676 	mutex_init(&fs_info->reclaim_bgs_lock);
2677 	mutex_init(&fs_info->reloc_mutex);
2678 	mutex_init(&fs_info->delalloc_root_mutex);
2679 	mutex_init(&fs_info->zoned_meta_io_lock);
2680 	mutex_init(&fs_info->zoned_data_reloc_io_lock);
2681 	seqlock_init(&fs_info->profiles_lock);
2682 
2683 	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
2684 	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
2685 	btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
2686 	btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
2687 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
2688 				     BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2689 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
2690 				     BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2691 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
2692 				     BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2693 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
2694 				     BTRFS_LOCKDEP_TRANS_COMPLETED);
2695 
2696 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2697 	INIT_LIST_HEAD(&fs_info->space_info);
2698 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2699 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2700 	INIT_LIST_HEAD(&fs_info->reclaim_bgs);
2701 	INIT_LIST_HEAD(&fs_info->zone_active_bgs);
2702 #ifdef CONFIG_BTRFS_DEBUG
2703 	INIT_LIST_HEAD(&fs_info->allocated_roots);
2704 	INIT_LIST_HEAD(&fs_info->allocated_ebs);
2705 	spin_lock_init(&fs_info->eb_leak_lock);
2706 #endif
2707 	extent_map_tree_init(&fs_info->mapping_tree);
2708 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2709 			     BTRFS_BLOCK_RSV_GLOBAL);
2710 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2711 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2712 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2713 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2714 			     BTRFS_BLOCK_RSV_DELOPS);
2715 	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2716 			     BTRFS_BLOCK_RSV_DELREFS);
2717 
2718 	atomic_set(&fs_info->async_delalloc_pages, 0);
2719 	atomic_set(&fs_info->defrag_running, 0);
2720 	atomic_set(&fs_info->nr_delayed_iputs, 0);
2721 	atomic64_set(&fs_info->tree_mod_seq, 0);
2722 	fs_info->global_root_tree = RB_ROOT;
2723 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2724 	fs_info->metadata_ratio = 0;
2725 	fs_info->defrag_inodes = RB_ROOT;
2726 	atomic64_set(&fs_info->free_chunk_space, 0);
2727 	fs_info->tree_mod_log = RB_ROOT;
2728 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2729 	btrfs_init_ref_verify(fs_info);
2730 
2731 	fs_info->thread_pool_size = min_t(unsigned long,
2732 					  num_online_cpus() + 2, 8);
2733 
2734 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2735 	spin_lock_init(&fs_info->ordered_root_lock);
2736 
2737 	btrfs_init_scrub(fs_info);
2738 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2739 	fs_info->check_integrity_print_mask = 0;
2740 #endif
2741 	btrfs_init_balance(fs_info);
2742 	btrfs_init_async_reclaim_work(fs_info);
2743 
2744 	rwlock_init(&fs_info->block_group_cache_lock);
2745 	fs_info->block_group_cache_tree = RB_ROOT_CACHED;
2746 
2747 	extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2748 			    IO_TREE_FS_EXCLUDED_EXTENTS);
2749 
2750 	mutex_init(&fs_info->ordered_operations_mutex);
2751 	mutex_init(&fs_info->tree_log_mutex);
2752 	mutex_init(&fs_info->chunk_mutex);
2753 	mutex_init(&fs_info->transaction_kthread_mutex);
2754 	mutex_init(&fs_info->cleaner_mutex);
2755 	mutex_init(&fs_info->ro_block_group_mutex);
2756 	init_rwsem(&fs_info->commit_root_sem);
2757 	init_rwsem(&fs_info->cleanup_work_sem);
2758 	init_rwsem(&fs_info->subvol_sem);
2759 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2760 
2761 	btrfs_init_dev_replace_locks(fs_info);
2762 	btrfs_init_qgroup(fs_info);
2763 	btrfs_discard_init(fs_info);
2764 
2765 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2766 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2767 
2768 	init_waitqueue_head(&fs_info->transaction_throttle);
2769 	init_waitqueue_head(&fs_info->transaction_wait);
2770 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2771 	init_waitqueue_head(&fs_info->async_submit_wait);
2772 	init_waitqueue_head(&fs_info->delayed_iputs_wait);
2773 
2774 	/* Usable values until the real ones are cached from the superblock */
2775 	fs_info->nodesize = 4096;
2776 	fs_info->sectorsize = 4096;
2777 	fs_info->sectorsize_bits = ilog2(4096);
2778 	fs_info->stripesize = 4096;
2779 
2780 	fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
2781 
2782 	spin_lock_init(&fs_info->swapfile_pins_lock);
2783 	fs_info->swapfile_pins = RB_ROOT;
2784 
2785 	fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
2786 	INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
2787 }
2788 
2789 static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2790 {
2791 	int ret;
2792 
2793 	fs_info->sb = sb;
2794 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2795 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2796 
2797 	ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
2798 	if (ret)
2799 		return ret;
2800 
2801 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2802 	if (ret)
2803 		return ret;
2804 
2805 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2806 					(1 + ilog2(nr_cpu_ids));
2807 
2808 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2809 	if (ret)
2810 		return ret;
2811 
2812 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2813 			GFP_KERNEL);
2814 	if (ret)
2815 		return ret;
2816 
2817 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2818 					GFP_KERNEL);
2819 	if (!fs_info->delayed_root)
2820 		return -ENOMEM;
2821 	btrfs_init_delayed_root(fs_info->delayed_root);
2822 
2823 	if (sb_rdonly(sb))
2824 		set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
2825 
2826 	return btrfs_alloc_stripe_hash_table(fs_info);
2827 }
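
/*
 * Illustrative sketch, not part of the original source: the percpu_counter
 * lifecycle used by init_mount_fs_info() above. Updates are cheap per-CPU
 * operations; reading an accurate total requires summing across CPUs. The
 * counter name is hypothetical and error handling is trimmed.
 */
static int demo_percpu_counter_usage(void)
{
	struct percpu_counter demo_bytes;
	s64 total;
	int ret;

	ret = percpu_counter_init(&demo_bytes, 0, GFP_KERNEL);
	if (ret)
		return ret;

	percpu_counter_add(&demo_bytes, 4096);		/* fast, per-CPU update */
	total = percpu_counter_sum(&demo_bytes);	/* accurate, slower read */

	percpu_counter_destroy(&demo_bytes);
	return total == 4096 ? 0 : -EINVAL;
}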
2828 
2829 static int btrfs_uuid_rescan_kthread(void *data)
2830 {
2831 	struct btrfs_fs_info *fs_info = data;
2832 	int ret;
2833 
2834 	/*
2835 	 * 1st step is to iterate through the existing UUID tree and
2836 	 * to delete all entries that contain outdated data.
2837 	 * 2nd step is to add all missing entries to the UUID tree.
2838 	 */
2839 	ret = btrfs_uuid_tree_iterate(fs_info);
2840 	if (ret < 0) {
2841 		if (ret != -EINTR)
2842 			btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2843 				   ret);
2844 		up(&fs_info->uuid_tree_rescan_sem);
2845 		return ret;
2846 	}
2847 	return btrfs_uuid_scan_kthread(data);
2848 }
2849 
2850 static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2851 {
2852 	struct task_struct *task;
2853 
2854 	down(&fs_info->uuid_tree_rescan_sem);
2855 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2856 	if (IS_ERR(task)) {
2857 		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
2858 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
2859 		up(&fs_info->uuid_tree_rescan_sem);
2860 		return PTR_ERR(task);
2861 	}
2862 
2863 	return 0;
2864 }
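
/*
 * Illustrative sketch, not part of the original source: the semaphore-gated
 * background thread pattern used by btrfs_check_uuid_tree(). The caller
 * down()s before starting the thread and the thread up()s when done (or the
 * caller up()s on startup failure), so anyone who later down()s waits for
 * the scan to finish. Names are hypothetical; assumes
 * sema_init(&demo_scan_sem, 1) ran during setup.
 */
static struct semaphore demo_scan_sem;

static int demo_scan_thread(void *data)
{
	/* ... long-running scan work would go here ... */
	up(&demo_scan_sem);	/* release waiters once the scan is done */
	return 0;
}

static int demo_start_scan(void *data)
{
	struct task_struct *task;

	down(&demo_scan_sem);
	task = kthread_run(demo_scan_thread, data, "demo-scan");
	if (IS_ERR(task)) {
		up(&demo_scan_sem);	/* undo the down() on startup failure */
		return PTR_ERR(task);
	}
	return 0;
}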
2865 
2866 static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2867 {
2868 	u64 root_objectid = 0;
2869 	struct btrfs_root *gang[8];
2870 	int i = 0;
2871 	int err = 0;
2872 	unsigned int ret = 0;
2873 
2874 	while (1) {
2875 		spin_lock(&fs_info->fs_roots_radix_lock);
2876 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2877 					     (void **)gang, root_objectid,
2878 					     ARRAY_SIZE(gang));
2879 		if (!ret) {
2880 			spin_unlock(&fs_info->fs_roots_radix_lock);
2881 			break;
2882 		}
2883 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
2884 
2885 		for (i = 0; i < ret; i++) {
2886 			/* Avoid grabbing roots in dead_roots. */
2887 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
2888 				gang[i] = NULL;
2889 				continue;
2890 			}
2891 			/* Grab all the search results for later use. */
2892 			gang[i] = btrfs_grab_root(gang[i]);
2893 		}
2894 		spin_unlock(&fs_info->fs_roots_radix_lock);
2895 
2896 		for (i = 0; i < ret; i++) {
2897 			if (!gang[i])
2898 				continue;
2899 			root_objectid = gang[i]->root_key.objectid;
2900 			err = btrfs_orphan_cleanup(gang[i]);
2901 			if (err)
2902 				goto out;
2903 			btrfs_put_root(gang[i]);
2904 		}
2905 		root_objectid++;
2906 	}
2907 out:
2908 	/* Release the uncleaned roots due to error. */
2909 	for (; i < ret; i++) {
2910 		if (gang[i])
2911 			btrfs_put_root(gang[i]);
2912 	}
2913 	return err;
2914 }
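
/*
 * Illustrative sketch, not part of the original source: the batched
 * radix-tree walk that btrfs_cleanup_fs_roots() uses. Looking up a small
 * gang per iteration bounds how long a lock would be held, and resuming
 * from the last processed key + 1 guarantees forward progress. The item
 * type and visitor are hypothetical; like btrfs roots, each item records
 * the key it was inserted under.
 */
struct demo_item {
	unsigned long index;	/* key this item is inserted under */
};

static void demo_walk_radix(struct radix_tree_root *tree,
			    void (*visit)(struct demo_item *item))
{
	struct demo_item *gang[8];
	unsigned long first_index = 0;
	unsigned int nr, i;

	while (1) {
		nr = radix_tree_gang_lookup(tree, (void **)gang, first_index,
					    ARRAY_SIZE(gang));
		if (!nr)
			break;
		/* Resume strictly after the last key we processed. */
		first_index = gang[nr - 1]->index + 1;
		for (i = 0; i < nr; i++)
			visit(gang[i]);
	}
}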
2915 
2916 /*
2917  * Some options only have meaning at mount time and shouldn't persist across
2918  * remounts, or be displayed. Clear these at the end of mount and remount
2919  * code paths.
2920  */
2921 void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info)
2922 {
2923 	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
2924 	btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE);
2925 }
2926 
2927 /*
2928  * Mounting logic specific to read-write file systems. Shared by open_ctree
2929  * and btrfs_remount when remounting from read-only to read-write.
2930  */
2931 int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
2932 {
2933 	int ret;
2934 	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
2935 	bool rebuild_free_space_tree = false;
2936 
2937 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
2938 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2939 		rebuild_free_space_tree = true;
2940 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2941 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
2942 		btrfs_warn(fs_info, "free space tree is invalid");
2943 		rebuild_free_space_tree = true;
2944 	}
2945 
2946 	if (rebuild_free_space_tree) {
2947 		btrfs_info(fs_info, "rebuilding free space tree");
2948 		ret = btrfs_rebuild_free_space_tree(fs_info);
2949 		if (ret) {
2950 			btrfs_warn(fs_info,
2951 				   "failed to rebuild free space tree: %d", ret);
2952 			goto out;
2953 		}
2954 	}
2955 
2956 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2957 	    !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
2958 		btrfs_info(fs_info, "disabling free space tree");
2959 		ret = btrfs_delete_free_space_tree(fs_info);
2960 		if (ret) {
2961 			btrfs_warn(fs_info,
2962 				   "failed to disable free space tree: %d", ret);
2963 			goto out;
2964 		}
2965 	}
2966 
2967 	/*
2968 	 * btrfs_find_orphan_roots() is responsible for finding all the dead
2969 	 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
2970 	 * them into the fs_info->fs_roots_radix tree. This must be done before
2971 	 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
2972 	 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
2973 	 * item before the root's tree is deleted - this means that if we unmount
2974 	 * or crash before the deletion completes, on the next mount we will not
2975 	 * delete what remains of the tree because the orphan item does not
2976 	 * exist anymore, which is what tells us we have a pending deletion.
2977 	 */
2978 	ret = btrfs_find_orphan_roots(fs_info);
2979 	if (ret)
2980 		goto out;
2981 
2982 	ret = btrfs_cleanup_fs_roots(fs_info);
2983 	if (ret)
2984 		goto out;
2985 
2986 	down_read(&fs_info->cleanup_work_sem);
2987 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2988 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2989 		up_read(&fs_info->cleanup_work_sem);
2990 		goto out;
2991 	}
2992 	up_read(&fs_info->cleanup_work_sem);
2993 
2994 	mutex_lock(&fs_info->cleaner_mutex);
2995 	ret = btrfs_recover_relocation(fs_info);
2996 	mutex_unlock(&fs_info->cleaner_mutex);
2997 	if (ret < 0) {
2998 		btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
2999 		goto out;
3000 	}
3001 
3002 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3003 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3004 		btrfs_info(fs_info, "creating free space tree");
3005 		ret = btrfs_create_free_space_tree(fs_info);
3006 		if (ret) {
3007 			btrfs_warn(fs_info,
3008 				"failed to create free space tree: %d", ret);
3009 			goto out;
3010 		}
3011 	}
3012 
3013 	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
3014 		ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
3015 		if (ret)
3016 			goto out;
3017 	}
3018 
3019 	ret = btrfs_resume_balance_async(fs_info);
3020 	if (ret)
3021 		goto out;
3022 
3023 	ret = btrfs_resume_dev_replace_async(fs_info);
3024 	if (ret) {
3025 		btrfs_warn(fs_info, "failed to resume dev_replace");
3026 		goto out;
3027 	}
3028 
3029 	btrfs_qgroup_rescan_resume(fs_info);
3030 
3031 	if (!fs_info->uuid_root) {
3032 		btrfs_info(fs_info, "creating UUID tree");
3033 		ret = btrfs_create_uuid_tree(fs_info);
3034 		if (ret) {
3035 			btrfs_warn(fs_info,
3036 				   "failed to create the UUID tree %d", ret);
3037 			goto out;
3038 		}
3039 	}
3040 
3041 out:
3042 	return ret;
3043 }
3044 
3045 /*
3046  * Do various sanity and dependency checks of different features.
3047  *
3048  * @is_rw_mount:	If the mount is read-write.
3049  *
3050  * This is the place for less strict checks (like for subpage or artificial
3051  * feature dependencies).
3052  *
3053  * For strict checks or possible corruption detection, see
3054  * btrfs_validate_super().
3055  *
3056  * This should be called after btrfs_parse_options(), as some mount options
3057  * (space cache related) can modify on-disk format like free space tree and
3058  * screw up certain feature dependencies.
3059  */
3060 int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
3061 {
3062 	struct btrfs_super_block *disk_super = fs_info->super_copy;
3063 	u64 incompat = btrfs_super_incompat_flags(disk_super);
3064 	const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super);
3065 	const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);
3066 
3067 	if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
3068 		btrfs_err(fs_info,
3069 		"cannot mount because of unknown incompat features (0x%llx)",
3070 		    incompat);
3071 		return -EINVAL;
3072 	}
3073 
3074 	/* Runtime limitation for mixed block groups. */
3075 	if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3076 	    (fs_info->sectorsize != fs_info->nodesize)) {
3077 		btrfs_err(fs_info,
3078 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3079 			fs_info->nodesize, fs_info->sectorsize);
3080 		return -EINVAL;
3081 	}
3082 
3083 	/* Mixed backref is an always-enabled feature. */
3084 	incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3085 
3086 	/* Set compression related flags just in case. */
3087 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3088 		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3089 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3090 		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3091 
3092 	/*
3093 	 * An ancient flag, which should really be marked deprecated.
3094 	 * Such a runtime limitation doesn't really need an incompat flag.
3095 	 */
3096 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
3097 		incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3098 
3099 	if (compat_ro_unsupp && is_rw_mount) {
3100 		btrfs_err(fs_info,
3101 	"cannot mount read-write because of unknown compat_ro features (0x%llx)",
3102 		       compat_ro);
3103 		return -EINVAL;
3104 	}
3105 
3106 	/*
3107 	 * If we have unsupported RO compat features, then even though we are
3108 	 * RO mounted we must not cause any metadata writes, including log
3109 	 * replay, or we could screw up whatever the new feature requires.
3110 	 */
3111 	if (compat_ro_unsupp && btrfs_super_log_root(disk_super) &&
3112 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3113 		btrfs_err(fs_info,
3114 "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
3115 			  compat_ro);
3116 		return -EINVAL;
3117 	}
3118 
3119 	/*
3120 	 * Artificial limitations for block group tree, to force
3121 	 * block-group-tree to rely on no-holes and free-space-tree.
3122 	 */
3123 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
3124 	    (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
3125 	     !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
3126 		btrfs_err(fs_info,
3127 "block-group-tree feature requires no-holes and free-space-tree features");
3128 		return -EINVAL;
3129 	}
3130 
3131 	/*
3132 	 * Subpage runtime limitation on v1 cache.
3133 	 *
3134 	 * V1 space cache still has some hardcoded PAGE_SIZE usage, and since
3135 	 * we're already defaulting to the v2 cache there's no need to bother
3136 	 * with v1 as it's going to be deprecated anyway.
3137 	 */
3138 	if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
3139 		btrfs_warn(fs_info,
3140 	"v1 space cache is not supported for page size %lu with sectorsize %u",
3141 			   PAGE_SIZE, fs_info->sectorsize);
3142 		return -EINVAL;
3143 	}
3144 
3145 	/* This can be called by remount, so we need to protect the super block. */
3146 	spin_lock(&fs_info->super_lock);
3147 	btrfs_set_super_incompat_flags(disk_super, incompat);
3148 	spin_unlock(&fs_info->super_lock);
3149 
3150 	return 0;
3151 }
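
/*
 * Illustrative sketch, not part of the original source: the mask arithmetic
 * behind the incompat/compat_ro checks in btrfs_check_features(). Any bit
 * set on disk but absent from the supported mask is unknown and fails the
 * mount. The DEMO_* masks and flag values below are hypothetical.
 */
#define DEMO_FEATURE_A		(1ULL << 0)
#define DEMO_FEATURE_B		(1ULL << 1)
#define DEMO_SUPP_MASK		(DEMO_FEATURE_A | DEMO_FEATURE_B)

static int demo_check_features(u64 on_disk_flags)
{
	u64 unsupported = on_disk_flags & ~DEMO_SUPP_MASK;

	/* e.g. on_disk_flags = 0x5: bit 2 is unknown, so this fails. */
	return unsupported ? -EINVAL : 0;
}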
3152 
3153 int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
3154 		      char *options)
3155 {
3156 	u32 sectorsize;
3157 	u32 nodesize;
3158 	u32 stripesize;
3159 	u64 generation;
3160 	u64 features;
3161 	u16 csum_type;
3162 	struct btrfs_super_block *disk_super;
3163 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3164 	struct btrfs_root *tree_root;
3165 	struct btrfs_root *chunk_root;
3166 	int ret;
3167 	int level;
3168 
3169 	ret = init_mount_fs_info(fs_info, sb);
3170 	if (ret)
3171 		goto fail;
3172 
3173 	/* These need to be init'ed before we start creating inodes and such. */
3174 	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
3175 				     GFP_KERNEL);
3176 	fs_info->tree_root = tree_root;
3177 	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
3178 				      GFP_KERNEL);
3179 	fs_info->chunk_root = chunk_root;
3180 	if (!tree_root || !chunk_root) {
3181 		ret = -ENOMEM;
3182 		goto fail;
3183 	}
3184 
3185 	ret = btrfs_init_btree_inode(sb);
3186 	if (ret)
3187 		goto fail;
3188 
3189 	invalidate_bdev(fs_devices->latest_dev->bdev);
3190 
3191 	/*
3192 	 * Read super block and check the signature bytes only
3193 	 */
3194 	disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev);
3195 	if (IS_ERR(disk_super)) {
3196 		ret = PTR_ERR(disk_super);
3197 		goto fail_alloc;
3198 	}
3199 
3200 	btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
3201 	/*
3202 	 * Verify the checksum type first; if that or the checksum value is
3203 	 * corrupted, we'll find out.
3204 	 */
3205 	csum_type = btrfs_super_csum_type(disk_super);
3206 	if (!btrfs_supported_super_csum(csum_type)) {
3207 		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
3208 			  csum_type);
3209 		ret = -EINVAL;
3210 		btrfs_release_disk_super(disk_super);
3211 		goto fail_alloc;
3212 	}
3213 
3214 	fs_info->csum_size = btrfs_super_csum_size(disk_super);
3215 
3216 	ret = btrfs_init_csum_hash(fs_info, csum_type);
3217 	if (ret) {
3218 		btrfs_release_disk_super(disk_super);
3219 		goto fail_alloc;
3220 	}
3221 
3222 	/*
3223 	 * We want to check the superblock checksum; the type is stored inside.
3224 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
3225 	 */
3226 	if (btrfs_check_super_csum(fs_info, disk_super)) {
3227 		btrfs_err(fs_info, "superblock checksum mismatch");
3228 		ret = -EINVAL;
3229 		btrfs_release_disk_super(disk_super);
3230 		goto fail_alloc;
3231 	}
3232 
3233 	/*
3234 	 * super_copy is zeroed at allocation time and we never touch the
3235 	 * following bytes up to INFO_SIZE; the checksum is calculated from
3236 	 * the whole block of INFO_SIZE.
3237 	 */
3238 	memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
3239 	btrfs_release_disk_super(disk_super);
3240 
3241 	disk_super = fs_info->super_copy;
3242 
3243 
3244 	features = btrfs_super_flags(disk_super);
3245 	if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
3246 		features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
3247 		btrfs_set_super_flags(disk_super, features);
3248 		btrfs_info(fs_info,
3249 			"found metadata UUID change in progress flag, clearing");
3250 	}
3251 
3252 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
3253 	       sizeof(*fs_info->super_for_commit));
3254 
3255 	ret = btrfs_validate_mount_super(fs_info);
3256 	if (ret) {
3257 		btrfs_err(fs_info, "superblock contains fatal errors");
3258 		ret = -EINVAL;
3259 		goto fail_alloc;
3260 	}
3261 
3262 	if (!btrfs_super_root(disk_super)) {
3263 		btrfs_err(fs_info, "invalid superblock tree root bytenr");
3264 		ret = -EINVAL;
3265 		goto fail_alloc;
3266 	}
3267 
3268 	/* check FS state, whether FS is broken. */
3269 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
3270 		WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
3271 
3272 	/*
3273 	 * In the long term, we'll store the compression type in the super
3274 	 * block, and it'll be used for per file compression control.
3275 	 */
3276 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
3277 
3278 
3279 	/* Set up fs_info before parsing mount options */
3280 	nodesize = btrfs_super_nodesize(disk_super);
3281 	sectorsize = btrfs_super_sectorsize(disk_super);
3282 	stripesize = sectorsize;
3283 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3284 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
3285 
3286 	fs_info->nodesize = nodesize;
3287 	fs_info->sectorsize = sectorsize;
3288 	fs_info->sectorsize_bits = ilog2(sectorsize);
3289 	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
3290 	fs_info->stripesize = stripesize;
3291 
3292 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
3293 	if (ret)
3294 		goto fail_alloc;
3295 
3296 	ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
3297 	if (ret < 0)
3298 		goto fail_alloc;
3299 
3300 	if (sectorsize < PAGE_SIZE) {
3301 		struct btrfs_subpage_info *subpage_info;
3302 
3303 		/*
3304 		 * V1 space cache has some hardcoded PAGE_SIZE usage, and is
3305 		 * going to be deprecated.
3306 		 *
3307 		 * Force to use v2 cache for subpage case.
3308 		 */
3309 		btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
3310 		btrfs_set_and_info(fs_info, FREE_SPACE_TREE,
3311 			"forcing free space tree for sector size %u with page size %lu",
3312 			sectorsize, PAGE_SIZE);
3313 
3314 		btrfs_warn(fs_info,
3315 		"read-write for sector size %u with page size %lu is experimental",
3316 			   sectorsize, PAGE_SIZE);
3317 		subpage_info = kzalloc(sizeof(*subpage_info), GFP_KERNEL);
3318 		if (!subpage_info) {
3319 			ret = -ENOMEM;
3320 			goto fail_alloc;
3321 		}
3322 		btrfs_init_subpage_info(subpage_info, sectorsize);
3323 		fs_info->subpage_info = subpage_info;
3324 	}
3325 
3326 	ret = btrfs_init_workqueues(fs_info);
3327 	if (ret)
3328 		goto fail_sb_buffer;
3329 
3330 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3331 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
3332 
3333 	sb->s_blocksize = sectorsize;
3334 	sb->s_blocksize_bits = blksize_bits(sectorsize);
3335 	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3336 
3337 	mutex_lock(&fs_info->chunk_mutex);
3338 	ret = btrfs_read_sys_array(fs_info);
3339 	mutex_unlock(&fs_info->chunk_mutex);
3340 	if (ret) {
3341 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
3342 		goto fail_sb_buffer;
3343 	}
3344 
3345 	generation = btrfs_super_chunk_root_generation(disk_super);
3346 	level = btrfs_super_chunk_root_level(disk_super);
3347 	ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
3348 			      generation, level);
3349 	if (ret) {
3350 		btrfs_err(fs_info, "failed to read chunk root");
3351 		goto fail_tree_roots;
3352 	}
3353 
3354 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3355 			   offsetof(struct btrfs_header, chunk_tree_uuid),
3356 			   BTRFS_UUID_SIZE);
3357 
3358 	ret = btrfs_read_chunk_tree(fs_info);
3359 	if (ret) {
3360 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3361 		goto fail_tree_roots;
3362 	}
3363 
3364 	/*
3365 	 * At this point we know all the devices that make this filesystem,
3366 	 * including the seed devices but we don't know yet if the replace
3367 	 * target is required. So free devices that are not part of this
3368 	 * filesystem but skip the replace target device which is checked
3369 	 * below in btrfs_init_dev_replace().
3370 	 */
3371 	btrfs_free_extra_devids(fs_devices);
3372 	if (!fs_devices->latest_dev->bdev) {
3373 		btrfs_err(fs_info, "failed to read devices");
3374 		ret = -EIO;
3375 		goto fail_tree_roots;
3376 	}
3377 
3378 	ret = init_tree_roots(fs_info);
3379 	if (ret)
3380 		goto fail_tree_roots;
3381 
3382 	/*
3383 	 * Get zone type information of zoned block devices. This will also
3384 	 * handle emulation of a zoned filesystem if a regular device has the
3385 	 * zoned incompat feature flag set.
3386 	 */
3387 	ret = btrfs_get_dev_zone_info_all_devices(fs_info);
3388 	if (ret) {
3389 		btrfs_err(fs_info,
3390 			  "zoned: failed to read device zone info: %d", ret);
3391 		goto fail_block_groups;
3392 	}
3393 
3394 	/*
3395 	 * If we have a uuid root and we're not being told to rescan we need to
3396 	 * check the generation here so we can set the
3397 	 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit.  Otherwise we could commit the
3398 	 * transaction during a balance or the log replay without updating the
3399 	 * uuid generation, and then if we crash we would rescan the uuid tree,
3400 	 * even though it was perfectly fine.
3401 	 */
3402 	if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3403 	    fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3404 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3405 
3406 	ret = btrfs_verify_dev_extents(fs_info);
3407 	if (ret) {
3408 		btrfs_err(fs_info,
3409 			  "failed to verify dev extents against chunks: %d",
3410 			  ret);
3411 		goto fail_block_groups;
3412 	}
3413 	ret = btrfs_recover_balance(fs_info);
3414 	if (ret) {
3415 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3416 		goto fail_block_groups;
3417 	}
3418 
3419 	ret = btrfs_init_dev_stats(fs_info);
3420 	if (ret) {
3421 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3422 		goto fail_block_groups;
3423 	}
3424 
3425 	ret = btrfs_init_dev_replace(fs_info);
3426 	if (ret) {
3427 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3428 		goto fail_block_groups;
3429 	}
3430 
3431 	ret = btrfs_check_zoned_mode(fs_info);
3432 	if (ret) {
3433 		btrfs_err(fs_info, "failed to initialize zoned mode: %d",
3434 			  ret);
3435 		goto fail_block_groups;
3436 	}
3437 
3438 	ret = btrfs_sysfs_add_fsid(fs_devices);
3439 	if (ret) {
3440 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3441 				ret);
3442 		goto fail_block_groups;
3443 	}
3444 
3445 	ret = btrfs_sysfs_add_mounted(fs_info);
3446 	if (ret) {
3447 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3448 		goto fail_fsdev_sysfs;
3449 	}
3450 
3451 	ret = btrfs_init_space_info(fs_info);
3452 	if (ret) {
3453 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3454 		goto fail_sysfs;
3455 	}
3456 
3457 	ret = btrfs_read_block_groups(fs_info);
3458 	if (ret) {
3459 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3460 		goto fail_sysfs;
3461 	}
3462 
3463 	btrfs_free_zone_cache(fs_info);
3464 
3465 	btrfs_check_active_zone_reservation(fs_info);
3466 
3467 	if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
3468 	    !btrfs_check_rw_degradable(fs_info, NULL)) {
3469 		btrfs_warn(fs_info,
3470 		"writable mount is not allowed due to too many missing devices");
3471 		ret = -EINVAL;
3472 		goto fail_sysfs;
3473 	}
3474 
3475 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
3476 					       "btrfs-cleaner");
3477 	if (IS_ERR(fs_info->cleaner_kthread)) {
3478 		ret = PTR_ERR(fs_info->cleaner_kthread);
3479 		goto fail_sysfs;
3480 	}
3481 
3482 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3483 						   tree_root,
3484 						   "btrfs-transaction");
3485 	if (IS_ERR(fs_info->transaction_kthread)) {
3486 		ret = PTR_ERR(fs_info->transaction_kthread);
3487 		goto fail_cleaner;
3488 	}
3489 
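	/*
	 * If no device is rotational, enable the SSD allocation scheme,
	 * unless nossd was given.
	 */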
3490 	if (!btrfs_test_opt(fs_info, NOSSD) &&
3491 	    !fs_info->fs_devices->rotating) {
3492 		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3493 	}
3494 
3495 	/*
3496 	 * For devices supporting discard turn on discard=async automatically,
3497 	 * unless it's already set or disabled. This could be turned off by
3498 	 * nodiscard for the same mount.
3499 	 *
3500 	 * The zoned mode piggybacks on the discard functionality for
3501 	 * resetting a zone. There is no reason to delay the zone reset as it is
3502 	 * fast enough. So, do not enable async discard for zoned mode.
3503 	 */
3504 	if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) ||
3505 	      btrfs_test_opt(fs_info, DISCARD_ASYNC) ||
3506 	      btrfs_test_opt(fs_info, NODISCARD)) &&
3507 	    fs_info->fs_devices->discardable &&
3508 	    !btrfs_is_zoned(fs_info)) {
3509 		btrfs_set_and_info(fs_info, DISCARD_ASYNC,
3510 				   "auto enabling async discard");
3511 	}
3512 
3513 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3514 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3515 		ret = btrfsic_mount(fs_info, fs_devices,
3516 				    btrfs_test_opt(fs_info,
3517 					CHECK_INTEGRITY_DATA) ? 1 : 0,
3518 				    fs_info->check_integrity_print_mask);
3519 		if (ret)
3520 			btrfs_warn(fs_info,
3521 				"failed to initialize integrity check module: %d",
3522 				ret);
3523 	}
3524 #endif
3525 	ret = btrfs_read_qgroup_config(fs_info);
3526 	if (ret)
3527 		goto fail_trans_kthread;
3528 
3529 	if (btrfs_build_ref_tree(fs_info))
3530 		btrfs_err(fs_info, "couldn't build ref tree");
3531 
3532 	/* do not make disk changes in a broken FS or if nologreplay is given */
3533 	if (btrfs_super_log_root(disk_super) != 0 &&
3534 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3535 		btrfs_info(fs_info, "start tree-log replay");
3536 		ret = btrfs_replay_log(fs_info, fs_devices);
3537 		if (ret)
3538 			goto fail_qgroup;
3539 	}
3540 
3541 	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
3542 	if (IS_ERR(fs_info->fs_root)) {
3543 		ret = PTR_ERR(fs_info->fs_root);
3544 		btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
3545 		fs_info->fs_root = NULL;
3546 		goto fail_qgroup;
3547 	}
3548 
3549 	if (sb_rdonly(sb))
3550 		goto clear_oneshot;
3551 
3552 	ret = btrfs_start_pre_rw_mount(fs_info);
3553 	if (ret) {
3554 		close_ctree(fs_info);
3555 		return ret;
3556 	}
3557 	btrfs_discard_resume(fs_info);
3558 
3559 	if (fs_info->uuid_root &&
3560 	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3561 	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
3562 		btrfs_info(fs_info, "checking UUID tree");
3563 		ret = btrfs_check_uuid_tree(fs_info);
3564 		if (ret) {
3565 			btrfs_warn(fs_info,
3566 				"failed to check the UUID tree: %d", ret);
3567 			close_ctree(fs_info);
3568 			return ret;
3569 		}
3570 	}
3571 
3572 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3573 
3574 	/* Kick the cleaner thread so it'll start deleting snapshots. */
3575 	if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
3576 		wake_up_process(fs_info->cleaner_kthread);
3577 
3578 clear_oneshot:
3579 	btrfs_clear_oneshot_options(fs_info);
3580 	return 0;
3581 
3582 fail_qgroup:
3583 	btrfs_free_qgroup_config(fs_info);
3584 fail_trans_kthread:
3585 	kthread_stop(fs_info->transaction_kthread);
3586 	btrfs_cleanup_transaction(fs_info);
3587 	btrfs_free_fs_roots(fs_info);
3588 fail_cleaner:
3589 	kthread_stop(fs_info->cleaner_kthread);
3590 
3591 	/*
3592 	 * make sure we're done with the btree inode before we stop our
3593 	 * kthreads
3594 	 */
3595 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3596 
3597 fail_sysfs:
3598 	btrfs_sysfs_remove_mounted(fs_info);
3599 
3600 fail_fsdev_sysfs:
3601 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3602 
3603 fail_block_groups:
3604 	btrfs_put_block_group_cache(fs_info);
3605 
3606 fail_tree_roots:
3607 	if (fs_info->data_reloc_root)
3608 		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
3609 	free_root_pointers(fs_info, true);
3610 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3611 
3612 fail_sb_buffer:
3613 	btrfs_stop_all_workers(fs_info);
3614 	btrfs_free_block_groups(fs_info);
3615 fail_alloc:
3616 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3617 
3618 	iput(fs_info->btree_inode);
3619 fail:
3620 	btrfs_close_devices(fs_info->fs_devices);
3621 	ASSERT(ret < 0);
3622 	return ret;
3623 }
3624 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3625 
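/*
 * Endio handler for superblock writes: account any write error in the device
 * statistics and drop the page references taken at submission time.
 */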
3626 static void btrfs_end_super_write(struct bio *bio)
3627 {
3628 	struct btrfs_device *device = bio->bi_private;
3629 	struct bio_vec *bvec;
3630 	struct bvec_iter_all iter_all;
3631 	struct page *page;
3632 
3633 	bio_for_each_segment_all(bvec, bio, iter_all) {
3634 		page = bvec->bv_page;
3635 
3636 		if (bio->bi_status) {
3637 			btrfs_warn_rl_in_rcu(device->fs_info,
3638 				"lost page write due to IO error on %s (%d)",
3639 				btrfs_dev_name(device),
3640 				blk_status_to_errno(bio->bi_status));
3641 			ClearPageUptodate(page);
3642 			SetPageError(page);
3643 			btrfs_dev_stat_inc_and_print(device,
3644 						     BTRFS_DEV_STAT_WRITE_ERRS);
3645 		} else {
3646 			SetPageUptodate(page);
3647 		}
3648 
3649 		put_page(page);
3650 		unlock_page(page);
3651 	}
3652 
3653 	bio_put(bio);
3654 }
3655 
3656 struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
3657 						   int copy_num, bool drop_cache)
3658 {
3659 	struct btrfs_super_block *super;
3660 	struct page *page;
3661 	u64 bytenr, bytenr_orig;
3662 	struct address_space *mapping = bdev->bd_inode->i_mapping;
3663 	int ret;
3664 
3665 	bytenr_orig = btrfs_sb_offset(copy_num);
3666 	ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
3667 	if (ret == -ENOENT)
3668 		return ERR_PTR(-EINVAL);
3669 	else if (ret)
3670 		return ERR_PTR(ret);
3671 
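	/* Reject a superblock copy that would end beyond the device. */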
3672 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
3673 		return ERR_PTR(-EINVAL);
3674 
3675 	if (drop_cache) {
3676 		/* This should only be called with the primary sb. */
3677 		ASSERT(copy_num == 0);
3678 
3679 		/*
3680 		 * Drop the page of the primary superblock, so later read will
3681 		 * Drop the page of the primary superblock, so a later read will
3682 		 */
3683 		invalidate_inode_pages2_range(mapping,
3684 				bytenr >> PAGE_SHIFT,
3685 				(bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
3686 	}
3687 
3688 	page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
3689 	if (IS_ERR(page))
3690 		return ERR_CAST(page);
3691 
3692 	super = page_address(page);
3693 	if (btrfs_super_magic(super) != BTRFS_MAGIC) {
3694 		btrfs_release_disk_super(super);
3695 		return ERR_PTR(-ENODATA);
3696 	}
3697 
3698 	if (btrfs_super_bytenr(super) != bytenr_orig) {
3699 		btrfs_release_disk_super(super);
3700 		return ERR_PTR(-EINVAL);
3701 	}
3702 
3703 	return super;
3704 }
3705 
3706 
3707 struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
3708 {
3709 	struct btrfs_super_block *super, *latest = NULL;
3710 	int i;
3711 	u64 transid = 0;
3712 
3713 	/* We would like to check all the supers, but that would make a btrfs
3714 	 * mount succeed after a mkfs from a different FS. So for now we only
3715 	 * check the first super; scanning the later supers (up to
3716 	 * BTRFS_SUPER_MIRROR_MAX) would need a special mount option.
3717 	 */
3718 	for (i = 0; i < 1; i++) {
3719 		super = btrfs_read_dev_one_super(bdev, i, false);
3720 		if (IS_ERR(super))
3721 			continue;
3722 
3723 		if (!latest || btrfs_super_generation(super) > transid) {
3724 			if (latest)
3725 				btrfs_release_disk_super(super);
3726 
3727 			latest = super;
3728 			transid = btrfs_super_generation(super);
3729 		}
3730 	}
3731 
3732 	return super;
3733 }
3734 
3735 /*
3736  * Write superblock @sb to the @device. Do not wait for completion, all the
3737  * pages we use for writing are locked.
3738  *
3739  * Write @max_mirrors copies of the superblock, where 0 means the default,
3740  * i.e. all the copies that fit below the expected device size at commit
3741  * time. Note that max_mirrors must be the same for the write and wait phases.
3742  *
3743  * Return -1 if writing failed for all the attempted copies, 0 otherwise.
3744  */
3745 static int write_dev_supers(struct btrfs_device *device,
3746 			    struct btrfs_super_block *sb, int max_mirrors)
3747 {
3748 	struct btrfs_fs_info *fs_info = device->fs_info;
3749 	struct address_space *mapping = device->bdev->bd_inode->i_mapping;
3750 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3751 	int i;
3752 	int errors = 0;
3753 	int ret;
3754 	u64 bytenr, bytenr_orig;
3755 
3756 	if (max_mirrors == 0)
3757 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3758 
3759 	shash->tfm = fs_info->csum_shash;
3760 
3761 	for (i = 0; i < max_mirrors; i++) {
3762 		struct page *page;
3763 		struct bio *bio;
3764 		struct btrfs_super_block *disk_super;
3765 
3766 		bytenr_orig = btrfs_sb_offset(i);
3767 		ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
3768 		if (ret == -ENOENT) {
3769 			continue;
3770 		} else if (ret < 0) {
3771 			btrfs_err(device->fs_info,
3772 				"couldn't get super block location for mirror %d",
3773 				i);
3774 			errors++;
3775 			continue;
3776 		}
3777 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3778 		    device->commit_total_bytes)
3779 			break;
3780 
3781 		btrfs_set_super_bytenr(sb, bytenr_orig);
3782 
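		/*
		 * The checksum covers everything after the csum field itself,
		 * and the result is stored back into sb->csum.
		 */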
3783 		crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3784 				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
3785 				    sb->csum);
3786 
3787 		page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT,
3788 					   GFP_NOFS);
3789 		if (!page) {
3790 			btrfs_err(device->fs_info,
3791 			    "couldn't get super block page for bytenr %llu",
3792 			    bytenr);
3793 			errors++;
3794 			continue;
3795 		}
3796 
3797 		/* Bump the refcount for wait_dev_supers() */
3798 		get_page(page);
3799 
3800 		disk_super = page_address(page);
3801 		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3802 
3803 		/*
3804 		 * Directly use bios here instead of relying on the page cache
3805 		 * to do I/O, so we don't lose the ability to do integrity
3806 		 * checking.
3807 		 */
3808 		bio = bio_alloc(device->bdev, 1,
3809 				REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
3810 				GFP_NOFS);
3811 		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3812 		bio->bi_private = device;
3813 		bio->bi_end_io = btrfs_end_super_write;
3814 		__bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
3815 			       offset_in_page(bytenr));
3816 
3817 		/*
3818 		 * We FUA only the first super block.  The others we allow to
3819 		 * go down lazily and there's a short window where the on-disk
3820 		 * copies might still contain the older version.
3821 		 */
3822 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3823 			bio->bi_opf |= REQ_FUA;
3824 
3825 		btrfsic_check_bio(bio);
3826 		submit_bio(bio);
3827 
3828 		if (btrfs_advance_sb_log(device, i))
3829 			errors++;
3830 	}
3831 	return errors < i ? 0 : -1;
3832 }
3833 
3834 /*
3835  * Wait for write completion of the superblocks submitted by
3836  * write_dev_supers(); @max_mirrors must match the value used for the writes.
3837  *
3838  * Return -1 if the primary copy failed, or if all the attempted copies
3839  * failed (page not found or not marked up to date), 0 otherwise.
3840  */
3841 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3842 {
3843 	int i;
3844 	int errors = 0;
3845 	bool primary_failed = false;
3846 	int ret;
3847 	u64 bytenr;
3848 
3849 	if (max_mirrors == 0)
3850 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3851 
3852 	for (i = 0; i < max_mirrors; i++) {
3853 		struct page *page;
3854 
3855 		ret = btrfs_sb_log_location(device, i, READ, &bytenr);
3856 		if (ret == -ENOENT) {
3857 			break;
3858 		} else if (ret < 0) {
3859 			errors++;
3860 			if (i == 0)
3861 				primary_failed = true;
3862 			continue;
3863 		}
3864 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3865 		    device->commit_total_bytes)
3866 			break;
3867 
3868 		page = find_get_page(device->bdev->bd_inode->i_mapping,
3869 				     bytenr >> PAGE_SHIFT);
3870 		if (!page) {
3871 			errors++;
3872 			if (i == 0)
3873 				primary_failed = true;
3874 			continue;
3875 		}
3876 		/* The page was submitted locked and is unlocked once the IO completes */
3877 		wait_on_page_locked(page);
3878 		if (PageError(page)) {
3879 			errors++;
3880 			if (i == 0)
3881 				primary_failed = true;
3882 		}
3883 
3884 		/* Drop our reference */
3885 		put_page(page);
3886 
3887 		/* Drop the reference from the writing run */
3888 		put_page(page);
3889 	}
3890 
3891 	/* log error, force error return */
3892 	if (primary_failed) {
3893 		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3894 			  device->devid);
3895 		return -1;
3896 	}
3897 
3898 	return errors < i ? 0 : -1;
3899 }
3900 
3901 /*
3902  * endio for write_dev_flush; this wakes anyone waiting
3903  * for the barrier when it is done
3904  */
3905 static void btrfs_end_empty_barrier(struct bio *bio)
3906 {
3907 	bio_uninit(bio);
3908 	complete(bio->bi_private);
3909 }
3910 
3911 /*
3912  * Submit a flush request to the device if it supports it. Error handling is
3913  * done in the waiting counterpart.
3914  */
3915 static void write_dev_flush(struct btrfs_device *device)
3916 {
3917 	struct bio *bio = &device->flush_bio;
3918 
3919 	device->last_flush_error = BLK_STS_OK;
3920 
3921 #ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3922 	/*
3923 	 * When a disk has write caching disabled, we skip submission of a bio
3924 	 * with flush and sync requests before writing the superblock, since
3925 	 * it's not needed. However when the integrity checker is enabled, this
3926 	 * results in reports that there are metadata blocks referred to by a
3927 	 * superblock that were not properly flushed. So, for the sake of
3928 	 * simplicity, do not skip the bio submission when the integrity
3929 	 * checker is enabled, since it is a debug tool and not meant for use
3930 	 * in non-debug builds.
3931 	 */
3932 	if (!bdev_write_cache(device->bdev))
3933 		return;
3934 #endif
3935 
3936 	bio_init(bio, device->bdev, NULL, 0,
3937 		 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
3938 	bio->bi_end_io = btrfs_end_empty_barrier;
3939 	init_completion(&device->flush_wait);
3940 	bio->bi_private = &device->flush_wait;
3941 
3942 	btrfsic_check_bio(bio);
3943 	submit_bio(bio);
3944 	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3945 }
3946 
3947 /*
3948  * If the flush bio has been submitted by write_dev_flush, wait for it.
3949  * Return true for any error, and false otherwise.
3950  */
3951 static bool wait_dev_flush(struct btrfs_device *device)
3952 {
3953 	struct bio *bio = &device->flush_bio;
3954 
3955 	if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3956 		return false;
3957 
3958 	wait_for_completion_io(&device->flush_wait);
3959 
3960 	if (bio->bi_status) {
3961 		device->last_flush_error = bio->bi_status;
3962 		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS);
3963 		return true;
3964 	}
3965 
3966 	return false;
3967 }
3968 
3969 /*
3970  * send an empty flush down to each device in parallel,
3971  * then wait for them
3972  */
3973 static int barrier_all_devices(struct btrfs_fs_info *info)
3974 {
3975 	struct list_head *head;
3976 	struct btrfs_device *dev;
3977 	int errors_wait = 0;
3978 
3979 	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3980 	/* send down all the barriers */
3981 	head = &info->fs_devices->devices;
3982 	list_for_each_entry(dev, head, dev_list) {
3983 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3984 			continue;
3985 		if (!dev->bdev)
3986 			continue;
3987 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3988 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3989 			continue;
3990 
3991 		write_dev_flush(dev);
3992 	}
3993 
3994 	/* wait for all the barriers */
3995 	list_for_each_entry(dev, head, dev_list) {
3996 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3997 			continue;
3998 		if (!dev->bdev) {
3999 			errors_wait++;
4000 			continue;
4001 		}
4002 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4003 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4004 			continue;
4005 
4006 		if (wait_dev_flush(dev))
4007 			errors_wait++;
4008 	}
4009 
4010 	/*
4011 	 * If some flushes failed, check each disk's last_flush_error to
4012 	 * determine whether the filesystem can still tolerate the failures.
4013 	 */
4014 	if (errors_wait && !btrfs_check_rw_degradable(info, NULL))
4015 		return -EIO;
4016 
4017 	return 0;
4018 }
4019 
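/*
 * Return the minimum number of disk failures tolerated by the raid profiles
 * selected in @flags. Unknown flags trigger a warning and yield 0.
 */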
4020 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
4021 {
4022 	int raid_type;
4023 	int min_tolerated = INT_MAX;
4024 
4025 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
4026 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
4027 		min_tolerated = min_t(int, min_tolerated,
4028 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
4029 				    tolerated_failures);
4030 
4031 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4032 		if (raid_type == BTRFS_RAID_SINGLE)
4033 			continue;
4034 		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
4035 			continue;
4036 		min_tolerated = min_t(int, min_tolerated,
4037 				    btrfs_raid_array[raid_type].
4038 				    tolerated_failures);
4039 	}
4040 
4041 	if (min_tolerated == INT_MAX) {
4042 		pr_warn("BTRFS: unknown raid flag: %llu", flags);
4043 		min_tolerated = 0;
4044 	}
4045 
4046 	return min_tolerated;
4047 }
4048 
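/*
 * Write the superblock to all writeable devices: optionally send flush
 * barriers first, then submit the superblock copies and wait for them.
 * Returns -EIO if more devices failed than the filesystem can tolerate.
 */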
4049 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
4050 {
4051 	struct list_head *head;
4052 	struct btrfs_device *dev;
4053 	struct btrfs_super_block *sb;
4054 	struct btrfs_dev_item *dev_item;
4055 	int ret;
4056 	int do_barriers;
4057 	int max_errors;
4058 	int total_errors = 0;
4059 	u64 flags;
4060 
4061 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
4062 
4063 	/*
4064 	 * max_mirrors == 0 indicates we're from commit_transaction,
4065 	 * not from fsync, where the tree roots in fs_info have not yet
4066 	 * been made consistent on disk.
4067 	 */
4068 	if (max_mirrors == 0)
4069 		backup_super_roots(fs_info);
4070 
4071 	sb = fs_info->super_for_commit;
4072 	dev_item = &sb->dev_item;
4073 
4074 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4075 	head = &fs_info->fs_devices->devices;
4076 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
4077 
4078 	if (do_barriers) {
4079 		ret = barrier_all_devices(fs_info);
4080 		if (ret) {
4081 			mutex_unlock(
4082 				&fs_info->fs_devices->device_list_mutex);
4083 			btrfs_handle_fs_error(fs_info, ret,
4084 					      "errors while submitting device barriers.");
4085 			return ret;
4086 		}
4087 	}
4088 
4089 	list_for_each_entry(dev, head, dev_list) {
4090 		if (!dev->bdev) {
4091 			total_errors++;
4092 			continue;
4093 		}
4094 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4095 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4096 			continue;
4097 
4098 		btrfs_set_stack_device_generation(dev_item, 0);
4099 		btrfs_set_stack_device_type(dev_item, dev->type);
4100 		btrfs_set_stack_device_id(dev_item, dev->devid);
4101 		btrfs_set_stack_device_total_bytes(dev_item,
4102 						   dev->commit_total_bytes);
4103 		btrfs_set_stack_device_bytes_used(dev_item,
4104 						  dev->commit_bytes_used);
4105 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
4106 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
4107 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
4108 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
4109 		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
4110 		       BTRFS_FSID_SIZE);
4111 
4112 		flags = btrfs_super_flags(sb);
4113 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
4114 
4115 		ret = btrfs_validate_write_super(fs_info, sb);
4116 		if (ret < 0) {
4117 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4118 			btrfs_handle_fs_error(fs_info, -EUCLEAN,
4119 				"unexpected superblock corruption detected");
4120 			return -EUCLEAN;
4121 		}
4122 
4123 		ret = write_dev_supers(dev, sb, max_mirrors);
4124 		if (ret)
4125 			total_errors++;
4126 	}
4127 	if (total_errors > max_errors) {
4128 		btrfs_err(fs_info, "%d errors while writing supers",
4129 			  total_errors);
4130 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4131 
4132 		/* FUA is masked off if unsupported and can't be the reason */
4133 		btrfs_handle_fs_error(fs_info, -EIO,
4134 				      "%d errors while writing supers",
4135 				      total_errors);
4136 		return -EIO;
4137 	}
4138 
4139 	total_errors = 0;
4140 	list_for_each_entry(dev, head, dev_list) {
4141 		if (!dev->bdev)
4142 			continue;
4143 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4144 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4145 			continue;
4146 
4147 		ret = wait_dev_supers(dev, max_mirrors);
4148 		if (ret)
4149 			total_errors++;
4150 	}
4151 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4152 	if (total_errors > max_errors) {
4153 		btrfs_handle_fs_error(fs_info, -EIO,
4154 				      "%d errors while writing supers",
4155 				      total_errors);
4156 		return -EIO;
4157 	}
4158 	return 0;
4159 }
4160 
4161 /* Drop a fs root from the radix tree and free it. */
4162 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
4163 				  struct btrfs_root *root)
4164 {
4165 	bool drop_ref = false;
4166 
4167 	spin_lock(&fs_info->fs_roots_radix_lock);
4168 	radix_tree_delete(&fs_info->fs_roots_radix,
4169 			  (unsigned long)root->root_key.objectid);
4170 	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
4171 		drop_ref = true;
4172 	spin_unlock(&fs_info->fs_roots_radix_lock);
4173 
4174 	if (BTRFS_FS_ERROR(fs_info)) {
4175 		ASSERT(root->log_root == NULL);
4176 		if (root->reloc_root) {
4177 			btrfs_put_root(root->reloc_root);
4178 			root->reloc_root = NULL;
4179 		}
4180 	}
4181 
4182 	if (drop_ref)
4183 		btrfs_put_root(root);
4184 }
4185 
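/*
 * Run the remaining delayed iputs, wait for any in-flight cleanup work and
 * then join and commit the current transaction.
 */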
4186 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4187 {
4188 	struct btrfs_root *root = fs_info->tree_root;
4189 	struct btrfs_trans_handle *trans;
4190 
4191 	mutex_lock(&fs_info->cleaner_mutex);
4192 	btrfs_run_delayed_iputs(fs_info);
4193 	mutex_unlock(&fs_info->cleaner_mutex);
4194 	wake_up_process(fs_info->cleaner_kthread);
4195 
4196 	/* wait until ongoing cleanup work is done */
4197 	down_write(&fs_info->cleanup_work_sem);
4198 	up_write(&fs_info->cleanup_work_sem);
4199 
4200 	trans = btrfs_join_transaction(root);
4201 	if (IS_ERR(trans))
4202 		return PTR_ERR(trans);
4203 	return btrfs_commit_transaction(trans);
4204 }
4205 
4206 static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
4207 {
4208 	struct btrfs_transaction *trans;
4209 	struct btrfs_transaction *tmp;
4210 	bool found = false;
4211 
4212 	if (list_empty(&fs_info->trans_list))
4213 		return;
4214 
4215 	/*
4216 	 * This function is only called at the very end of close_ctree(),
4217 	 * thus there is no other running transaction and no need to take trans_lock.
4218 	 */
4219 	ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
4220 	list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
4221 		struct extent_state *cached = NULL;
4222 		u64 dirty_bytes = 0;
4223 		u64 cur = 0;
4224 		u64 found_start;
4225 		u64 found_end;
4226 
4227 		found = true;
4228 		while (find_first_extent_bit(&trans->dirty_pages, cur,
4229 			&found_start, &found_end, EXTENT_DIRTY, &cached)) {
4230 			dirty_bytes += found_end + 1 - found_start;
4231 			cur = found_end + 1;
4232 		}
4233 		btrfs_warn(fs_info,
4234 	"transaction %llu (with %llu dirty metadata bytes) is not committed",
4235 			   trans->transid, dirty_bytes);
4236 		btrfs_cleanup_one_transaction(trans, fs_info);
4237 
4238 		if (trans == fs_info->running_transaction)
4239 			fs_info->running_transaction = NULL;
4240 		list_del_init(&trans->list);
4241 
4242 		btrfs_put_transaction(trans);
4243 		trace_btrfs_transaction_commit(fs_info);
4244 	}
4245 	ASSERT(!found);
4246 }
4247 
4248 void __cold close_ctree(struct btrfs_fs_info *fs_info)
4249 {
4250 	int ret;
4251 
4252 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4253 
4254 	/*
4255 	 * If we had UNFINISHED_DROPS we could still be processing them, so
4256 	 * clear that bit and wake up relocation so it can stop.
4257 	 * We must do this before stopping the block group reclaim task, because
4258 	 * at btrfs_relocate_block_group() we wait for this bit, and after the
4259 	 * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
4260 	 * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
4261 	 * return 1.
4262 	 */
4263 	btrfs_wake_unfinished_drop(fs_info);
4264 
4265 	/*
4266 	 * We may have the reclaim task running and relocating a data block group,
4267 	 * in which case it may create delayed iputs. So stop it before we park
4268 	 * the cleaner kthread, otherwise we can get new delayed iputs after
4269 	 * parking the cleaner, and that can make the async reclaim task hang
4270 	 * if it's waiting for delayed iputs to complete, since the cleaner is
4271 	 * parked and cannot run delayed iputs - this will make us hang when
4272 	 * trying to stop the async reclaim task.
4273 	 */
4274 	cancel_work_sync(&fs_info->reclaim_bgs_work);
4275 	/*
4276 	 * We don't want the cleaner to start new transactions, add more delayed
4277 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4278 	 * because that frees the task_struct, and the transaction kthread might
4279 	 * still try to wake up the cleaner.
4280 	 */
4281 	kthread_park(fs_info->cleaner_kthread);
4282 
4283 	/* wait for the qgroup rescan worker to stop */
4284 	btrfs_qgroup_wait_for_completion(fs_info, false);
4285 
4286 	/* wait for the uuid_scan task to finish */
4287 	down(&fs_info->uuid_tree_rescan_sem);
4288 	/* avoid complaints from lockdep et al., set sem back to initial state */
4289 	up(&fs_info->uuid_tree_rescan_sem);
4290 
4291 	/* pause restriper - we want to resume on mount */
4292 	btrfs_pause_balance(fs_info);
4293 
4294 	btrfs_dev_replace_suspend_for_unmount(fs_info);
4295 
4296 	btrfs_scrub_cancel(fs_info);
4297 
4298 	/* wait for any defraggers to finish */
4299 	wait_event(fs_info->transaction_wait,
4300 		   (atomic_read(&fs_info->defrag_running) == 0));
4301 
4302 	/* clear out the rbtree of defraggable inodes */
4303 	btrfs_cleanup_defrag_inodes(fs_info);
4304 
4305 	/*
4306 	 * After we parked the cleaner kthread, ordered extents may have
4307 	 * completed and created new delayed iputs. If one of the async reclaim
4308 	 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
4309 	 * can hang forever trying to stop it, because if a delayed iput is
4310 	 * added after it ran btrfs_run_delayed_iputs() and before it called
4311 	 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
4312 	 * no one else to run iputs.
4313 	 *
4314 	 * So wait for all ongoing ordered extents to complete and then run
4315 	 * delayed iputs. This works because once we reach this point no one
4316 	 * can either create new ordered extents nor create delayed iputs
4317 	 * through some other means.
4318 	 *
4319 	 * Also note that btrfs_wait_ordered_roots() is not safe here, because
4320 	 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
4321 	 * but the delayed iput for the respective inode is made only when doing
4322 	 * the final btrfs_put_ordered_extent() (which must happen at
4323 	 * btrfs_finish_ordered_io() when we are unmounting).
4324 	 */
4325 	btrfs_flush_workqueue(fs_info->endio_write_workers);
4326 	/* Ordered extents for free space inodes. */
4327 	btrfs_flush_workqueue(fs_info->endio_freespace_worker);
4328 	btrfs_run_delayed_iputs(fs_info);
4329 
4330 	cancel_work_sync(&fs_info->async_reclaim_work);
4331 	cancel_work_sync(&fs_info->async_data_reclaim_work);
4332 	cancel_work_sync(&fs_info->preempt_reclaim_work);
4333 
4334 	/* Cancel or finish ongoing discard work */
4335 	btrfs_discard_cleanup(fs_info);
4336 
4337 	if (!sb_rdonly(fs_info->sb)) {
4338 		/*
4339 		 * The cleaner kthread is stopped, so do one final pass over
4340 		 * unused block groups.
4341 		 */
4342 		btrfs_delete_unused_bgs(fs_info);
4343 
4344 		/*
4345 		 * There might be existing delayed inode workers still running
4346 		 * and holding an empty delayed inode item. We must wait for
4347 		 * them to complete first because they can create a transaction.
4348 		 * This happens when someone calls btrfs_balance_delayed_items()
4349 		 * and then a transaction commit runs the same delayed nodes
4350 		 * before any delayed worker has done something with the nodes.
4351 		 * We must wait for any worker here and not at transaction
4352 		 * commit time since that could cause a deadlock.
4353 		 * This is a very rare case.
4354 		 */
4355 		btrfs_flush_workqueue(fs_info->delayed_workers);
4356 
4357 		ret = btrfs_commit_super(fs_info);
4358 		if (ret)
4359 			btrfs_err(fs_info, "commit super ret %d", ret);
4360 	}
4361 
4362 	if (BTRFS_FS_ERROR(fs_info))
4363 		btrfs_error_commit_super(fs_info);
4364 
4365 	kthread_stop(fs_info->transaction_kthread);
4366 	kthread_stop(fs_info->cleaner_kthread);
4367 
4368 	ASSERT(list_empty(&fs_info->delayed_iputs));
4369 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4370 
4371 	if (btrfs_check_quota_leak(fs_info)) {
4372 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4373 		btrfs_err(fs_info, "qgroup reserved space leaked");
4374 	}
4375 
4376 	btrfs_free_qgroup_config(fs_info);
4377 	ASSERT(list_empty(&fs_info->delalloc_roots));
4378 
4379 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4380 		btrfs_info(fs_info, "at unmount delalloc count %lld",
4381 		       percpu_counter_sum(&fs_info->delalloc_bytes));
4382 	}
4383 
4384 	if (percpu_counter_sum(&fs_info->ordered_bytes))
4385 		btrfs_info(fs_info, "at unmount dio bytes count %lld",
4386 			   percpu_counter_sum(&fs_info->ordered_bytes));
4387 
4388 	btrfs_sysfs_remove_mounted(fs_info);
4389 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4390 
4391 	btrfs_put_block_group_cache(fs_info);
4392 
4393 	/*
4394 	 * we must make sure there are no read requests submitted
4395 	 * after we stop all the workers.
4396 	 */
4397 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4398 	btrfs_stop_all_workers(fs_info);
4399 
4400 	/* We shouldn't have any transaction open at this point */
4401 	warn_about_uncommitted_trans(fs_info);
4402 
4403 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4404 	free_root_pointers(fs_info, true);
4405 	btrfs_free_fs_roots(fs_info);
4406 
4407 	/*
4408 	 * We must free the block groups after dropping the fs_roots as we could
4409 	 * have had an IO error and have left over tree log blocks that aren't
4410 	 * cleaned up until the fs roots are freed.  This makes the block group
4411 	 * accounting appear to be wrong because there's pending reserved bytes,
4412 	 * so make sure we do the block group cleanup afterwards.
4413 	 */
4414 	btrfs_free_block_groups(fs_info);
4415 
4416 	iput(fs_info->btree_inode);
4417 
4418 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4419 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
4420 		btrfsic_unmount(fs_info->fs_devices);
4421 #endif
4422 
4423 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
4424 	btrfs_close_devices(fs_info->fs_devices);
4425 }
4426 
4427 void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
4428 			     struct extent_buffer *buf)
4429 {
4430 	struct btrfs_fs_info *fs_info = buf->fs_info;
4431 	u64 transid = btrfs_header_generation(buf);
4432 
4433 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4434 	/*
4435 	 * This is a fast path so only do this check if we have sanity tests
4436 	 * enabled.  Normal people shouldn't be using unmapped buffers as dirty
4437 	 * outside of the sanity tests.
4438 	 */
4439 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4440 		return;
4441 #endif
4442 	/* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
4443 	ASSERT(trans->transid == fs_info->generation);
4444 	btrfs_assert_tree_write_locked(buf);
4445 	if (transid != fs_info->generation) {
4446 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4447 			buf->start, transid, fs_info->generation);
4448 		btrfs_abort_transaction(trans, -EUCLEAN);
4449 	}
4450 	set_extent_buffer_dirty(buf);
4451 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4452 	/*
4453 	 * btrfs_check_leaf() won't check item data if we don't have WRITTEN
4454 	 * set, so this will only validate the basic structure of the items.
4455 	 */
4456 	if (btrfs_header_level(buf) == 0 && btrfs_check_leaf(buf)) {
4457 		btrfs_print_leaf(buf);
4458 		ASSERT(0);
4459 	}
4460 #endif
4461 }
4462 
4463 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4464 					int flush_delayed)
4465 {
4466 	/*
4467 	 * looks as though older kernels can get into trouble with
4468 	 * this code; they end up stuck in balance_dirty_pages() forever
4469 	 */
4470 	int ret;
4471 
4472 	if (current->flags & PF_MEMALLOC)
4473 		return;
4474 
4475 	if (flush_delayed)
4476 		btrfs_balance_delayed_items(fs_info);
4477 
4478 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4479 				     BTRFS_DIRTY_METADATA_THRESH,
4480 				     fs_info->dirty_metadata_batch);
4481 	if (ret > 0) {
4482 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4483 	}
4484 }
4485 
4486 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4487 {
4488 	__btrfs_btree_balance_dirty(fs_info, 1);
4489 }
4490 
4491 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4492 {
4493 	__btrfs_btree_balance_dirty(fs_info, 0);
4494 }
4495 
4496 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4497 {
4498 	/* cleanup FS via transaction */
4499 	btrfs_cleanup_transaction(fs_info);
4500 
4501 	mutex_lock(&fs_info->cleaner_mutex);
4502 	btrfs_run_delayed_iputs(fs_info);
4503 	mutex_unlock(&fs_info->cleaner_mutex);
4504 
4505 	down_write(&fs_info->cleanup_work_sem);
4506 	up_write(&fs_info->cleanup_work_sem);
4507 }
4508 
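/*
 * Free the log tree of every fs root in the radix tree, then free the log
 * root tree itself. Called from btrfs_cleanup_transaction().
 */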
4509 static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4510 {
4511 	struct btrfs_root *gang[8];
4512 	u64 root_objectid = 0;
4513 	int ret;
4514 
4515 	spin_lock(&fs_info->fs_roots_radix_lock);
4516 	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4517 					     (void **)gang, root_objectid,
4518 					     ARRAY_SIZE(gang))) != 0) {
4519 		int i;
4520 
4521 		for (i = 0; i < ret; i++)
4522 			gang[i] = btrfs_grab_root(gang[i]);
4523 		spin_unlock(&fs_info->fs_roots_radix_lock);
4524 
4525 		for (i = 0; i < ret; i++) {
4526 			if (!gang[i])
4527 				continue;
4528 			root_objectid = gang[i]->root_key.objectid;
4529 			btrfs_free_log(NULL, gang[i]);
4530 			btrfs_put_root(gang[i]);
4531 		}
4532 		root_objectid++;
4533 		spin_lock(&fs_info->fs_roots_radix_lock);
4534 	}
4535 	spin_unlock(&fs_info->fs_roots_radix_lock);
4536 	btrfs_free_log_root_tree(NULL, fs_info);
4537 }
4538 
4539 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4540 {
4541 	struct btrfs_ordered_extent *ordered;
4542 
4543 	spin_lock(&root->ordered_extent_lock);
4544 	/*
4545 	 * This will just short-circuit the ordered completion code, which will
4546 	 * make sure the ordered extent gets properly cleaned up.
4547 	 */
4548 	list_for_each_entry(ordered, &root->ordered_extents,
4549 			    root_extent_list)
4550 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4551 	spin_unlock(&root->ordered_extent_lock);
4552 }
4553 
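/*
 * Mark the ordered extents of every root with an IO error and wait for them
 * all to complete, so they get cleaned up even without a sync().
 */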
4554 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4555 {
4556 	struct btrfs_root *root;
4557 	LIST_HEAD(splice);
4558 
4559 	spin_lock(&fs_info->ordered_root_lock);
4560 	list_splice_init(&fs_info->ordered_roots, &splice);
4561 	while (!list_empty(&splice)) {
4562 		root = list_first_entry(&splice, struct btrfs_root,
4563 					ordered_root);
4564 		list_move_tail(&root->ordered_root,
4565 			       &fs_info->ordered_roots);
4566 
4567 		spin_unlock(&fs_info->ordered_root_lock);
4568 		btrfs_destroy_ordered_extents(root);
4569 
4570 		cond_resched();
4571 		spin_lock(&fs_info->ordered_root_lock);
4572 	}
4573 	spin_unlock(&fs_info->ordered_root_lock);
4574 
4575 	/*
4576 	 * We need this here because if we've been flipped read-only we won't
4577 	 * get sync() from the umount, so we need to make sure any ordered
4578 	 * extents whose dirty pages haven't started writeout yet
4579 	 * actually get run and error out properly.
4580 	 */
4581 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4582 }
4583 
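/*
 * Drop every delayed reference queued in @trans without running it. For
 * heads with must_insert_reserved set, move the reserved bytes to pinned and
 * then unpin them through the error path so the space is not leaked.
 */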
4584 static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4585 				       struct btrfs_fs_info *fs_info)
4586 {
4587 	struct rb_node *node;
4588 	struct btrfs_delayed_ref_root *delayed_refs;
4589 	struct btrfs_delayed_ref_node *ref;
4590 
4591 	delayed_refs = &trans->delayed_refs;
4592 
4593 	spin_lock(&delayed_refs->lock);
4594 	if (atomic_read(&delayed_refs->num_entries) == 0) {
4595 		spin_unlock(&delayed_refs->lock);
4596 		btrfs_debug(fs_info, "delayed_refs has NO entry");
4597 		return;
4598 	}
4599 
4600 	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4601 		struct btrfs_delayed_ref_head *head;
4602 		struct rb_node *n;
4603 		bool pin_bytes = false;
4604 
4605 		head = rb_entry(node, struct btrfs_delayed_ref_head,
4606 				href_node);
4607 		if (btrfs_delayed_ref_lock(delayed_refs, head))
4608 			continue;
4609 
4610 		spin_lock(&head->lock);
4611 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4612 			ref = rb_entry(n, struct btrfs_delayed_ref_node,
4613 				       ref_node);
4614 			rb_erase_cached(&ref->ref_node, &head->ref_tree);
4615 			RB_CLEAR_NODE(&ref->ref_node);
4616 			if (!list_empty(&ref->add_list))
4617 				list_del(&ref->add_list);
4618 			atomic_dec(&delayed_refs->num_entries);
4619 			btrfs_put_delayed_ref(ref);
4620 		}
4621 		if (head->must_insert_reserved)
4622 			pin_bytes = true;
4623 		btrfs_free_delayed_extent_op(head->extent_op);
4624 		btrfs_delete_ref_head(delayed_refs, head);
4625 		spin_unlock(&head->lock);
4626 		spin_unlock(&delayed_refs->lock);
4627 		mutex_unlock(&head->mutex);
4628 
4629 		if (pin_bytes) {
4630 			struct btrfs_block_group *cache;
4631 
4632 			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
4633 			BUG_ON(!cache);
4634 
4635 			spin_lock(&cache->space_info->lock);
4636 			spin_lock(&cache->lock);
4637 			cache->pinned += head->num_bytes;
4638 			btrfs_space_info_update_bytes_pinned(fs_info,
4639 				cache->space_info, head->num_bytes);
4640 			cache->reserved -= head->num_bytes;
4641 			cache->space_info->bytes_reserved -= head->num_bytes;
4642 			spin_unlock(&cache->lock);
4643 			spin_unlock(&cache->space_info->lock);
4644 
4645 			btrfs_put_block_group(cache);
4646 
4647 			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
4648 				head->bytenr + head->num_bytes - 1);
4649 		}
4650 		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4651 		btrfs_put_delayed_ref_head(head);
4652 		cond_resched();
4653 		spin_lock(&delayed_refs->lock);
4654 	}
4655 	btrfs_qgroup_destroy_extent_records(trans);
4656 
4657 	spin_unlock(&delayed_refs->lock);
4658 }
4659 
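/*
 * Remove every inode from the root's delalloc list and invalidate its page
 * cache so no further writeback is attempted for it.
 */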
4660 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4661 {
4662 	struct btrfs_inode *btrfs_inode;
4663 	LIST_HEAD(splice);
4664 
4665 	spin_lock(&root->delalloc_lock);
4666 	list_splice_init(&root->delalloc_inodes, &splice);
4667 
4668 	while (!list_empty(&splice)) {
4669 		struct inode *inode = NULL;
4670 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4671 					       delalloc_inodes);
4672 		__btrfs_del_delalloc_inode(root, btrfs_inode);
4673 		spin_unlock(&root->delalloc_lock);
4674 
4675 		/*
4676 		 * Make sure we get a live inode and that it'll not disappear
4677 		 * meanwhile.
4678 		 */
4679 		inode = igrab(&btrfs_inode->vfs_inode);
4680 		if (inode) {
4681 			unsigned int nofs_flag;
4682 
4683 			nofs_flag = memalloc_nofs_save();
4684 			invalidate_inode_pages2(inode->i_mapping);
4685 			memalloc_nofs_restore(nofs_flag);
4686 			iput(inode);
4687 		}
4688 		spin_lock(&root->delalloc_lock);
4689 	}
4690 	spin_unlock(&root->delalloc_lock);
4691 }
4692 
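/* Run btrfs_destroy_delalloc_inodes() on every root that has delalloc. */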
4693 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4694 {
4695 	struct btrfs_root *root;
4696 	LIST_HEAD(splice);
4697 
4698 	spin_lock(&fs_info->delalloc_root_lock);
4699 	list_splice_init(&fs_info->delalloc_roots, &splice);
4700 	while (!list_empty(&splice)) {
4701 		root = list_first_entry(&splice, struct btrfs_root,
4702 					 delalloc_root);
4703 		root = btrfs_grab_root(root);
4704 		BUG_ON(!root);
4705 		spin_unlock(&fs_info->delalloc_root_lock);
4706 
4707 		btrfs_destroy_delalloc_inodes(root);
4708 		btrfs_put_root(root);
4709 
4710 		spin_lock(&fs_info->delalloc_root_lock);
4711 	}
4712 	spin_unlock(&fs_info->delalloc_root_lock);
4713 }
4714 
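/*
 * Clear @mark from @dirty_pages and discard every extent buffer found in the
 * marked ranges: wait for pending writeback, clear the dirty bit and free
 * the buffer as stale.
 */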
4715 static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4716 					 struct extent_io_tree *dirty_pages,
4717 					 int mark)
4718 {
4719 	struct extent_buffer *eb;
4720 	u64 start = 0;
4721 	u64 end;
4722 
4723 	while (find_first_extent_bit(dirty_pages, start, &start, &end,
4724 				     mark, NULL)) {
4725 		clear_extent_bits(dirty_pages, start, end, mark);
4726 		while (start <= end) {
4727 			eb = find_extent_buffer(fs_info, start);
4728 			start += fs_info->nodesize;
4729 			if (!eb)
4730 				continue;
4731 
4732 			btrfs_tree_lock(eb);
4733 			wait_on_extent_buffer_writeback(eb);
4734 			btrfs_clear_buffer_dirty(NULL, eb);
4735 			btrfs_tree_unlock(eb);
4736 
4737 			free_extent_buffer_stale(eb);
4738 		}
4739 	}
4740 }
4741 
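/*
 * Clear the EXTENT_DIRTY ranges in @unpin and release the corresponding
 * pinned space through btrfs_error_unpin_extent_range().
 */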
4742 static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4743 					struct extent_io_tree *unpin)
4744 {
4745 	u64 start;
4746 	u64 end;
4747 
4748 	while (1) {
4749 		struct extent_state *cached_state = NULL;
4750 
4751 		/*
4752 		 * btrfs_finish_extent_commit() may get the same range as ours
4753 		 * between find_first_extent_bit and clear_extent_dirty.
4754 		 * Hence, hold the unused_bg_unpin_mutex to avoid double-unpinning
4755 		 * the same extent range.
4756 		 */
4757 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4758 		if (!find_first_extent_bit(unpin, 0, &start, &end,
4759 					   EXTENT_DIRTY, &cached_state)) {
4760 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4761 			break;
4762 		}
4763 
4764 		clear_extent_dirty(unpin, start, end, &cached_state);
4765 		free_extent_state(cached_state);
4766 		btrfs_error_unpin_extent_range(fs_info, start, end);
4767 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4768 		cond_resched();
4769 	}
4770 }
4771 
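/*
 * Release the free space cache io_ctl inode of @cache: invalidate its pages,
 * reset its generation and drop the block group reference held for the IO.
 */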
4772 static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4773 {
4774 	struct inode *inode;
4775 
4776 	inode = cache->io_ctl.inode;
4777 	if (inode) {
4778 		unsigned int nofs_flag;
4779 
4780 		nofs_flag = memalloc_nofs_save();
4781 		invalidate_inode_pages2(inode->i_mapping);
4782 		memalloc_nofs_restore(nofs_flag);
4783 
4784 		BTRFS_I(inode)->generation = 0;
4785 		cache->io_ctl.inode = NULL;
4786 		iput(inode);
4787 	}
4788 	ASSERT(cache->io_ctl.pages == NULL);
4789 	btrfs_put_block_group(cache);
4790 }
4791 
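/*
 * Detach all dirty block groups from @cur_trans, mark their space cache as
 * BTRFS_DC_ERROR and drop the references they held, including those for
 * block groups with in-flight space cache IO.
 */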
4792 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4793 			     struct btrfs_fs_info *fs_info)
4794 {
4795 	struct btrfs_block_group *cache;
4796 
4797 	spin_lock(&cur_trans->dirty_bgs_lock);
4798 	while (!list_empty(&cur_trans->dirty_bgs)) {
4799 		cache = list_first_entry(&cur_trans->dirty_bgs,
4800 					 struct btrfs_block_group,
4801 					 dirty_list);
4802 
4803 		if (!list_empty(&cache->io_list)) {
4804 			spin_unlock(&cur_trans->dirty_bgs_lock);
4805 			list_del_init(&cache->io_list);
4806 			btrfs_cleanup_bg_io(cache);
4807 			spin_lock(&cur_trans->dirty_bgs_lock);
4808 		}
4809 
4810 		list_del_init(&cache->dirty_list);
4811 		spin_lock(&cache->lock);
4812 		cache->disk_cache_state = BTRFS_DC_ERROR;
4813 		spin_unlock(&cache->lock);
4814 
4815 		spin_unlock(&cur_trans->dirty_bgs_lock);
4816 		btrfs_put_block_group(cache);
4817 		btrfs_delayed_refs_rsv_release(fs_info, 1);
4818 		spin_lock(&cur_trans->dirty_bgs_lock);
4819 	}
4820 	spin_unlock(&cur_trans->dirty_bgs_lock);
4821 
4822 	/*
4823 	 * Refer to the definition of the io_bgs member for details on why it's
4824 	 * safe to use it without any locking.
4825 	 */
4826 	while (!list_empty(&cur_trans->io_bgs)) {
4827 		cache = list_first_entry(&cur_trans->io_bgs,
4828 					 struct btrfs_block_group,
4829 					 io_list);
4830 
4831 		list_del_init(&cache->io_list);
4832 		spin_lock(&cache->lock);
4833 		cache->disk_cache_state = BTRFS_DC_ERROR;
4834 		spin_unlock(&cache->lock);
4835 		btrfs_cleanup_bg_io(cache);
4836 	}
4837 }
4838 
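/*
 * Free the pertrans qgroup metadata reservation of every root tagged as
 * dirty in the current transaction and clear the tag.
 */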
4839 static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
4840 {
4841 	struct btrfs_root *gang[8];
4842 	int i;
4843 	int ret;
4844 
4845 	spin_lock(&fs_info->fs_roots_radix_lock);
4846 	while (1) {
4847 		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
4848 						 (void **)gang, 0,
4849 						 ARRAY_SIZE(gang),
4850 						 BTRFS_ROOT_TRANS_TAG);
4851 		if (ret == 0)
4852 			break;
4853 		for (i = 0; i < ret; i++) {
4854 			struct btrfs_root *root = gang[i];
4855 
4856 			btrfs_qgroup_free_meta_all_pertrans(root);
4857 			radix_tree_tag_clear(&fs_info->fs_roots_radix,
4858 					(unsigned long)root->root_key.objectid,
4859 					BTRFS_ROOT_TRANS_TAG);
4860 		}
4861 	}
4862 	spin_unlock(&fs_info->fs_roots_radix_lock);
4863 }
4864 
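/*
 * Tear down a transaction that can no longer be committed: clean up its
 * dirty block groups, delayed refs, delayed inodes, dirty metadata and
 * pinned extents, then mark it completed and wake all waiters.
 */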
4865 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4866 				   struct btrfs_fs_info *fs_info)
4867 {
4868 	struct btrfs_device *dev, *tmp;
4869 
4870 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4871 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4872 	ASSERT(list_empty(&cur_trans->io_bgs));
4873 
4874 	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4875 				 post_commit_list) {
4876 		list_del_init(&dev->post_commit_list);
4877 	}
4878 
4879 	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4880 
4881 	cur_trans->state = TRANS_STATE_COMMIT_START;
4882 	wake_up(&fs_info->transaction_blocked_wait);
4883 
4884 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4885 	wake_up(&fs_info->transaction_wait);
4886 
4887 	btrfs_destroy_delayed_inodes(fs_info);
4888 
4889 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4890 				     EXTENT_DIRTY);
4891 	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
4892 
4893 	btrfs_free_all_qgroup_pertrans(fs_info);
4894 
4895 	cur_trans->state = TRANS_STATE_COMPLETED;
4896 	wake_up(&cur_trans->commit_wait);
4897 }
4898 
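/*
 * Abort handling for the whole transaction list: wait for transactions that
 * already entered commit, clean up the remaining ones, then drop all ordered
 * extents, delayed inodes, delalloc inodes and log trees.
 */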
4899 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4900 {
4901 	struct btrfs_transaction *t;
4902 
4903 	mutex_lock(&fs_info->transaction_kthread_mutex);
4904 
4905 	spin_lock(&fs_info->trans_lock);
4906 	while (!list_empty(&fs_info->trans_list)) {
4907 		t = list_first_entry(&fs_info->trans_list,
4908 				     struct btrfs_transaction, list);
4909 		if (t->state >= TRANS_STATE_COMMIT_PREP) {
4910 			refcount_inc(&t->use_count);
4911 			spin_unlock(&fs_info->trans_lock);
4912 			btrfs_wait_for_commit(fs_info, t->transid);
4913 			btrfs_put_transaction(t);
4914 			spin_lock(&fs_info->trans_lock);
4915 			continue;
4916 		}
4917 		if (t == fs_info->running_transaction) {
4918 			t->state = TRANS_STATE_COMMIT_DOING;
4919 			spin_unlock(&fs_info->trans_lock);
4920 			/*
4921 			 * We wait for 0 num_writers since we don't hold a trans
4922 			 * handle open currently for this transaction.
4923 			 */
4924 			wait_event(t->writer_wait,
4925 				   atomic_read(&t->num_writers) == 0);
4926 		} else {
4927 			spin_unlock(&fs_info->trans_lock);
4928 		}
4929 		btrfs_cleanup_one_transaction(t, fs_info);
4930 
4931 		spin_lock(&fs_info->trans_lock);
4932 		if (t == fs_info->running_transaction)
4933 			fs_info->running_transaction = NULL;
4934 		list_del_init(&t->list);
4935 		spin_unlock(&fs_info->trans_lock);
4936 
4937 		btrfs_put_transaction(t);
4938 		trace_btrfs_transaction_commit(fs_info);
4939 		spin_lock(&fs_info->trans_lock);
4940 	}
4941 	spin_unlock(&fs_info->trans_lock);
4942 	btrfs_destroy_all_ordered_extents(fs_info);
4943 	btrfs_destroy_delayed_inodes(fs_info);
4944 	btrfs_assert_delayed_root_empty(fs_info);
4945 	btrfs_destroy_all_delalloc_inodes(fs_info);
4946 	btrfs_drop_all_logs(fs_info);
4947 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4948 
4949 	return 0;
4950 }
4951 
4952 int btrfs_init_root_free_objectid(struct btrfs_root *root)
4953 {
4954 	struct btrfs_path *path;
4955 	int ret;
4956 	struct extent_buffer *l;
4957 	struct btrfs_key search_key;
4958 	struct btrfs_key found_key;
4959 	int slot;
4960 
4961 	path = btrfs_alloc_path();
4962 	if (!path)
4963 		return -ENOMEM;
4964 
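	/*
	 * Search for the highest possible key; the slot just before the
	 * returned one (if any) then holds the largest objectid in this root.
	 */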
4965 	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
4966 	search_key.type = -1;
4967 	search_key.offset = (u64)-1;
4968 	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
4969 	if (ret < 0)
4970 		goto error;
4971 	BUG_ON(ret == 0); /* Corruption */
4972 	if (path->slots[0] > 0) {
4973 		slot = path->slots[0] - 1;
4974 		l = path->nodes[0];
4975 		btrfs_item_key_to_cpu(l, &found_key, slot);
4976 		root->free_objectid = max_t(u64, found_key.objectid + 1,
4977 					    BTRFS_FIRST_FREE_OBJECTID);
4978 	} else {
4979 		root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
4980 	}
4981 	ret = 0;
4982 error:
4983 	btrfs_free_path(path);
4984 	return ret;
4985 }
4986 
4987 int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
4988 {
4989 	int ret;
4990 	mutex_lock(&root->objectid_mutex);
4991 
4992 	if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
4993 		btrfs_warn(root->fs_info,
4994 			   "the objectid of root %llu reaches its highest value",
4995 			   root->root_key.objectid);
4996 		ret = -ENOSPC;
4997 		goto out;
4998 	}
4999 
5000 	*objectid = root->free_objectid++;
5001 	ret = 0;
5002 out:
5003 	mutex_unlock(&root->objectid_mutex);
5004 	return ret;
5005 }
5006