xref: /openbmc/linux/fs/btrfs/disk-io.c (revision 2634682f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/blkdev.h>
8 #include <linux/radix-tree.h>
9 #include <linux/writeback.h>
10 #include <linux/workqueue.h>
11 #include <linux/kthread.h>
12 #include <linux/slab.h>
13 #include <linux/migrate.h>
14 #include <linux/ratelimit.h>
15 #include <linux/uuid.h>
16 #include <linux/semaphore.h>
17 #include <linux/error-injection.h>
18 #include <linux/crc32c.h>
19 #include <linux/sched/mm.h>
20 #include <asm/unaligned.h>
21 #include <crypto/hash.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "btrfs_inode.h"
26 #include "volumes.h"
27 #include "print-tree.h"
28 #include "locking.h"
29 #include "tree-log.h"
30 #include "free-space-cache.h"
31 #include "free-space-tree.h"
32 #include "inode-map.h"
33 #include "check-integrity.h"
34 #include "rcu-string.h"
35 #include "dev-replace.h"
36 #include "raid56.h"
37 #include "sysfs.h"
38 #include "qgroup.h"
39 #include "compression.h"
40 #include "tree-checker.h"
41 #include "ref-verify.h"
42 #include "block-group.h"
43 #include "discard.h"
44 #include "space-info.h"
45 
46 #define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
47 				 BTRFS_HEADER_FLAG_RELOC |\
48 				 BTRFS_SUPER_FLAG_ERROR |\
49 				 BTRFS_SUPER_FLAG_SEEDING |\
50 				 BTRFS_SUPER_FLAG_METADUMP |\
51 				 BTRFS_SUPER_FLAG_METADUMP_V2)
52 
53 static const struct extent_io_ops btree_extent_io_ops;
54 static void end_workqueue_fn(struct btrfs_work *work);
55 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
56 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
57 				      struct btrfs_fs_info *fs_info);
58 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
59 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
60 					struct extent_io_tree *dirty_pages,
61 					int mark);
62 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
63 				       struct extent_io_tree *pinned_extents);
64 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
65 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
66 
67 /*
68  * btrfs_end_io_wq structs are used to do processing in task context when an IO
69  * is complete.  This is used during reads to verify checksums, and it is used
70  * by writes to insert metadata for new file extents after IO is complete.
71  */
72 struct btrfs_end_io_wq {
73 	struct bio *bio;
74 	bio_end_io_t *end_io;
75 	void *private;
76 	struct btrfs_fs_info *info;
77 	blk_status_t status;
78 	enum btrfs_wq_endio_type metadata;
79 	struct btrfs_work work;
80 };
81 
82 static struct kmem_cache *btrfs_end_io_wq_cache;
83 
84 int __init btrfs_end_io_wq_init(void)
85 {
86 	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
87 					sizeof(struct btrfs_end_io_wq),
88 					0,
89 					SLAB_MEM_SPREAD,
90 					NULL);
91 	if (!btrfs_end_io_wq_cache)
92 		return -ENOMEM;
93 	return 0;
94 }
95 
96 void __cold btrfs_end_io_wq_exit(void)
97 {
98 	kmem_cache_destroy(btrfs_end_io_wq_cache);
99 }
100 
101 static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
102 {
103 	if (fs_info->csum_shash)
104 		crypto_free_shash(fs_info->csum_shash);
105 }
106 
107 /*
108  * async submit bios are used to offload expensive checksumming
109  * onto the worker threads.  They checksum file and metadata bios
110  * just before they are sent down the IO stack.
111  */
112 struct async_submit_bio {
113 	void *private_data;
114 	struct bio *bio;
115 	extent_submit_bio_start_t *submit_bio_start;
116 	int mirror_num;
117 	/*
118 	 * bio_offset is optional and can be used if the pages in the bio
119 	 * can't tell us where in the file the bio should go
120 	 */
121 	u64 bio_offset;
122 	struct btrfs_work work;
123 	blk_status_t status;
124 };
125 
126 /*
127  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
128  * eb, the lockdep key is determined by the btrfs_root it belongs to and
129  * the level the eb occupies in the tree.
130  *
131  * Different roots are used for different purposes and may nest inside each
132  * other, so they require separate keysets.  As lockdep keys should be
133  * static, assign keysets according to the purpose of the root as indicated
134  * by btrfs_root->root_key.objectid.  This ensures that all special purpose
135  * roots have separate keysets.
136  *
137  * Lock-nesting across peer nodes is always done with the immediate parent
138  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
139  * subclass to avoid triggering lockdep warning in such cases.
140  *
141  * The key is set by the readpage_end_io_hook after the buffer has passed
142  * csum validation but before the pages are unlocked.  It is also set by
143  * btrfs_init_new_buffer on freshly allocated blocks.
144  *
145  * We also add a check to make sure the highest level of the tree is the
146  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
147  * needs updating as well.
148  */
149 #ifdef CONFIG_DEBUG_LOCK_ALLOC
150 # if BTRFS_MAX_LEVEL != 8
151 #  error "BTRFS_MAX_LEVEL does not match the lockdep keyset setup below"
152 # endif
153 
154 static struct btrfs_lockdep_keyset {
155 	u64			id;		/* root objectid */
156 	const char		*name_stem;	/* lock name stem */
157 	char			names[BTRFS_MAX_LEVEL + 1][20];
158 	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
159 } btrfs_lockdep_keysets[] = {
160 	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
161 	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
162 	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
163 	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
164 	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
165 	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
166 	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
167 	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
168 	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
169 	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
170 	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
171 	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
172 	{ .id = 0,				.name_stem = "tree"	},
173 };
174 
175 void __init btrfs_init_lockdep(void)
176 {
177 	int i, j;
178 
179 	/* initialize lockdep class names */
180 	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
181 		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
182 
183 		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
184 			snprintf(ks->names[j], sizeof(ks->names[j]),
185 				 "btrfs-%s-%02d", ks->name_stem, j);
186 	}
187 }
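
/*
 * A minimal illustration (hypothetical helper, not part of the kernel):
 * after btrfs_init_lockdep() runs, every keyset carries names such as
 * "btrfs-extent-00" .. "btrfs-extent-08", one per tree level.  Something
 * like this could dump them for debugging.
 */
static __maybe_unused void btrfs_dump_lockdep_names(void)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++)
		for (j = 0; j < ARRAY_SIZE(btrfs_lockdep_keysets[i].names); j++)
			pr_debug("%s\n", btrfs_lockdep_keysets[i].names[j]);
}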
188 
189 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
190 				    int level)
191 {
192 	struct btrfs_lockdep_keyset *ks;
193 
194 	BUG_ON(level >= ARRAY_SIZE(ks->keys));
195 
196 	/* find the matching keyset, id 0 is the default entry */
197 	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
198 		if (ks->id == objectid)
199 			break;
200 
201 	lockdep_set_class_and_name(&eb->lock,
202 				   &ks->keys[level], ks->names[level]);
203 }
204 
205 #endif
206 
207 /*
208  * Extents on the btree inode are pretty simple: there's a single extent
209  * that covers the entire device.
210  */
211 struct extent_map *btree_get_extent(struct btrfs_inode *inode,
212 				    struct page *page, size_t pg_offset,
213 				    u64 start, u64 len)
214 {
215 	struct extent_map_tree *em_tree = &inode->extent_tree;
216 	struct extent_map *em;
217 	int ret;
218 
219 	read_lock(&em_tree->lock);
220 	em = lookup_extent_mapping(em_tree, start, len);
221 	if (em) {
222 		read_unlock(&em_tree->lock);
223 		goto out;
224 	}
225 	read_unlock(&em_tree->lock);
226 
227 	em = alloc_extent_map();
228 	if (!em) {
229 		em = ERR_PTR(-ENOMEM);
230 		goto out;
231 	}
232 	em->start = 0;
233 	em->len = (u64)-1;
234 	em->block_len = (u64)-1;
235 	em->block_start = 0;
236 
237 	write_lock(&em_tree->lock);
238 	ret = add_extent_mapping(em_tree, em, 0);
239 	if (ret == -EEXIST) {
240 		free_extent_map(em);
241 		em = lookup_extent_mapping(em_tree, start, len);
242 		if (!em)
243 			em = ERR_PTR(-EIO);
244 	} else if (ret) {
245 		free_extent_map(em);
246 		em = ERR_PTR(ret);
247 	}
248 	write_unlock(&em_tree->lock);
249 
250 out:
251 	return em;
252 }
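
/*
 * Illustration (hypothetical helper, not called anywhere): any lookup on
 * the btree inode resolves to the single [0, (u64)-1) mapping created
 * above, so logical btree addresses translate 1:1 to block device offsets.
 */
static __maybe_unused struct extent_map *btree_extent_lookup_example(
						struct btrfs_inode *btree_inode)
{
	/* page/pg_offset are unused by btree_get_extent, range is arbitrary */
	return btree_get_extent(btree_inode, NULL, 0, 16384, 16384);
}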
253 
254 /*
255  * Compute the csum of a btree block and store the result to provided buffer.
256  */
257 static void csum_tree_block(struct extent_buffer *buf, u8 *result)
258 {
259 	struct btrfs_fs_info *fs_info = buf->fs_info;
260 	const int num_pages = fs_info->nodesize >> PAGE_SHIFT;
261 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
262 	char *kaddr;
263 	int i;
264 
265 	shash->tfm = fs_info->csum_shash;
266 	crypto_shash_init(shash);
267 	kaddr = page_address(buf->pages[0]);
268 	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
269 			    PAGE_SIZE - BTRFS_CSUM_SIZE);
270 
271 	for (i = 1; i < num_pages; i++) {
272 		kaddr = page_address(buf->pages[i]);
273 		crypto_shash_update(shash, kaddr, PAGE_SIZE);
274 	}
275 	memset(result, 0, BTRFS_CSUM_SIZE);
276 	crypto_shash_final(shash, result);
277 }
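
/*
 * Sketch (illustration only): a read-side caller can compare the checksum
 * stored in the header against a freshly computed one, which is what
 * btree_readpage_end_io_hook() does further below.
 */
static __maybe_unused bool tree_block_csum_matches(struct extent_buffer *eb)
{
	u8 result[BTRFS_CSUM_SIZE];
	u16 csum_size = btrfs_super_csum_size(eb->fs_info->super_copy);

	csum_tree_block(eb, result);
	/* The on-disk csum lives in the first csum_size bytes of the header */
	return memcmp_extent_buffer(eb, result, 0, csum_size) == 0;
}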
278 
279 /*
280  * we can't consider a given block up to date unless the transid of the
281  * block matches the transid in the parent node's pointer.  This is how we
282  * detect blocks that either didn't get written at all or got written
283  * in the wrong place.
284  */
285 static int verify_parent_transid(struct extent_io_tree *io_tree,
286 				 struct extent_buffer *eb, u64 parent_transid,
287 				 int atomic)
288 {
289 	struct extent_state *cached_state = NULL;
290 	int ret;
291 	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);
292 
293 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
294 		return 0;
295 
296 	if (atomic)
297 		return -EAGAIN;
298 
299 	if (need_lock) {
300 		btrfs_tree_read_lock(eb);
301 		btrfs_set_lock_blocking_read(eb);
302 	}
303 
304 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
305 			 &cached_state);
306 	if (extent_buffer_uptodate(eb) &&
307 	    btrfs_header_generation(eb) == parent_transid) {
308 		ret = 0;
309 		goto out;
310 	}
311 	btrfs_err_rl(eb->fs_info,
312 		"parent transid verify failed on %llu wanted %llu found %llu",
313 			eb->start,
314 			parent_transid, btrfs_header_generation(eb));
315 	ret = 1;
316 
317 	/*
318 	 * Things reading via commit roots that don't have normal protection,
319 	 * like send, can have a really old block in cache that may point at a
320 	 * block that has been freed and re-allocated.  So don't clear uptodate
321 	 * if we find an eb that is under IO (dirty/writeback) because we could
322 	 * end up reading in the stale data and then writing it back out and
323 	 * making everybody very sad.
324 	 */
325 	if (!extent_buffer_under_io(eb))
326 		clear_extent_buffer_uptodate(eb);
327 out:
328 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
329 			     &cached_state);
330 	if (need_lock)
331 		btrfs_tree_read_unlock_blocking(eb);
332 	return ret;
333 }
334 
335 static bool btrfs_supported_super_csum(u16 csum_type)
336 {
337 	switch (csum_type) {
338 	case BTRFS_CSUM_TYPE_CRC32:
339 	case BTRFS_CSUM_TYPE_XXHASH:
340 	case BTRFS_CSUM_TYPE_SHA256:
341 	case BTRFS_CSUM_TYPE_BLAKE2:
342 		return true;
343 	default:
344 		return false;
345 	}
346 }
347 
348 /*
349  * Return 0 if the superblock checksum type matches the checksum value of that
350  * algorithm. Pass the raw disk superblock data.
351  */
352 static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
353 				  char *raw_disk_sb)
354 {
355 	struct btrfs_super_block *disk_sb =
356 		(struct btrfs_super_block *)raw_disk_sb;
357 	char result[BTRFS_CSUM_SIZE];
358 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
359 
360 	shash->tfm = fs_info->csum_shash;
361 
362 	/*
363 	 * The super_block structure does not span the whole
364 	 * BTRFS_SUPER_INFO_SIZE range; we expect that the unused space is
365 	 * filled with zeros and is included in the checksum.
366 	 */
367 	crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
368 			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
369 
370 	if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb)))
371 		return 1;
372 
373 	return 0;
374 }
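
/*
 * Usage sketch (hypothetical): given a BTRFS_SUPER_INFO_SIZE buffer read
 * from disk, reject unsupported checksum types before verifying the csum,
 * mirroring what open_ctree() does at mount time.
 */
static __maybe_unused int validate_raw_super_example(
		struct btrfs_fs_info *fs_info, char *raw_disk_sb)
{
	struct btrfs_super_block *sb = (struct btrfs_super_block *)raw_disk_sb;

	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb)))
		return -EUCLEAN;
	if (btrfs_check_super_csum(fs_info, raw_disk_sb))
		return -EUCLEAN;
	return 0;
}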
375 
376 int btrfs_verify_level_key(struct extent_buffer *eb, int level,
377 			   struct btrfs_key *first_key, u64 parent_transid)
378 {
379 	struct btrfs_fs_info *fs_info = eb->fs_info;
380 	int found_level;
381 	struct btrfs_key found_key;
382 	int ret;
383 
384 	found_level = btrfs_header_level(eb);
385 	if (found_level != level) {
386 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
387 		     KERN_ERR "BTRFS: tree level check failed\n");
388 		btrfs_err(fs_info,
389 "tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
390 			  eb->start, level, found_level);
391 		return -EIO;
392 	}
393 
394 	if (!first_key)
395 		return 0;
396 
397 	/*
398 	 * For live tree blocks (new tree blocks in the current transaction),
399 	 * we need proper lock context to avoid races, which is impossible here.
400 	 * So we only check tree blocks that were read from disk, whose
401 	 * generation <= fs_info->last_trans_committed.
402 	 */
403 	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
404 		return 0;
405 
406 	/* We have @first_key, so this @eb must have at least one item */
407 	if (btrfs_header_nritems(eb) == 0) {
408 		btrfs_err(fs_info,
409 		"invalid tree nritems, bytenr=%llu nritems=0 expect >0",
410 			  eb->start);
411 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
412 		return -EUCLEAN;
413 	}
414 
415 	if (found_level)
416 		btrfs_node_key_to_cpu(eb, &found_key, 0);
417 	else
418 		btrfs_item_key_to_cpu(eb, &found_key, 0);
419 	ret = btrfs_comp_cpu_keys(first_key, &found_key);
420 
421 	if (ret) {
422 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
423 		     KERN_ERR "BTRFS: tree first key check failed\n");
424 		btrfs_err(fs_info,
425 "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
426 			  eb->start, parent_transid, first_key->objectid,
427 			  first_key->type, first_key->offset,
428 			  found_key.objectid, found_key.type,
429 			  found_key.offset);
430 	}
431 	return ret;
432 }
433 
434 /*
435  * Helper to read a given tree block, retrying as required when the
436  * checksums don't match and we have alternate mirrors to try.
437  *
438  * @parent_transid:	expected transid, skip check if 0
439  * @level:		expected level, mandatory check
440  * @first_key:		expected key of first slot, skip check if NULL
441  */
442 static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
443 					  u64 parent_transid, int level,
444 					  struct btrfs_key *first_key)
445 {
446 	struct btrfs_fs_info *fs_info = eb->fs_info;
447 	struct extent_io_tree *io_tree;
448 	int failed = 0;
449 	int ret;
450 	int num_copies = 0;
451 	int mirror_num = 0;
452 	int failed_mirror = 0;
453 
454 	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
455 	while (1) {
456 		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
457 		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
458 		if (!ret) {
459 			if (verify_parent_transid(io_tree, eb,
460 						   parent_transid, 0))
461 				ret = -EIO;
462 			else if (btrfs_verify_level_key(eb, level,
463 						first_key, parent_transid))
464 				ret = -EUCLEAN;
465 			else
466 				break;
467 		}
468 
469 		num_copies = btrfs_num_copies(fs_info,
470 					      eb->start, eb->len);
471 		if (num_copies == 1)
472 			break;
473 
474 		if (!failed_mirror) {
475 			failed = 1;
476 			failed_mirror = eb->read_mirror;
477 		}
478 
479 		mirror_num++;
480 		if (mirror_num == failed_mirror)
481 			mirror_num++;
482 
483 		if (mirror_num > num_copies)
484 			break;
485 	}
486 
487 	if (failed && !ret && failed_mirror)
488 		btrfs_repair_eb_io_failure(eb, failed_mirror);
489 
490 	return ret;
491 }
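
/*
 * Illustration (pure arithmetic, not a kernel helper): mirror numbers are
 * 1-based and 0 means "let the chunk layer pick".  The retry loop above
 * walks the mirrors up to num_copies, skipping the one that already
 * failed; this models the same stepping.
 */
static __maybe_unused int next_mirror_example(int cur, int failed_mirror,
					      int num_copies)
{
	int next = cur + 1;

	if (next == failed_mirror)
		next++;
	return next > num_copies ? 0 : next;
}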
492 
493 /*
494  * Checksum a dirty tree block before IO.  This has extra checks to make sure
495  * we only fill in the checksum field in the first page of a multi-page block.
496  */
497 
498 static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
499 {
500 	u64 start = page_offset(page);
501 	u64 found_start;
502 	u8 result[BTRFS_CSUM_SIZE];
503 	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
504 	struct extent_buffer *eb;
505 	int ret;
506 
507 	eb = (struct extent_buffer *)page->private;
508 	if (page != eb->pages[0])
509 		return 0;
510 
511 	found_start = btrfs_header_bytenr(eb);
512 	/*
513 	 * Please do not consolidate these warnings into a single if.
514 	 * It is useful to know what went wrong.
515 	 */
516 	if (WARN_ON(found_start != start))
517 		return -EUCLEAN;
518 	if (WARN_ON(!PageUptodate(page)))
519 		return -EUCLEAN;
520 
521 	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
522 				    offsetof(struct btrfs_header, fsid),
523 				    BTRFS_FSID_SIZE) == 0);
524 
525 	csum_tree_block(eb, result);
526 
527 	if (btrfs_header_level(eb))
528 		ret = btrfs_check_node(eb);
529 	else
530 		ret = btrfs_check_leaf_full(eb);
531 
532 	if (ret < 0) {
533 		btrfs_print_tree(eb, 0);
534 		btrfs_err(fs_info,
535 		"block=%llu write time tree block corruption detected",
536 			  eb->start);
537 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
538 		return ret;
539 	}
540 	write_extent_buffer(eb, result, 0, csum_size);
541 
542 	return 0;
543 }
544 
545 static int check_tree_block_fsid(struct extent_buffer *eb)
546 {
547 	struct btrfs_fs_info *fs_info = eb->fs_info;
548 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
549 	u8 fsid[BTRFS_FSID_SIZE];
550 	int ret = 1;
551 
552 	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
553 			   BTRFS_FSID_SIZE);
554 	while (fs_devices) {
555 		u8 *metadata_uuid;
556 
557 		/*
558 		 * Checking the incompat flag is only valid for the current
559 		 * fs. For seed devices it's forbidden to have their uuid
560 		 * changed, so reading ->fsid in this case is fine.
561 		 */
562 		if (fs_devices == fs_info->fs_devices &&
563 		    btrfs_fs_incompat(fs_info, METADATA_UUID))
564 			metadata_uuid = fs_devices->metadata_uuid;
565 		else
566 			metadata_uuid = fs_devices->fsid;
567 
568 		if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
569 			ret = 0;
570 			break;
571 		}
572 		fs_devices = fs_devices->seed;
573 	}
574 	return ret;
575 }
576 
577 static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
578 				      u64 phy_offset, struct page *page,
579 				      u64 start, u64 end, int mirror)
580 {
581 	u64 found_start;
582 	int found_level;
583 	struct extent_buffer *eb;
584 	struct btrfs_fs_info *fs_info;
585 	u16 csum_size;
586 	int ret = 0;
587 	u8 result[BTRFS_CSUM_SIZE];
588 	int reads_done;
589 
590 	if (!page->private)
591 		goto out;
592 
593 	eb = (struct extent_buffer *)page->private;
594 	fs_info = eb->fs_info;
595 	csum_size = btrfs_super_csum_size(fs_info->super_copy);
596 
597 	/* The pending IO might have been the only thing that kept this buffer
598 	 * in memory.  Make sure we have a ref for all these other checks.
599 	 */
600 	atomic_inc(&eb->refs);
601 
602 	reads_done = atomic_dec_and_test(&eb->io_pages);
603 	if (!reads_done)
604 		goto err;
605 
606 	eb->read_mirror = mirror;
607 	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
608 		ret = -EIO;
609 		goto err;
610 	}
611 
612 	found_start = btrfs_header_bytenr(eb);
613 	if (found_start != eb->start) {
614 		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
615 			     eb->start, found_start);
616 		ret = -EIO;
617 		goto err;
618 	}
619 	if (check_tree_block_fsid(eb)) {
620 		btrfs_err_rl(fs_info, "bad fsid on block %llu",
621 			     eb->start);
622 		ret = -EIO;
623 		goto err;
624 	}
625 	found_level = btrfs_header_level(eb);
626 	if (found_level >= BTRFS_MAX_LEVEL) {
627 		btrfs_err(fs_info, "bad tree block level %d on %llu",
628 			  (int)btrfs_header_level(eb), eb->start);
629 		ret = -EIO;
630 		goto err;
631 	}
632 
633 	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
634 				       eb, found_level);
635 
636 	csum_tree_block(eb, result);
637 
638 	if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
639 		u32 val;
640 		u32 found = 0;
641 
642 		memcpy(&found, result, csum_size);
643 
644 		read_extent_buffer(eb, &val, 0, csum_size);
645 		btrfs_warn_rl(fs_info,
646 		"%s checksum verify failed on %llu wanted %x found %x level %d",
647 			      fs_info->sb->s_id, eb->start,
648 			      val, found, btrfs_header_level(eb));
649 		ret = -EUCLEAN;
650 		goto err;
651 	}
652 
653 	/*
654 	 * If this is a leaf block and it is corrupt, set the corrupt bit so
655 	 * that we don't try to read the other copies of this block, just
656 	 * return -EIO.
657 	 */
658 	if (found_level == 0 && btrfs_check_leaf_full(eb)) {
659 		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
660 		ret = -EIO;
661 	}
662 
663 	if (found_level > 0 && btrfs_check_node(eb))
664 		ret = -EIO;
665 
666 	if (!ret)
667 		set_extent_buffer_uptodate(eb);
668 	else
669 		btrfs_err(fs_info,
670 			  "block=%llu read time tree block corruption detected",
671 			  eb->start);
672 err:
673 	if (reads_done &&
674 	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
675 		btree_readahead_hook(eb, ret);
676 
677 	if (ret) {
678 		/*
679 		 * Our io error hook is going to dec the io pages
680 		 * again, so we have to make sure it has something
681 		 * to decrement.
682 		 */
683 		atomic_inc(&eb->io_pages);
684 		clear_extent_buffer_uptodate(eb);
685 	}
686 	free_extent_buffer(eb);
687 out:
688 	return ret;
689 }
690 
691 static void end_workqueue_bio(struct bio *bio)
692 {
693 	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
694 	struct btrfs_fs_info *fs_info;
695 	struct btrfs_workqueue *wq;
696 
697 	fs_info = end_io_wq->info;
698 	end_io_wq->status = bio->bi_status;
699 
700 	if (bio_op(bio) == REQ_OP_WRITE) {
701 		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
702 			wq = fs_info->endio_meta_write_workers;
703 		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
704 			wq = fs_info->endio_freespace_worker;
705 		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
706 			wq = fs_info->endio_raid56_workers;
707 		else
708 			wq = fs_info->endio_write_workers;
709 	} else {
710 		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
711 			wq = fs_info->endio_raid56_workers;
712 		else if (end_io_wq->metadata)
713 			wq = fs_info->endio_meta_workers;
714 		else
715 			wq = fs_info->endio_workers;
716 	}
717 
718 	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
719 	btrfs_queue_work(wq, &end_io_wq->work);
720 }
721 
722 blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
723 			enum btrfs_wq_endio_type metadata)
724 {
725 	struct btrfs_end_io_wq *end_io_wq;
726 
727 	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
728 	if (!end_io_wq)
729 		return BLK_STS_RESOURCE;
730 
731 	end_io_wq->private = bio->bi_private;
732 	end_io_wq->end_io = bio->bi_end_io;
733 	end_io_wq->info = info;
734 	end_io_wq->status = 0;
735 	end_io_wq->bio = bio;
736 	end_io_wq->metadata = metadata;
737 
738 	bio->bi_private = end_io_wq;
739 	bio->bi_end_io = end_workqueue_bio;
740 	return 0;
741 }
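
/*
 * Sketch (not a real kernel path): a metadata read wires its completion
 * through the endio workqueue so checksum verification runs in task
 * context, then hands the bio to the chunk layer.  This is the same
 * pattern btree_submit_bio_hook() uses below.
 */
static __maybe_unused blk_status_t submit_meta_read_example(
		struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num)
{
	blk_status_t ret;

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_METADATA);
	if (ret)
		return ret;
	return btrfs_map_bio(fs_info, bio, mirror_num);
}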
742 
743 static void run_one_async_start(struct btrfs_work *work)
744 {
745 	struct async_submit_bio *async;
746 	blk_status_t ret;
747 
748 	async = container_of(work, struct async_submit_bio, work);
749 	ret = async->submit_bio_start(async->private_data, async->bio,
750 				      async->bio_offset);
751 	if (ret)
752 		async->status = ret;
753 }
754 
755 /*
756  * In order to insert checksums into the metadata in large chunks, we wait
757  * until bio submission time.  All the pages in the bio are checksummed and
758  * the sums are attached to the ordered extent record.
759  *
760  * At IO completion time the csums attached to the ordered extent record are
761  * inserted into the tree.
762  */
763 static void run_one_async_done(struct btrfs_work *work)
764 {
765 	struct async_submit_bio *async;
766 	struct inode *inode;
767 	blk_status_t ret;
768 
769 	async = container_of(work, struct async_submit_bio, work);
770 	inode = async->private_data;
771 
772 	/* If an error occurred we just want to clean up the bio and move on */
773 	if (async->status) {
774 		async->bio->bi_status = async->status;
775 		bio_endio(async->bio);
776 		return;
777 	}
778 
779 	/*
780 	 * All of the bios that pass through here are from async helpers.
781 	 * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
782 	 * This changes nothing when cgroups aren't in use.
783 	 */
784 	async->bio->bi_opf |= REQ_CGROUP_PUNT;
785 	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
786 	if (ret) {
787 		async->bio->bi_status = ret;
788 		bio_endio(async->bio);
789 	}
790 }
791 
792 static void run_one_async_free(struct btrfs_work *work)
793 {
794 	struct async_submit_bio *async;
795 
796 	async = container_of(work, struct async_submit_bio, work);
797 	kfree(async);
798 }
799 
800 blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
801 				 int mirror_num, unsigned long bio_flags,
802 				 u64 bio_offset, void *private_data,
803 				 extent_submit_bio_start_t *submit_bio_start)
804 {
805 	struct async_submit_bio *async;
806 
807 	async = kmalloc(sizeof(*async), GFP_NOFS);
808 	if (!async)
809 		return BLK_STS_RESOURCE;
810 
811 	async->private_data = private_data;
812 	async->bio = bio;
813 	async->mirror_num = mirror_num;
814 	async->submit_bio_start = submit_bio_start;
815 
816 	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
817 			run_one_async_free);
818 
819 	async->bio_offset = bio_offset;
820 
821 	async->status = 0;
822 
823 	if (op_is_sync(bio->bi_opf))
824 		btrfs_set_work_high_priority(&async->work);
825 
826 	btrfs_queue_work(fs_info->workers, &async->work);
827 	return 0;
828 }
829 
830 static blk_status_t btree_csum_one_bio(struct bio *bio)
831 {
832 	struct bio_vec *bvec;
833 	struct btrfs_root *root;
834 	int ret = 0;
835 	struct bvec_iter_all iter_all;
836 
837 	ASSERT(!bio_flagged(bio, BIO_CLONED));
838 	bio_for_each_segment_all(bvec, bio, iter_all) {
839 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
840 		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
841 		if (ret)
842 			break;
843 	}
844 
845 	return errno_to_blk_status(ret);
846 }
847 
848 static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
849 					     u64 bio_offset)
850 {
851 	/*
852 	 * When we're called for a write, we're already in the async submission
853 	 * context.  Just checksum the bio; run_one_async_done() maps and submits it.
854 	 */
855 	return btree_csum_one_bio(bio);
856 }
857 
858 static int check_async_write(struct btrfs_fs_info *fs_info,
859 			     struct btrfs_inode *bi)
860 {
861 	if (atomic_read(&bi->sync_writers))
862 		return 0;
863 	if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
864 		return 0;
865 	return 1;
866 }
867 
868 static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
869 					  int mirror_num,
870 					  unsigned long bio_flags)
871 {
872 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
873 	int async = check_async_write(fs_info, BTRFS_I(inode));
874 	blk_status_t ret;
875 
876 	if (bio_op(bio) != REQ_OP_WRITE) {
877 		/*
878 		 * called for a read, do the setup so that checksum validation
879 		 * can happen in the async kernel threads
880 		 */
881 		ret = btrfs_bio_wq_end_io(fs_info, bio,
882 					  BTRFS_WQ_ENDIO_METADATA);
883 		if (ret)
884 			goto out_w_error;
885 		ret = btrfs_map_bio(fs_info, bio, mirror_num);
886 	} else if (!async) {
887 		ret = btree_csum_one_bio(bio);
888 		if (ret)
889 			goto out_w_error;
890 		ret = btrfs_map_bio(fs_info, bio, mirror_num);
891 	} else {
892 		/*
893 		 * kthread helpers are used to submit writes so that
894 		 * checksumming can happen in parallel across all CPUs
895 		 */
896 		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
897 					  0, inode, btree_submit_bio_start);
898 	}
899 
900 	if (ret)
901 		goto out_w_error;
902 	return 0;
903 
904 out_w_error:
905 	bio->bi_status = ret;
906 	bio_endio(bio);
907 	return ret;
908 }
909 
910 #ifdef CONFIG_MIGRATION
911 static int btree_migratepage(struct address_space *mapping,
912 			struct page *newpage, struct page *page,
913 			enum migrate_mode mode)
914 {
915 	/*
916 	 * we can't safely write a btree page from here
917 	 * because we haven't done the locking hook
918 	 */
919 	if (PageDirty(page))
920 		return -EAGAIN;
921 	/*
922 	 * Buffers may be managed in a filesystem specific way.
923 	 * We must have no buffers or drop them.
924 	 */
925 	if (page_has_private(page) &&
926 	    !try_to_release_page(page, GFP_KERNEL))
927 		return -EAGAIN;
928 	return migrate_page(mapping, newpage, page, mode);
929 }
930 #endif
931 
932 
933 static int btree_writepages(struct address_space *mapping,
934 			    struct writeback_control *wbc)
935 {
936 	struct btrfs_fs_info *fs_info;
937 	int ret;
938 
939 	if (wbc->sync_mode == WB_SYNC_NONE) {
940 
941 		if (wbc->for_kupdate)
942 			return 0;
943 
944 		fs_info = BTRFS_I(mapping->host)->root->fs_info;
945 		/* this is a bit racy, but that's ok */
946 		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
947 					     BTRFS_DIRTY_METADATA_THRESH,
948 					     fs_info->dirty_metadata_batch);
949 		if (ret < 0)
950 			return 0;
951 	}
952 	return btree_write_cache_pages(mapping, wbc);
953 }
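
/*
 * Sketch (illustration): the threshold test above, factored out.  Returns
 * true when enough dirty metadata has accumulated for background
 * writeback to be worthwhile.
 */
static __maybe_unused bool dirty_meta_over_threshold(
					struct btrfs_fs_info *fs_info)
{
	return __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					BTRFS_DIRTY_METADATA_THRESH,
					fs_info->dirty_metadata_batch) >= 0;
}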
954 
955 static int btree_readpage(struct file *file, struct page *page)
956 {
957 	return extent_read_full_page(page, btree_get_extent, 0);
958 }
959 
960 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
961 {
962 	if (PageWriteback(page) || PageDirty(page))
963 		return 0;
964 
965 	return try_release_extent_buffer(page);
966 }
967 
968 static void btree_invalidatepage(struct page *page, unsigned int offset,
969 				 unsigned int length)
970 {
971 	struct extent_io_tree *tree;
972 	tree = &BTRFS_I(page->mapping->host)->io_tree;
973 	extent_invalidatepage(tree, page, offset);
974 	btree_releasepage(page, GFP_NOFS);
975 	if (PagePrivate(page)) {
976 		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
977 			   "page private not zero on page %llu",
978 			   (unsigned long long)page_offset(page));
979 		detach_page_private(page);
980 	}
981 }
982 
983 static int btree_set_page_dirty(struct page *page)
984 {
985 #ifdef DEBUG
986 	struct extent_buffer *eb;
987 
988 	BUG_ON(!PagePrivate(page));
989 	eb = (struct extent_buffer *)page->private;
990 	BUG_ON(!eb);
991 	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
992 	BUG_ON(!atomic_read(&eb->refs));
993 	btrfs_assert_tree_locked(eb);
994 #endif
995 	return __set_page_dirty_nobuffers(page);
996 }
997 
998 static const struct address_space_operations btree_aops = {
999 	.readpage	= btree_readpage,
1000 	.writepages	= btree_writepages,
1001 	.releasepage	= btree_releasepage,
1002 	.invalidatepage = btree_invalidatepage,
1003 #ifdef CONFIG_MIGRATION
1004 	.migratepage	= btree_migratepage,
1005 #endif
1006 	.set_page_dirty = btree_set_page_dirty,
1007 };
1008 
1009 void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
1010 {
1011 	struct extent_buffer *buf = NULL;
1012 	int ret;
1013 
1014 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
1015 	if (IS_ERR(buf))
1016 		return;
1017 
1018 	ret = read_extent_buffer_pages(buf, WAIT_NONE, 0);
1019 	if (ret < 0)
1020 		free_extent_buffer_stale(buf);
1021 	else
1022 		free_extent_buffer(buf);
1023 }
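
/*
 * Usage sketch (hypothetical): fire-and-forget readahead of all children
 * of a node; errors are intentionally ignored, just like above.
 */
static __maybe_unused void readahead_node_children_example(
						struct extent_buffer *node)
{
	int i;

	for (i = 0; i < btrfs_header_nritems(node); i++)
		readahead_tree_block(node->fs_info,
				     btrfs_node_blockptr(node, i));
}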
1024 
1025 struct extent_buffer *btrfs_find_create_tree_block(
1026 						struct btrfs_fs_info *fs_info,
1027 						u64 bytenr)
1028 {
1029 	if (btrfs_is_testing(fs_info))
1030 		return alloc_test_extent_buffer(fs_info, bytenr);
1031 	return alloc_extent_buffer(fs_info, bytenr);
1032 }
1033 
1034 /*
1035  * Read tree block at logical address @bytenr and do basic but critical
1036  * verification on it.
1037  *
1038  * @parent_transid:	expected transid of this tree block, skip check if 0
1039  * @level:		expected level, mandatory check
1040  * @first_key:		expected key in slot 0, skip check if NULL
1041  */
1042 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
1043 				      u64 parent_transid, int level,
1044 				      struct btrfs_key *first_key)
1045 {
1046 	struct extent_buffer *buf = NULL;
1047 	int ret;
1048 
1049 	buf = btrfs_find_create_tree_block(fs_info, bytenr);
1050 	if (IS_ERR(buf))
1051 		return buf;
1052 
1053 	ret = btree_read_extent_buffer_pages(buf, parent_transid,
1054 					     level, first_key);
1055 	if (ret) {
1056 		free_extent_buffer_stale(buf);
1057 		return ERR_PTR(ret);
1058 	}
1059 	return buf;
1060 
1061 }
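
/*
 * Usage sketch (illustration): read a root's node the way
 * btrfs_read_tree_root() does below, passing the expected generation and
 * level from the root item so the read path can verify both.
 */
static __maybe_unused struct extent_buffer *read_root_node_example(
		struct btrfs_fs_info *fs_info, struct btrfs_root_item *ri)
{
	return read_tree_block(fs_info, btrfs_root_bytenr(ri),
			       btrfs_root_generation(ri),
			       btrfs_root_level(ri), NULL);
}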
1062 
1063 void btrfs_clean_tree_block(struct extent_buffer *buf)
1064 {
1065 	struct btrfs_fs_info *fs_info = buf->fs_info;
1066 	if (btrfs_header_generation(buf) ==
1067 	    fs_info->running_transaction->transid) {
1068 		btrfs_assert_tree_locked(buf);
1069 
1070 		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1071 			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1072 						 -buf->len,
1073 						 fs_info->dirty_metadata_batch);
1074 			/* ugh, clear_extent_buffer_dirty needs to lock the page */
1075 			btrfs_set_lock_blocking_write(buf);
1076 			clear_extent_buffer_dirty(buf);
1077 		}
1078 	}
1079 }
1080 
1081 static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1082 			 u64 objectid)
1083 {
1084 	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
1085 	root->fs_info = fs_info;
1086 	root->node = NULL;
1087 	root->commit_root = NULL;
1088 	root->state = 0;
1089 	root->orphan_cleanup_state = 0;
1090 
1091 	root->last_trans = 0;
1092 	root->highest_objectid = 0;
1093 	root->nr_delalloc_inodes = 0;
1094 	root->nr_ordered_extents = 0;
1095 	root->inode_tree = RB_ROOT;
1096 	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1097 	root->block_rsv = NULL;
1098 
1099 	INIT_LIST_HEAD(&root->dirty_list);
1100 	INIT_LIST_HEAD(&root->root_list);
1101 	INIT_LIST_HEAD(&root->delalloc_inodes);
1102 	INIT_LIST_HEAD(&root->delalloc_root);
1103 	INIT_LIST_HEAD(&root->ordered_extents);
1104 	INIT_LIST_HEAD(&root->ordered_root);
1105 	INIT_LIST_HEAD(&root->reloc_dirty_list);
1106 	INIT_LIST_HEAD(&root->logged_list[0]);
1107 	INIT_LIST_HEAD(&root->logged_list[1]);
1108 	spin_lock_init(&root->inode_lock);
1109 	spin_lock_init(&root->delalloc_lock);
1110 	spin_lock_init(&root->ordered_extent_lock);
1111 	spin_lock_init(&root->accounting_lock);
1112 	spin_lock_init(&root->log_extents_lock[0]);
1113 	spin_lock_init(&root->log_extents_lock[1]);
1114 	spin_lock_init(&root->qgroup_meta_rsv_lock);
1115 	mutex_init(&root->objectid_mutex);
1116 	mutex_init(&root->log_mutex);
1117 	mutex_init(&root->ordered_extent_mutex);
1118 	mutex_init(&root->delalloc_mutex);
1119 	init_waitqueue_head(&root->qgroup_flush_wait);
1120 	init_waitqueue_head(&root->log_writer_wait);
1121 	init_waitqueue_head(&root->log_commit_wait[0]);
1122 	init_waitqueue_head(&root->log_commit_wait[1]);
1123 	INIT_LIST_HEAD(&root->log_ctxs[0]);
1124 	INIT_LIST_HEAD(&root->log_ctxs[1]);
1125 	atomic_set(&root->log_commit[0], 0);
1126 	atomic_set(&root->log_commit[1], 0);
1127 	atomic_set(&root->log_writers, 0);
1128 	atomic_set(&root->log_batch, 0);
1129 	refcount_set(&root->refs, 1);
1130 	atomic_set(&root->snapshot_force_cow, 0);
1131 	atomic_set(&root->nr_swapfiles, 0);
1132 	root->log_transid = 0;
1133 	root->log_transid_committed = -1;
1134 	root->last_log_commit = 0;
1135 	if (!dummy) {
1136 		extent_io_tree_init(fs_info, &root->dirty_log_pages,
1137 				    IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);
1138 		extent_io_tree_init(fs_info, &root->log_csum_range,
1139 				    IO_TREE_LOG_CSUM_RANGE, NULL);
1140 	}
1141 
1142 	memset(&root->root_key, 0, sizeof(root->root_key));
1143 	memset(&root->root_item, 0, sizeof(root->root_item));
1144 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1145 	root->root_key.objectid = objectid;
1146 	root->anon_dev = 0;
1147 
1148 	spin_lock_init(&root->root_item_lock);
1149 	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
1150 #ifdef CONFIG_BTRFS_DEBUG
1151 	INIT_LIST_HEAD(&root->leak_list);
1152 	spin_lock(&fs_info->fs_roots_radix_lock);
1153 	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
1154 	spin_unlock(&fs_info->fs_roots_radix_lock);
1155 #endif
1156 }
1157 
1158 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
1159 					   u64 objectid, gfp_t flags)
1160 {
1161 	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
1162 	if (root)
1163 		__setup_root(root, fs_info, objectid);
1164 	return root;
1165 }
1166 
1167 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1168 /* Should only be used by the testing infrastructure */
1169 struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
1170 {
1171 	struct btrfs_root *root;
1172 
1173 	if (!fs_info)
1174 		return ERR_PTR(-EINVAL);
1175 
1176 	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
1177 	if (!root)
1178 		return ERR_PTR(-ENOMEM);
1179 
1180 	/* We don't use the stripesize in selftest, set it as sectorsize */
1181 	root->alloc_bytenr = 0;
1182 
1183 	return root;
1184 }
1185 #endif
1186 
1187 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1188 				     u64 objectid)
1189 {
1190 	struct btrfs_fs_info *fs_info = trans->fs_info;
1191 	struct extent_buffer *leaf;
1192 	struct btrfs_root *tree_root = fs_info->tree_root;
1193 	struct btrfs_root *root;
1194 	struct btrfs_key key;
1195 	unsigned int nofs_flag;
1196 	int ret = 0;
1197 
1198 	/*
1199 	 * We're holding a transaction handle, so use a NOFS memory allocation
1200 	 * context to avoid deadlock if reclaim happens.
1201 	 */
1202 	nofs_flag = memalloc_nofs_save();
1203 	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
1204 	memalloc_nofs_restore(nofs_flag);
1205 	if (!root)
1206 		return ERR_PTR(-ENOMEM);
1207 
1208 	root->root_key.objectid = objectid;
1209 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1210 	root->root_key.offset = 0;
1211 
1212 	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
1213 	if (IS_ERR(leaf)) {
1214 		ret = PTR_ERR(leaf);
1215 		leaf = NULL;
1216 		goto fail;
1217 	}
1218 
1219 	root->node = leaf;
1220 	btrfs_mark_buffer_dirty(leaf);
1221 
1222 	root->commit_root = btrfs_root_node(root);
1223 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
1224 
1225 	root->root_item.flags = 0;
1226 	root->root_item.byte_limit = 0;
1227 	btrfs_set_root_bytenr(&root->root_item, leaf->start);
1228 	btrfs_set_root_generation(&root->root_item, trans->transid);
1229 	btrfs_set_root_level(&root->root_item, 0);
1230 	btrfs_set_root_refs(&root->root_item, 1);
1231 	btrfs_set_root_used(&root->root_item, leaf->len);
1232 	btrfs_set_root_last_snapshot(&root->root_item, 0);
1233 	btrfs_set_root_dirid(&root->root_item, 0);
1234 	if (is_fstree(objectid))
1235 		generate_random_guid(root->root_item.uuid);
1236 	else
1237 		export_guid(root->root_item.uuid, &guid_null);
1238 	root->root_item.drop_level = 0;
1239 
1240 	key.objectid = objectid;
1241 	key.type = BTRFS_ROOT_ITEM_KEY;
1242 	key.offset = 0;
1243 	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1244 	if (ret)
1245 		goto fail;
1246 
1247 	btrfs_tree_unlock(leaf);
1248 
1249 	return root;
1250 
1251 fail:
1252 	if (leaf)
1253 		btrfs_tree_unlock(leaf);
1254 	btrfs_put_root(root);
1255 
1256 	return ERR_PTR(ret);
1257 }
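
/*
 * Usage sketch (hypothetical): create a brand new tree inside an open
 * transaction; this is how e.g. the free space tree gets created.
 */
static __maybe_unused struct btrfs_root *create_tree_example(
					struct btrfs_trans_handle *trans)
{
	return btrfs_create_tree(trans, BTRFS_FREE_SPACE_TREE_OBJECTID);
}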
1258 
1259 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1260 					 struct btrfs_fs_info *fs_info)
1261 {
1262 	struct btrfs_root *root;
1263 	struct extent_buffer *leaf;
1264 
1265 	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
1266 	if (!root)
1267 		return ERR_PTR(-ENOMEM);
1268 
1269 	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1270 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1271 	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1272 
1273 	/*
1274 	 * DON'T set SHAREABLE bit for log trees.
1275 	 *
1276 	 * Log trees are not exposed to user space thus can't be snapshotted,
1277 	 * and they go away before a real commit is actually done.
1278 	 *
1279 	 * They do store pointers to file data extents, and those reference
1280 	 * counts still get updated (along with back refs to the log tree).
1281 	 */
1282 
1283 	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
1284 			NULL, 0, 0, 0);
1285 	if (IS_ERR(leaf)) {
1286 		btrfs_put_root(root);
1287 		return ERR_CAST(leaf);
1288 	}
1289 
1290 	root->node = leaf;
1291 
1292 	btrfs_mark_buffer_dirty(root->node);
1293 	btrfs_tree_unlock(root->node);
1294 	return root;
1295 }
1296 
1297 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1298 			     struct btrfs_fs_info *fs_info)
1299 {
1300 	struct btrfs_root *log_root;
1301 
1302 	log_root = alloc_log_tree(trans, fs_info);
1303 	if (IS_ERR(log_root))
1304 		return PTR_ERR(log_root);
1305 	WARN_ON(fs_info->log_root_tree);
1306 	fs_info->log_root_tree = log_root;
1307 	return 0;
1308 }
1309 
1310 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1311 		       struct btrfs_root *root)
1312 {
1313 	struct btrfs_fs_info *fs_info = root->fs_info;
1314 	struct btrfs_root *log_root;
1315 	struct btrfs_inode_item *inode_item;
1316 
1317 	log_root = alloc_log_tree(trans, fs_info);
1318 	if (IS_ERR(log_root))
1319 		return PTR_ERR(log_root);
1320 
1321 	log_root->last_trans = trans->transid;
1322 	log_root->root_key.offset = root->root_key.objectid;
1323 
1324 	inode_item = &log_root->root_item.inode;
1325 	btrfs_set_stack_inode_generation(inode_item, 1);
1326 	btrfs_set_stack_inode_size(inode_item, 3);
1327 	btrfs_set_stack_inode_nlink(inode_item, 1);
1328 	btrfs_set_stack_inode_nbytes(inode_item,
1329 				     fs_info->nodesize);
1330 	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1331 
1332 	btrfs_set_root_node(&log_root->root_item, log_root->node);
1333 
1334 	WARN_ON(root->log_root);
1335 	root->log_root = log_root;
1336 	root->log_transid = 0;
1337 	root->log_transid_committed = -1;
1338 	root->last_log_commit = 0;
1339 	return 0;
1340 }
1341 
1342 struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1343 					struct btrfs_key *key)
1344 {
1345 	struct btrfs_root *root;
1346 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
1347 	struct btrfs_path *path;
1348 	u64 generation;
1349 	int ret;
1350 	int level;
1351 
1352 	path = btrfs_alloc_path();
1353 	if (!path)
1354 		return ERR_PTR(-ENOMEM);
1355 
1356 	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
1357 	if (!root) {
1358 		ret = -ENOMEM;
1359 		goto alloc_fail;
1360 	}
1361 
1362 	ret = btrfs_find_root(tree_root, key, path,
1363 			      &root->root_item, &root->root_key);
1364 	if (ret) {
1365 		if (ret > 0)
1366 			ret = -ENOENT;
1367 		goto find_fail;
1368 	}
1369 
1370 	generation = btrfs_root_generation(&root->root_item);
1371 	level = btrfs_root_level(&root->root_item);
1372 	root->node = read_tree_block(fs_info,
1373 				     btrfs_root_bytenr(&root->root_item),
1374 				     generation, level, NULL);
1375 	if (IS_ERR(root->node)) {
1376 		ret = PTR_ERR(root->node);
1377 		root->node = NULL;
1378 		goto find_fail;
1379 	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1380 		ret = -EIO;
1381 		goto find_fail;
1382 	}
1383 	root->commit_root = btrfs_root_node(root);
1384 out:
1385 	btrfs_free_path(path);
1386 	return root;
1387 
1388 find_fail:
1389 	btrfs_put_root(root);
1390 alloc_fail:
1391 	root = ERR_PTR(ret);
1392 	goto out;
1393 }
1394 
1395 /*
1396  * Initialize subvolume root in-memory structure
1397  *
1398  * @anon_dev:	anonymous device to attach to the root, if zero, allocate new
1399  */
1400 static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
1401 {
1402 	int ret;
1403 	unsigned int nofs_flag;
1404 
1405 	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1406 	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1407 					GFP_NOFS);
1408 	if (!root->free_ino_pinned || !root->free_ino_ctl) {
1409 		ret = -ENOMEM;
1410 		goto fail;
1411 	}
1412 
1413 	/*
1414 	 * We might be called under a transaction (e.g. indirect backref
1415 	 * resolution), which could deadlock if it triggers memory reclaim.
1416 	 */
1417 	nofs_flag = memalloc_nofs_save();
1418 	ret = btrfs_drew_lock_init(&root->snapshot_lock);
1419 	memalloc_nofs_restore(nofs_flag);
1420 	if (ret)
1421 		goto fail;
1422 
1423 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
1424 	    root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
1425 		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
1426 		btrfs_check_and_init_root_item(&root->root_item);
1427 	}
1428 
1429 	btrfs_init_free_ino_ctl(root);
1430 	spin_lock_init(&root->ino_cache_lock);
1431 	init_waitqueue_head(&root->ino_cache_wait);
1432 
1433 	/*
1434 	 * Don't assign an anonymous block device to roots that are not exposed
1435 	 * to userspace; the id pool is limited to 1M.
1436 	 */
1437 	if (is_fstree(root->root_key.objectid) &&
1438 	    btrfs_root_refs(&root->root_item) > 0) {
1439 		if (!anon_dev) {
1440 			ret = get_anon_bdev(&root->anon_dev);
1441 			if (ret)
1442 				goto fail;
1443 		} else {
1444 			root->anon_dev = anon_dev;
1445 		}
1446 	}
1447 
1448 	mutex_lock(&root->objectid_mutex);
1449 	ret = btrfs_find_highest_objectid(root,
1450 					&root->highest_objectid);
1451 	if (ret) {
1452 		mutex_unlock(&root->objectid_mutex);
1453 		goto fail;
1454 	}
1455 
1456 	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
1457 
1458 	mutex_unlock(&root->objectid_mutex);
1459 
1460 	return 0;
1461 fail:
1462 	/* The caller is responsible for calling btrfs_free_fs_root */
1463 	return ret;
1464 }
1465 
1466 static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1467 					       u64 root_id)
1468 {
1469 	struct btrfs_root *root;
1470 
1471 	spin_lock(&fs_info->fs_roots_radix_lock);
1472 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1473 				 (unsigned long)root_id);
1474 	if (root)
1475 		root = btrfs_grab_root(root);
1476 	spin_unlock(&fs_info->fs_roots_radix_lock);
1477 	return root;
1478 }
1479 
1480 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1481 			 struct btrfs_root *root)
1482 {
1483 	int ret;
1484 
1485 	ret = radix_tree_preload(GFP_NOFS);
1486 	if (ret)
1487 		return ret;
1488 
1489 	spin_lock(&fs_info->fs_roots_radix_lock);
1490 	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1491 				(unsigned long)root->root_key.objectid,
1492 				root);
1493 	if (ret == 0) {
1494 		btrfs_grab_root(root);
1495 		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1496 	}
1497 	spin_unlock(&fs_info->fs_roots_radix_lock);
1498 	radix_tree_preload_end();
1499 
1500 	return ret;
1501 }
1502 
1503 void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
1504 {
1505 #ifdef CONFIG_BTRFS_DEBUG
1506 	struct btrfs_root *root;
1507 
1508 	while (!list_empty(&fs_info->allocated_roots)) {
1509 		root = list_first_entry(&fs_info->allocated_roots,
1510 					struct btrfs_root, leak_list);
1511 		btrfs_err(fs_info, "leaked root %llu-%llu refcount %d",
1512 			  root->root_key.objectid, root->root_key.offset,
1513 			  refcount_read(&root->refs));
1514 		while (refcount_read(&root->refs) > 1)
1515 			btrfs_put_root(root);
1516 		btrfs_put_root(root);
1517 	}
1518 #endif
1519 }
1520 
1521 void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
1522 {
1523 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
1524 	percpu_counter_destroy(&fs_info->delalloc_bytes);
1525 	percpu_counter_destroy(&fs_info->dio_bytes);
1526 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
1527 	btrfs_free_csum_hash(fs_info);
1528 	btrfs_free_stripe_hash_table(fs_info);
1529 	btrfs_free_ref_cache(fs_info);
1530 	kfree(fs_info->balance_ctl);
1531 	kfree(fs_info->delayed_root);
1532 	btrfs_put_root(fs_info->extent_root);
1533 	btrfs_put_root(fs_info->tree_root);
1534 	btrfs_put_root(fs_info->chunk_root);
1535 	btrfs_put_root(fs_info->dev_root);
1536 	btrfs_put_root(fs_info->csum_root);
1537 	btrfs_put_root(fs_info->quota_root);
1538 	btrfs_put_root(fs_info->uuid_root);
1539 	btrfs_put_root(fs_info->free_space_root);
1540 	btrfs_put_root(fs_info->fs_root);
1541 	btrfs_put_root(fs_info->data_reloc_root);
1542 	btrfs_check_leaked_roots(fs_info);
1543 	btrfs_extent_buffer_leak_debug_check(fs_info);
1544 	kfree(fs_info->super_copy);
1545 	kfree(fs_info->super_for_commit);
1546 	kvfree(fs_info);
1547 }
1548 
1549 
1550 /*
1551  * Get an in-memory reference of a root structure.
1552  *
1553  * For essential trees like the root/extent tree, we grab them from fs_info directly.
1554  * For subvolume trees, we check the cached filesystem roots first. If not
1555  * found, then read it from disk and add it to cached fs roots.
1556  *
1557  * Caller should release the root by calling btrfs_put_root() after use.
1558  *
1559  * NOTE: Reloc and log trees can't be read by this function as they share the
1560  *	 same root objectid.
1561  *
1562  * @objectid:	root id
1563  * @anon_dev:	preallocated anonymous block device number for new roots,
1564  * 		pass 0 for new allocation.
1565  * @check_ref:	whether to check root item references; if true, return -ENOENT
1566  *		for orphan roots
1567  */
1568 static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
1569 					     u64 objectid, dev_t anon_dev,
1570 					     bool check_ref)
1571 {
1572 	struct btrfs_root *root;
1573 	struct btrfs_path *path;
1574 	struct btrfs_key key;
1575 	int ret;
1576 
1577 	if (objectid == BTRFS_ROOT_TREE_OBJECTID)
1578 		return btrfs_grab_root(fs_info->tree_root);
1579 	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
1580 		return btrfs_grab_root(fs_info->extent_root);
1581 	if (objectid == BTRFS_CHUNK_TREE_OBJECTID)
1582 		return btrfs_grab_root(fs_info->chunk_root);
1583 	if (objectid == BTRFS_DEV_TREE_OBJECTID)
1584 		return btrfs_grab_root(fs_info->dev_root);
1585 	if (objectid == BTRFS_CSUM_TREE_OBJECTID)
1586 		return btrfs_grab_root(fs_info->csum_root);
1587 	if (objectid == BTRFS_QUOTA_TREE_OBJECTID)
1588 		return btrfs_grab_root(fs_info->quota_root) ?
1589 			fs_info->quota_root : ERR_PTR(-ENOENT);
1590 	if (objectid == BTRFS_UUID_TREE_OBJECTID)
1591 		return btrfs_grab_root(fs_info->uuid_root) ?
1592 			fs_info->uuid_root : ERR_PTR(-ENOENT);
1593 	if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
1594 		return btrfs_grab_root(fs_info->free_space_root) ?
1595 			fs_info->free_space_root : ERR_PTR(-ENOENT);
1596 again:
1597 	root = btrfs_lookup_fs_root(fs_info, objectid);
1598 	if (root) {
1599 		/* Shouldn't get preallocated anon_dev for cached roots */
1600 		ASSERT(!anon_dev);
1601 		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1602 			btrfs_put_root(root);
1603 			return ERR_PTR(-ENOENT);
1604 		}
1605 		return root;
1606 	}
1607 
1608 	key.objectid = objectid;
1609 	key.type = BTRFS_ROOT_ITEM_KEY;
1610 	key.offset = (u64)-1;
1611 	root = btrfs_read_tree_root(fs_info->tree_root, &key);
1612 	if (IS_ERR(root))
1613 		return root;
1614 
1615 	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1616 		ret = -ENOENT;
1617 		goto fail;
1618 	}
1619 
1620 	ret = btrfs_init_fs_root(root, anon_dev);
1621 	if (ret)
1622 		goto fail;
1623 
1624 	path = btrfs_alloc_path();
1625 	if (!path) {
1626 		ret = -ENOMEM;
1627 		goto fail;
1628 	}
1629 	key.objectid = BTRFS_ORPHAN_OBJECTID;
1630 	key.type = BTRFS_ORPHAN_ITEM_KEY;
1631 	key.offset = objectid;
1632 
1633 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1634 	btrfs_free_path(path);
1635 	if (ret < 0)
1636 		goto fail;
1637 	if (ret == 0)
1638 		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1639 
1640 	ret = btrfs_insert_fs_root(fs_info, root);
1641 	if (ret) {
1642 		btrfs_put_root(root);
1643 		if (ret == -EEXIST)
1644 			goto again;
1645 		goto fail;
1646 	}
1647 	return root;
1648 fail:
1649 	btrfs_put_root(root);
1650 	return ERR_PTR(ret);
1651 }
1652 
1653 /*
1654  * Get in-memory reference of a root structure
1655  *
1656  * @objectid:	tree objectid
1657  * @check_ref:	if set, verify that the tree exists and the item has at least
1658  *		one reference
1659  */
1660 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1661 				     u64 objectid, bool check_ref)
1662 {
1663 	return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
1664 }
1665 
1666 /*
1667  * Get in-memory reference of a root structure, created as new; optionally
1668  * pass the anonymous block device id
1669  *
1670  * @objectid:	tree objectid
1671  * @anon_dev:	if zero, allocate a new anonymous block device, otherwise
1672  *		use the given value
1673  */
1674 struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
1675 					 u64 objectid, dev_t anon_dev)
1676 {
1677 	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
1678 }
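
/*
 * Usage sketch (hypothetical): resolve the top-level subvolume root and
 * drop the reference when done with it.
 */
static __maybe_unused int lookup_fs_tree_example(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
	if (IS_ERR(root))
		return PTR_ERR(root);
	/* ... use root ... */
	btrfs_put_root(root);
	return 0;
}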
1679 
1680 /*
1681  * Called by the kthread helper functions to finally call the bio end_io
1682  * functions.  This is where read checksum verification actually happens.
1683  */
1684 static void end_workqueue_fn(struct btrfs_work *work)
1685 {
1686 	struct bio *bio;
1687 	struct btrfs_end_io_wq *end_io_wq;
1688 
1689 	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
1690 	bio = end_io_wq->bio;
1691 
1692 	bio->bi_status = end_io_wq->status;
1693 	bio->bi_private = end_io_wq->private;
1694 	bio->bi_end_io = end_io_wq->end_io;
1695 	bio_endio(bio);
1696 	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
1697 }
1698 
1699 static int cleaner_kthread(void *arg)
1700 {
1701 	struct btrfs_root *root = arg;
1702 	struct btrfs_fs_info *fs_info = root->fs_info;
1703 	int again;
1704 
1705 	while (1) {
1706 		again = 0;
1707 
1708 		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1709 
1710 		/* Make the cleaner go to sleep early. */
1711 		if (btrfs_need_cleaner_sleep(fs_info))
1712 			goto sleep;
1713 
1714 		/*
1715 		 * Do not do anything if we might cause open_ctree() to block
1716 		 * before we have finished mounting the filesystem.
1717 		 */
1718 		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1719 			goto sleep;
1720 
1721 		if (!mutex_trylock(&fs_info->cleaner_mutex))
1722 			goto sleep;
1723 
1724 		/*
1725 		 * Guard against the fs status having changed between the
1726 		 * check above and taking the mutex.
1727 		 */
1728 		if (btrfs_need_cleaner_sleep(fs_info)) {
1729 			mutex_unlock(&fs_info->cleaner_mutex);
1730 			goto sleep;
1731 		}
1732 
1733 		btrfs_run_delayed_iputs(fs_info);
1734 
1735 		again = btrfs_clean_one_deleted_snapshot(root);
1736 		mutex_unlock(&fs_info->cleaner_mutex);
1737 
1738 		/*
1739 		 * The defragger has dealt with the R/O remount and umount,
1740 		 * so we needn't do anything special here.
1741 		 */
1742 		btrfs_run_defrag_inodes(fs_info);
1743 
1744 		/*
1745 		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1746 		 * with relocation (btrfs_relocate_chunk) and relocation
1747 		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1748 		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1749 		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1750 		 * unused block groups.
1751 		 */
1752 		btrfs_delete_unused_bgs(fs_info);
1753 sleep:
1754 		clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1755 		if (kthread_should_park())
1756 			kthread_parkme();
1757 		if (kthread_should_stop())
1758 			return 0;
1759 		if (!again) {
1760 			set_current_state(TASK_INTERRUPTIBLE);
1761 			schedule();
1762 			__set_current_state(TASK_RUNNING);
1763 		}
1764 	}
1765 }
1766 
1767 static int transaction_kthread(void *arg)
1768 {
1769 	struct btrfs_root *root = arg;
1770 	struct btrfs_fs_info *fs_info = root->fs_info;
1771 	struct btrfs_trans_handle *trans;
1772 	struct btrfs_transaction *cur;
1773 	u64 transid;
1774 	time64_t now;
1775 	unsigned long delay;
1776 	bool cannot_commit;
1777 
1778 	do {
1779 		cannot_commit = false;
1780 		delay = HZ * fs_info->commit_interval;
1781 		mutex_lock(&fs_info->transaction_kthread_mutex);
1782 
1783 		spin_lock(&fs_info->trans_lock);
1784 		cur = fs_info->running_transaction;
1785 		if (!cur) {
1786 			spin_unlock(&fs_info->trans_lock);
1787 			goto sleep;
1788 		}
1789 
1790 		now = ktime_get_seconds();
1791 		if (cur->state < TRANS_STATE_COMMIT_START &&
1792 		    (now < cur->start_time ||
1793 		     now - cur->start_time < fs_info->commit_interval)) {
1794 			spin_unlock(&fs_info->trans_lock);
1795 			delay = HZ * 5;
1796 			goto sleep;
1797 		}
1798 		transid = cur->transid;
1799 		spin_unlock(&fs_info->trans_lock);
1800 
1801 		/* If the file system is aborted, this will always fail. */
1802 		trans = btrfs_attach_transaction(root);
1803 		if (IS_ERR(trans)) {
1804 			if (PTR_ERR(trans) != -ENOENT)
1805 				cannot_commit = true;
1806 			goto sleep;
1807 		}
1808 		if (transid == trans->transid) {
1809 			btrfs_commit_transaction(trans);
1810 		} else {
1811 			btrfs_end_transaction(trans);
1812 		}
1813 sleep:
1814 		wake_up_process(fs_info->cleaner_kthread);
1815 		mutex_unlock(&fs_info->transaction_kthread_mutex);
1816 
1817 		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1818 				      &fs_info->fs_state)))
1819 			btrfs_cleanup_transaction(fs_info);
1820 		if (!kthread_should_stop() &&
1821 				(!btrfs_transaction_blocked(fs_info) ||
1822 				 cannot_commit))
1823 			schedule_timeout_interruptible(delay);
1824 	} while (!kthread_should_stop());
1825 	return 0;
1826 }
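
/*
 * Worked example of the timing above, assuming the default commit
 * interval of 30 seconds: the kthread normally sleeps for
 * delay = 30 * HZ; if it finds a running transaction younger than the
 * interval (and not yet committing), it re-arms with a short 5 * HZ
 * delay and re-checks, otherwise it attaches to the transaction and
 * commits it.
 */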
1827 
1828 /*
1829  * This will find the highest generation in the array of root backups.  The
1830  * index of the newest backup slot is returned, or -EINVAL if we can't find
1831  * anything.
1832  *
1833  * We check that the array is valid by comparing the generation of
1834  * the latest root in the array with the generation in the super
1835  * block.  If they don't match we pitch it.
1836  */
1837 static int find_newest_super_backup(struct btrfs_fs_info *info)
1838 {
1839 	const u64 newest_gen = btrfs_super_generation(info->super_copy);
1840 	u64 cur;
1841 	struct btrfs_root_backup *root_backup;
1842 	int i;
1843 
1844 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1845 		root_backup = info->super_copy->super_roots + i;
1846 		cur = btrfs_backup_tree_root_gen(root_backup);
1847 		if (cur == newest_gen)
1848 			return i;
1849 	}
1850 
1851 	return -EINVAL;
1852 }
1853 
1854 /*
1855  * copy all the root pointers into the super backup array.
1856  * this will bump the backup pointer by one when it is
1857  * done
1858  */
1859 static void backup_super_roots(struct btrfs_fs_info *info)
1860 {
1861 	const int next_backup = info->backup_root_index;
1862 	struct btrfs_root_backup *root_backup;
1863 
1864 	root_backup = info->super_for_commit->super_roots + next_backup;
1865 
1866 	/*
1867 	 * make sure all of our padding and empty slots get zero filled
1868 	 * regardless of which ones we use today
1869 	 */
1870 	memset(root_backup, 0, sizeof(*root_backup));
1871 
1872 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1873 
1874 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1875 	btrfs_set_backup_tree_root_gen(root_backup,
1876 			       btrfs_header_generation(info->tree_root->node));
1877 
1878 	btrfs_set_backup_tree_root_level(root_backup,
1879 			       btrfs_header_level(info->tree_root->node));
1880 
1881 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1882 	btrfs_set_backup_chunk_root_gen(root_backup,
1883 			       btrfs_header_generation(info->chunk_root->node));
1884 	btrfs_set_backup_chunk_root_level(root_backup,
1885 			       btrfs_header_level(info->chunk_root->node));
1886 
1887 	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1888 	btrfs_set_backup_extent_root_gen(root_backup,
1889 			       btrfs_header_generation(info->extent_root->node));
1890 	btrfs_set_backup_extent_root_level(root_backup,
1891 			       btrfs_header_level(info->extent_root->node));
1892 
1893 	/*
1894 	 * we might commit during log recovery, which happens before we set
1895 	 * the fs_root.  Make sure it is valid before we fill it in.
1896 	 */
1897 	if (info->fs_root && info->fs_root->node) {
1898 		btrfs_set_backup_fs_root(root_backup,
1899 					 info->fs_root->node->start);
1900 		btrfs_set_backup_fs_root_gen(root_backup,
1901 			       btrfs_header_generation(info->fs_root->node));
1902 		btrfs_set_backup_fs_root_level(root_backup,
1903 			       btrfs_header_level(info->fs_root->node));
1904 	}
1905 
1906 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1907 	btrfs_set_backup_dev_root_gen(root_backup,
1908 			       btrfs_header_generation(info->dev_root->node));
1909 	btrfs_set_backup_dev_root_level(root_backup,
1910 				       btrfs_header_level(info->dev_root->node));
1911 
1912 	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1913 	btrfs_set_backup_csum_root_gen(root_backup,
1914 			       btrfs_header_generation(info->csum_root->node));
1915 	btrfs_set_backup_csum_root_level(root_backup,
1916 			       btrfs_header_level(info->csum_root->node));
1917 
1918 	btrfs_set_backup_total_bytes(root_backup,
1919 			     btrfs_super_total_bytes(info->super_copy));
1920 	btrfs_set_backup_bytes_used(root_backup,
1921 			     btrfs_super_bytes_used(info->super_copy));
1922 	btrfs_set_backup_num_devices(root_backup,
1923 			     btrfs_super_num_devices(info->super_copy));
1924 
1925 	/*
1926 	 * if we don't copy this out to the super_copy, it won't get remembered
1927 	 * for the next commit
1928 	 */
1929 	memcpy(&info->super_copy->super_roots,
1930 	       &info->super_for_commit->super_roots,
1931 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1932 }
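
/*
 * Example of the ring behaviour above, assuming BTRFS_NUM_BACKUP_ROOTS
 * is 4: successive commits fill slots 0, 1, 2, 3, 0, 1, ... so the most
 * recent backup always sits at
 * (backup_root_index + BTRFS_NUM_BACKUP_ROOTS - 1) % BTRFS_NUM_BACKUP_ROOTS.
 */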
1933 
1934 /*
1935  * read_backup_root - Reads a backup root based on the passed priority. Prio 0
1936  * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots.
1937  *
1938  * @fs_info:  filesystem whose backup roots need to be read
1939  * @priority: priority of the backup root required
1940  *
1941  * Returns backup root index on success and -EINVAL otherwise.
1942  */
1943 static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
1944 {
1945 	int backup_index = find_newest_super_backup(fs_info);
1946 	struct btrfs_super_block *super = fs_info->super_copy;
1947 	struct btrfs_root_backup *root_backup;
1948 
1949 	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
1950 		if (priority == 0)
1951 			return backup_index;
1952 
1953 		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
1954 		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
1955 	} else {
1956 		return -EINVAL;
1957 	}
1958 
1959 	root_backup = super->super_roots + backup_index;
1960 
1961 	btrfs_set_super_generation(super,
1962 				   btrfs_backup_tree_root_gen(root_backup));
1963 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1964 	btrfs_set_super_root_level(super,
1965 				   btrfs_backup_tree_root_level(root_backup));
1966 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1967 
1968 	/*
1969 	 * Fixme: the total bytes and num_devices need to match, otherwise
1970 	 * we should require a fsck.
1971 	 */
1972 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1973 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1974 
1975 	return backup_index;
1976 }
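
/*
 * Worked example of the index arithmetic above, assuming
 * BTRFS_NUM_BACKUP_ROOTS == 4: if the newest backup lives in slot 2,
 * priority 1 maps to (2 + 4 - 1) % 4 == 1, priority 2 to slot 0 and
 * priority 3 to slot 3, walking the ring from newest to oldest.
 */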
1977 
1978 /* helper to cleanup workers */
1979 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1980 {
1981 	btrfs_destroy_workqueue(fs_info->fixup_workers);
1982 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1983 	btrfs_destroy_workqueue(fs_info->workers);
1984 	btrfs_destroy_workqueue(fs_info->endio_workers);
1985 	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
1986 	btrfs_destroy_workqueue(fs_info->rmw_workers);
1987 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
1988 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1989 	btrfs_destroy_workqueue(fs_info->delayed_workers);
1990 	btrfs_destroy_workqueue(fs_info->caching_workers);
1991 	btrfs_destroy_workqueue(fs_info->readahead_workers);
1992 	btrfs_destroy_workqueue(fs_info->flush_workers);
1993 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
1994 	if (fs_info->discard_ctl.discard_workers)
1995 		destroy_workqueue(fs_info->discard_ctl.discard_workers);
1996 	/*
1997 	 * Now that all other work queues are destroyed, we can safely destroy
1998 	 * the queues used for metadata I/O, since tasks from those other work
1999 	 * queues can do metadata I/O operations.
2000 	 */
2001 	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2002 	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2003 }
2004 
2005 static void free_root_extent_buffers(struct btrfs_root *root)
2006 {
2007 	if (root) {
2008 		free_extent_buffer(root->node);
2009 		free_extent_buffer(root->commit_root);
2010 		root->node = NULL;
2011 		root->commit_root = NULL;
2012 	}
2013 }
2014 
2015 /* helper to cleanup tree roots */
2016 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
2017 {
2018 	free_root_extent_buffers(info->tree_root);
2019 
2020 	free_root_extent_buffers(info->dev_root);
2021 	free_root_extent_buffers(info->extent_root);
2022 	free_root_extent_buffers(info->csum_root);
2023 	free_root_extent_buffers(info->quota_root);
2024 	free_root_extent_buffers(info->uuid_root);
2025 	free_root_extent_buffers(info->fs_root);
2026 	free_root_extent_buffers(info->data_reloc_root);
2027 	if (free_chunk_root)
2028 		free_root_extent_buffers(info->chunk_root);
2029 	free_root_extent_buffers(info->free_space_root);
2030 }
2031 
2032 void btrfs_put_root(struct btrfs_root *root)
2033 {
2034 	if (!root)
2035 		return;
2036 
2037 	if (refcount_dec_and_test(&root->refs)) {
2038 		WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2039 		WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
2040 		if (root->anon_dev)
2041 			free_anon_bdev(root->anon_dev);
2042 		btrfs_drew_lock_destroy(&root->snapshot_lock);
2043 		free_root_extent_buffers(root);
2044 		kfree(root->free_ino_ctl);
2045 		kfree(root->free_ino_pinned);
2046 #ifdef CONFIG_BTRFS_DEBUG
2047 		spin_lock(&root->fs_info->fs_roots_radix_lock);
2048 		list_del_init(&root->leak_list);
2049 		spin_unlock(&root->fs_info->fs_roots_radix_lock);
2050 #endif
2051 		kfree(root);
2052 	}
2053 }
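
/*
 * Usage sketch (not code from this file): btrfs_put_root() pairs with
 * btrfs_grab_root(), which returns the root with an extra reference or
 * NULL if it is already gone:
 *
 *	root = btrfs_grab_root(fs_info->tree_root);
 *	if (root) {
 *		... use root ...
 *		btrfs_put_root(root);
 *	}
 */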
2054 
2055 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2056 {
2057 	int ret;
2058 	struct btrfs_root *gang[8];
2059 	int i;
2060 
2061 	while (!list_empty(&fs_info->dead_roots)) {
2062 		gang[0] = list_entry(fs_info->dead_roots.next,
2063 				     struct btrfs_root, root_list);
2064 		list_del(&gang[0]->root_list);
2065 
2066 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
2067 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2068 		btrfs_put_root(gang[0]);
2069 	}
2070 
2071 	while (1) {
2072 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2073 					     (void **)gang, 0,
2074 					     ARRAY_SIZE(gang));
2075 		if (!ret)
2076 			break;
2077 		for (i = 0; i < ret; i++)
2078 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2079 	}
2080 }
2081 
2082 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2083 {
2084 	mutex_init(&fs_info->scrub_lock);
2085 	atomic_set(&fs_info->scrubs_running, 0);
2086 	atomic_set(&fs_info->scrub_pause_req, 0);
2087 	atomic_set(&fs_info->scrubs_paused, 0);
2088 	atomic_set(&fs_info->scrub_cancel_req, 0);
2089 	init_waitqueue_head(&fs_info->scrub_pause_wait);
2090 	refcount_set(&fs_info->scrub_workers_refcnt, 0);
2091 }
2092 
2093 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2094 {
2095 	spin_lock_init(&fs_info->balance_lock);
2096 	mutex_init(&fs_info->balance_mutex);
2097 	atomic_set(&fs_info->balance_pause_req, 0);
2098 	atomic_set(&fs_info->balance_cancel_req, 0);
2099 	fs_info->balance_ctl = NULL;
2100 	init_waitqueue_head(&fs_info->balance_wait_q);
2101 }
2102 
2103 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2104 {
2105 	struct inode *inode = fs_info->btree_inode;
2106 
2107 	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2108 	set_nlink(inode, 1);
2109 	/*
2110 	 * We set the i_size on the btree inode to the max possible offset
2111 	 * (OFFSET_MAX); the real end of the address space is determined by
2112 	 * all of the devices in the system.
2113 	 */
2114 	inode->i_size = OFFSET_MAX;
2115 	inode->i_mapping->a_ops = &btree_aops;
2116 
2117 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2118 	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
2119 			    IO_TREE_INODE_IO, inode);
2120 	BTRFS_I(inode)->io_tree.track_uptodate = false;
2121 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2122 
2123 	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2124 
2125 	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
2126 	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2127 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2128 	btrfs_insert_inode_hash(inode);
2129 }
2130 
2131 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2132 {
2133 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2134 	init_rwsem(&fs_info->dev_replace.rwsem);
2135 	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
2136 }
2137 
2138 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2139 {
2140 	spin_lock_init(&fs_info->qgroup_lock);
2141 	mutex_init(&fs_info->qgroup_ioctl_lock);
2142 	fs_info->qgroup_tree = RB_ROOT;
2143 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2144 	fs_info->qgroup_seq = 1;
2145 	fs_info->qgroup_ulist = NULL;
2146 	fs_info->qgroup_rescan_running = false;
2147 	mutex_init(&fs_info->qgroup_rescan_lock);
2148 }
2149 
2150 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2151 		struct btrfs_fs_devices *fs_devices)
2152 {
2153 	u32 max_active = fs_info->thread_pool_size;
2154 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2155 
2156 	fs_info->workers =
2157 		btrfs_alloc_workqueue(fs_info, "worker",
2158 				      flags | WQ_HIGHPRI, max_active, 16);
2159 
2160 	fs_info->delalloc_workers =
2161 		btrfs_alloc_workqueue(fs_info, "delalloc",
2162 				      flags, max_active, 2);
2163 
2164 	fs_info->flush_workers =
2165 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2166 				      flags, max_active, 0);
2167 
2168 	fs_info->caching_workers =
2169 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2170 
2171 	fs_info->fixup_workers =
2172 		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2173 
2174 	/*
2175 	 * endios are largely parallel and should have a very
2176 	 * low idle thresh
2177 	 */
2178 	fs_info->endio_workers =
2179 		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2180 	fs_info->endio_meta_workers =
2181 		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2182 				      max_active, 4);
2183 	fs_info->endio_meta_write_workers =
2184 		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2185 				      max_active, 2);
2186 	fs_info->endio_raid56_workers =
2187 		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2188 				      max_active, 4);
2189 	fs_info->rmw_workers =
2190 		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2191 	fs_info->endio_write_workers =
2192 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2193 				      max_active, 2);
2194 	fs_info->endio_freespace_worker =
2195 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2196 				      max_active, 0);
2197 	fs_info->delayed_workers =
2198 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2199 				      max_active, 0);
2200 	fs_info->readahead_workers =
2201 		btrfs_alloc_workqueue(fs_info, "readahead", flags,
2202 				      max_active, 2);
2203 	fs_info->qgroup_rescan_workers =
2204 		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2205 	fs_info->discard_ctl.discard_workers =
2206 		alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1);
2207 
2208 	if (!(fs_info->workers && fs_info->delalloc_workers &&
2209 	      fs_info->flush_workers &&
2210 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2211 	      fs_info->endio_meta_write_workers &&
2212 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2213 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2214 	      fs_info->caching_workers && fs_info->readahead_workers &&
2215 	      fs_info->fixup_workers && fs_info->delayed_workers &&
2216 	      fs_info->qgroup_rescan_workers &&
2217 	      fs_info->discard_ctl.discard_workers)) {
2218 		return -ENOMEM;
2219 	}
2220 
2221 	return 0;
2222 }
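
/*
 * A note on the btrfs_alloc_workqueue() calls above: the last two
 * arguments are the active-worker limit and the threshold used to
 * scale concurrency, so e.g. the "endio" queue is created with up to
 * thread_pool_size workers and a low threshold of 4 because end-io
 * items are small and highly parallel, while "fixup" is deliberately
 * limited to a single worker.
 */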
2223 
2224 static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2225 {
2226 	struct crypto_shash *csum_shash;
2227 	const char *csum_driver = btrfs_super_csum_driver(csum_type);
2228 
2229 	csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
2230 
2231 	if (IS_ERR(csum_shash)) {
2232 		btrfs_err(fs_info, "error allocating %s hash for checksum",
2233 			  csum_driver);
2234 		return PTR_ERR(csum_shash);
2235 	}
2236 
2237 	fs_info->csum_shash = csum_shash;
2238 
2239 	return 0;
2240 }
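
/*
 * A minimal sketch of how the shash allocated above is typically used
 * to checksum a buffer (this mirrors the generic crypto API, not code
 * in this function):
 *
 *	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 *	shash->tfm = fs_info->csum_shash;
 *	crypto_shash_digest(shash, data, len, result);
 */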
2241 
2242 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2243 			    struct btrfs_fs_devices *fs_devices)
2244 {
2245 	int ret;
2246 	struct btrfs_root *log_tree_root;
2247 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2248 	u64 bytenr = btrfs_super_log_root(disk_super);
2249 	int level = btrfs_super_log_root_level(disk_super);
2250 
2251 	if (fs_devices->rw_devices == 0) {
2252 		btrfs_warn(fs_info, "log replay required on RO media");
2253 		return -EIO;
2254 	}
2255 
2256 	log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2257 					 GFP_KERNEL);
2258 	if (!log_tree_root)
2259 		return -ENOMEM;
2260 
2261 	log_tree_root->node = read_tree_block(fs_info, bytenr,
2262 					      fs_info->generation + 1,
2263 					      level, NULL);
2264 	if (IS_ERR(log_tree_root->node)) {
2265 		btrfs_warn(fs_info, "failed to read log tree");
2266 		ret = PTR_ERR(log_tree_root->node);
2267 		log_tree_root->node = NULL;
2268 		btrfs_put_root(log_tree_root);
2269 		return ret;
2270 	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
2271 		btrfs_err(fs_info, "failed to read log tree");
2272 		btrfs_put_root(log_tree_root);
2273 		return -EIO;
2274 	}
2275 	/* returns with log_tree_root freed on success */
2276 	ret = btrfs_recover_log_trees(log_tree_root);
2277 	if (ret) {
2278 		btrfs_handle_fs_error(fs_info, ret,
2279 				      "Failed to recover log tree");
2280 		btrfs_put_root(log_tree_root);
2281 		return ret;
2282 	}
2283 
2284 	if (sb_rdonly(fs_info->sb)) {
2285 		ret = btrfs_commit_super(fs_info);
2286 		if (ret)
2287 			return ret;
2288 	}
2289 
2290 	return 0;
2291 }
2292 
2293 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2294 {
2295 	struct btrfs_root *tree_root = fs_info->tree_root;
2296 	struct btrfs_root *root;
2297 	struct btrfs_key location;
2298 	int ret;
2299 
2300 	BUG_ON(!fs_info->tree_root);
2301 
2302 	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2303 	location.type = BTRFS_ROOT_ITEM_KEY;
2304 	location.offset = 0;
2305 
2306 	root = btrfs_read_tree_root(tree_root, &location);
2307 	if (IS_ERR(root)) {
2308 		ret = PTR_ERR(root);
2309 		goto out;
2310 	}
2311 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2312 	fs_info->extent_root = root;
2313 
2314 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2315 	root = btrfs_read_tree_root(tree_root, &location);
2316 	if (IS_ERR(root)) {
2317 		ret = PTR_ERR(root);
2318 		goto out;
2319 	}
2320 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2321 	fs_info->dev_root = root;
2322 	btrfs_init_devices_late(fs_info);
2323 
2324 	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2325 	root = btrfs_read_tree_root(tree_root, &location);
2326 	if (IS_ERR(root)) {
2327 		ret = PTR_ERR(root);
2328 		goto out;
2329 	}
2330 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2331 	fs_info->csum_root = root;
2332 
2333 	/*
2334 	 * This tree can share blocks with some other fs tree during relocation
2335 	 * and we need a proper setup by btrfs_get_fs_root
2336 	 */
2337 	root = btrfs_get_fs_root(tree_root->fs_info,
2338 				 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
2339 	if (IS_ERR(root)) {
2340 		ret = PTR_ERR(root);
2341 		goto out;
2342 	}
2343 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2344 	fs_info->data_reloc_root = root;
2345 
2346 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2347 	root = btrfs_read_tree_root(tree_root, &location);
2348 	if (!IS_ERR(root)) {
2349 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2350 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2351 		fs_info->quota_root = root;
2352 	}
2353 
2354 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2355 	root = btrfs_read_tree_root(tree_root, &location);
2356 	if (IS_ERR(root)) {
2357 		ret = PTR_ERR(root);
2358 		if (ret != -ENOENT)
2359 			goto out;
2360 	} else {
2361 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2362 		fs_info->uuid_root = root;
2363 	}
2364 
2365 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2366 		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2367 		root = btrfs_read_tree_root(tree_root, &location);
2368 		if (IS_ERR(root)) {
2369 			ret = PTR_ERR(root);
2370 			goto out;
2371 		}
2372 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2373 		fs_info->free_space_root = root;
2374 	}
2375 
2376 	return 0;
2377 out:
2378 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2379 		   location.objectid, ret);
2380 	return ret;
2381 }
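
/*
 * Summary of the lookups above: the extent, dev, csum and data reloc
 * roots are mandatory and fail the mount when unreadable, the quota and
 * uuid roots are optional, and the free space tree root is required
 * only when the FREE_SPACE_TREE compat_ro feature is set.
 */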
2382 
2383 /*
2384  * Real super block validation
2385  * NOTE: super csum type and incompat features will not be checked here.
2386  *
2387  * @sb:		super block to check
2388  * @mirror_num:	the super block copy number whose bytenr is checked:
2389  * 		0	the primary (1st) sb
2390  * 		1, 2	2nd and 3rd backup copy
2391  * 	       -1	skip bytenr check
2392  */
2393 static int validate_super(struct btrfs_fs_info *fs_info,
2394 			    struct btrfs_super_block *sb, int mirror_num)
2395 {
2396 	u64 nodesize = btrfs_super_nodesize(sb);
2397 	u64 sectorsize = btrfs_super_sectorsize(sb);
2398 	int ret = 0;
2399 
2400 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2401 		btrfs_err(fs_info, "no valid FS found");
2402 		ret = -EINVAL;
2403 	}
2404 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2405 		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2406 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2407 		ret = -EINVAL;
2408 	}
2409 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2410 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2411 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2412 		ret = -EINVAL;
2413 	}
2414 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2415 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2416 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2417 		ret = -EINVAL;
2418 	}
2419 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2420 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2421 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2422 		ret = -EINVAL;
2423 	}
2424 
2425 	/*
2426 	 * Check sectorsize and nodesize first, other checks will need them.
2427 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2428 	 */
2429 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2430 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2431 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2432 		ret = -EINVAL;
2433 	}
2434 	/* Only PAGE SIZE is supported yet */
2435 	/* Only PAGE_SIZE is supported for now */
2436 		btrfs_err(fs_info,
2437 			"sectorsize %llu not supported yet, only support %lu",
2438 			sectorsize, PAGE_SIZE);
2439 		ret = -EINVAL;
2440 	}
2441 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2442 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2443 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2444 		ret = -EINVAL;
2445 	}
2446 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2447 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2448 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2449 		ret = -EINVAL;
2450 	}
2451 
2452 	/* Root alignment check */
2453 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2454 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2455 			   btrfs_super_root(sb));
2456 		ret = -EINVAL;
2457 	}
2458 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2459 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2460 			   btrfs_super_chunk_root(sb));
2461 		ret = -EINVAL;
2462 	}
2463 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2464 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2465 			   btrfs_super_log_root(sb));
2466 		ret = -EINVAL;
2467 	}
2468 
2469 	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2470 		   BTRFS_FSID_SIZE) != 0) {
2471 		btrfs_err(fs_info,
2472 			"dev_item UUID does not match metadata fsid: %pU != %pU",
2473 			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2474 		ret = -EINVAL;
2475 	}
2476 
2477 	/*
2478 	 * Hint to catch really bogus numbers, bitflips and the like; more
2479 	 * exact checks are done later.
2480 	 */
2481 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2482 		btrfs_err(fs_info, "bytes_used is too small %llu",
2483 			  btrfs_super_bytes_used(sb));
2484 		ret = -EINVAL;
2485 	}
2486 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2487 		btrfs_err(fs_info, "invalid stripesize %u",
2488 			  btrfs_super_stripesize(sb));
2489 		ret = -EINVAL;
2490 	}
2491 	if (btrfs_super_num_devices(sb) > (1UL << 31))
2492 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2493 			   btrfs_super_num_devices(sb));
2494 	if (btrfs_super_num_devices(sb) == 0) {
2495 		btrfs_err(fs_info, "number of devices is 0");
2496 		ret = -EINVAL;
2497 	}
2498 
2499 	if (mirror_num >= 0 &&
2500 	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2501 		btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2502 			  btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
2503 		ret = -EINVAL;
2504 	}
2505 
2506 	/*
2507 	 * Catch obvious sys_chunk_array corruptions: it must hold at least one key
2508 	 * and one chunk
2509 	 */
2510 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2511 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2512 			  btrfs_super_sys_array_size(sb),
2513 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2514 		ret = -EINVAL;
2515 	}
2516 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2517 			+ sizeof(struct btrfs_chunk)) {
2518 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2519 			  btrfs_super_sys_array_size(sb),
2520 			  sizeof(struct btrfs_disk_key)
2521 			  + sizeof(struct btrfs_chunk));
2522 		ret = -EINVAL;
2523 	}
2524 
2525 	/*
2526 	 * The generation is a global counter; we'll trust it more than the others,
2527 	 * but it's still possible that it's the one that's wrong.
2528 	 */
2529 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2530 		btrfs_warn(fs_info,
2531 			"suspicious: generation < chunk_root_generation: %llu < %llu",
2532 			btrfs_super_generation(sb),
2533 			btrfs_super_chunk_root_generation(sb));
2534 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2535 	    && btrfs_super_cache_generation(sb) != (u64)-1)
2536 		btrfs_warn(fs_info,
2537 			"suspicious: generation < cache_generation: %llu < %llu",
2538 			btrfs_super_generation(sb),
2539 			btrfs_super_cache_generation(sb));
2540 
2541 	return ret;
2542 }
2543 
2544 /*
2545  * Validation of super block at mount time.
2546  * Checks already done early at mount time, like csum type and incompat
2547  * flags, will be skipped.
2548  */
2549 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2550 {
2551 	return validate_super(fs_info, fs_info->super_copy, 0);
2552 }
2553 
2554 /*
2555  * Validation of super block at write time.
2556  * Some checks, like the bytenr check, will be skipped as their values will be
2557  * overwritten soon.
2558  * Extra checks like csum type and incompat flags will be done here.
2559  */
2560 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2561 				      struct btrfs_super_block *sb)
2562 {
2563 	int ret;
2564 
2565 	ret = validate_super(fs_info, sb, -1);
2566 	if (ret < 0)
2567 		goto out;
2568 	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2569 		ret = -EUCLEAN;
2570 		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2571 			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2572 		goto out;
2573 	}
2574 	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2575 		ret = -EUCLEAN;
2576 		btrfs_err(fs_info,
2577 		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2578 			  btrfs_super_incompat_flags(sb),
2579 			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2580 		goto out;
2581 	}
2582 out:
2583 	if (ret < 0)
2584 		btrfs_err(fs_info,
2585 		"super block corruption detected before writing it to disk");
2586 	return ret;
2587 }
2588 
2589 static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2590 {
2591 	int backup_index = find_newest_super_backup(fs_info);
2592 	struct btrfs_super_block *sb = fs_info->super_copy;
2593 	struct btrfs_root *tree_root = fs_info->tree_root;
2594 	bool handle_error = false;
2595 	int ret = 0;
2596 	int i;
2597 
2598 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2599 		u64 generation;
2600 		int level;
2601 
2602 		if (handle_error) {
2603 			if (!IS_ERR(tree_root->node))
2604 				free_extent_buffer(tree_root->node);
2605 			tree_root->node = NULL;
2606 
2607 			if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2608 				break;
2609 
2610 			free_root_pointers(fs_info, false);
2611 
2612 			/*
2613 			 * Don't use the log in recovery mode, it won't be
2614 			 * valid
2615 			 */
2616 			btrfs_set_super_log_root(sb, 0);
2617 
2618 			/* We can't trust the free space cache either */
2619 			btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2620 
2621 			ret = read_backup_root(fs_info, i);
2622 			backup_index = ret;
2623 			if (ret < 0)
2624 				return ret;
2625 		}
2626 		generation = btrfs_super_generation(sb);
2627 		level = btrfs_super_root_level(sb);
2628 		tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb),
2629 						  generation, level, NULL);
2630 		if (IS_ERR(tree_root->node) ||
2631 		    !extent_buffer_uptodate(tree_root->node)) {
2632 			handle_error = true;
2633 
2634 			if (IS_ERR(tree_root->node)) {
2635 				ret = PTR_ERR(tree_root->node);
2636 				tree_root->node = NULL;
2637 			} else if (!extent_buffer_uptodate(tree_root->node)) {
2638 				ret = -EUCLEAN;
2639 			}
2640 
2641 			btrfs_warn(fs_info, "failed to read tree root");
2642 			continue;
2643 		}
2644 
2645 		btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2646 		tree_root->commit_root = btrfs_root_node(tree_root);
2647 		btrfs_set_root_refs(&tree_root->root_item, 1);
2648 
2649 		/*
2650 		 * No need to hold btrfs_root::objectid_mutex since the fs
2651 		 * hasn't been fully initialised and we are the only user
2652 		 */
2653 		ret = btrfs_find_highest_objectid(tree_root,
2654 						&tree_root->highest_objectid);
2655 		if (ret < 0) {
2656 			handle_error = true;
2657 			continue;
2658 		}
2659 
2660 		ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2661 
2662 		ret = btrfs_read_roots(fs_info);
2663 		if (ret < 0) {
2664 			handle_error = true;
2665 			continue;
2666 		}
2667 
2668 		/* All successful */
2669 		fs_info->generation = generation;
2670 		fs_info->last_trans_committed = generation;
2671 
2672 		/* Always begin writing backup roots after the one being used */
2673 		if (backup_index < 0) {
2674 			fs_info->backup_root_index = 0;
2675 		} else {
2676 			fs_info->backup_root_index = backup_index + 1;
2677 			fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2678 		}
2679 		break;
2680 	}
2681 
2682 	return ret;
2683 }
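
/*
 * Example of the recovery loop above, assuming a corrupted primary tree
 * root and a mount with -o usebackuproot: iteration 0 fails to read the
 * root referenced by the superblock; iteration 1 then installs the
 * priority 1 backup via read_backup_root() (also zeroing the log root
 * and forcing CLEAR_CACHE) and retries, and so on until a backup yields
 * a readable, uptodate tree root or the backups are exhausted.
 */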
2684 
2685 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2686 {
2687 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2688 	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2689 	INIT_LIST_HEAD(&fs_info->trans_list);
2690 	INIT_LIST_HEAD(&fs_info->dead_roots);
2691 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2692 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2693 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2694 	spin_lock_init(&fs_info->delalloc_root_lock);
2695 	spin_lock_init(&fs_info->trans_lock);
2696 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2697 	spin_lock_init(&fs_info->delayed_iput_lock);
2698 	spin_lock_init(&fs_info->defrag_inodes_lock);
2699 	spin_lock_init(&fs_info->super_lock);
2700 	spin_lock_init(&fs_info->buffer_lock);
2701 	spin_lock_init(&fs_info->unused_bgs_lock);
2702 	rwlock_init(&fs_info->tree_mod_log_lock);
2703 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2704 	mutex_init(&fs_info->delete_unused_bgs_mutex);
2705 	mutex_init(&fs_info->reloc_mutex);
2706 	mutex_init(&fs_info->delalloc_root_mutex);
2707 	seqlock_init(&fs_info->profiles_lock);
2708 
2709 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2710 	INIT_LIST_HEAD(&fs_info->space_info);
2711 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2712 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2713 #ifdef CONFIG_BTRFS_DEBUG
2714 	INIT_LIST_HEAD(&fs_info->allocated_roots);
2715 	INIT_LIST_HEAD(&fs_info->allocated_ebs);
2716 	spin_lock_init(&fs_info->eb_leak_lock);
2717 #endif
2718 	extent_map_tree_init(&fs_info->mapping_tree);
2719 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2720 			     BTRFS_BLOCK_RSV_GLOBAL);
2721 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2722 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2723 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2724 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2725 			     BTRFS_BLOCK_RSV_DELOPS);
2726 	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2727 			     BTRFS_BLOCK_RSV_DELREFS);
2728 
2729 	atomic_set(&fs_info->async_delalloc_pages, 0);
2730 	atomic_set(&fs_info->defrag_running, 0);
2731 	atomic_set(&fs_info->reada_works_cnt, 0);
2732 	atomic_set(&fs_info->nr_delayed_iputs, 0);
2733 	atomic64_set(&fs_info->tree_mod_seq, 0);
2734 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2735 	fs_info->metadata_ratio = 0;
2736 	fs_info->defrag_inodes = RB_ROOT;
2737 	atomic64_set(&fs_info->free_chunk_space, 0);
2738 	fs_info->tree_mod_log = RB_ROOT;
2739 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2740 	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2741 	/* readahead state */
2742 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2743 	spin_lock_init(&fs_info->reada_lock);
2744 	btrfs_init_ref_verify(fs_info);
2745 
2746 	fs_info->thread_pool_size = min_t(unsigned long,
2747 					  num_online_cpus() + 2, 8);
2748 
2749 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2750 	spin_lock_init(&fs_info->ordered_root_lock);
2751 
2752 	btrfs_init_scrub(fs_info);
2753 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2754 	fs_info->check_integrity_print_mask = 0;
2755 #endif
2756 	btrfs_init_balance(fs_info);
2757 	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2758 
2759 	spin_lock_init(&fs_info->block_group_cache_lock);
2760 	fs_info->block_group_cache_tree = RB_ROOT;
2761 	fs_info->first_logical_byte = (u64)-1;
2762 
2763 	extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2764 			    IO_TREE_FS_EXCLUDED_EXTENTS, NULL);
2765 	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2766 
2767 	mutex_init(&fs_info->ordered_operations_mutex);
2768 	mutex_init(&fs_info->tree_log_mutex);
2769 	mutex_init(&fs_info->chunk_mutex);
2770 	mutex_init(&fs_info->transaction_kthread_mutex);
2771 	mutex_init(&fs_info->cleaner_mutex);
2772 	mutex_init(&fs_info->ro_block_group_mutex);
2773 	init_rwsem(&fs_info->commit_root_sem);
2774 	init_rwsem(&fs_info->cleanup_work_sem);
2775 	init_rwsem(&fs_info->subvol_sem);
2776 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2777 
2778 	btrfs_init_dev_replace_locks(fs_info);
2779 	btrfs_init_qgroup(fs_info);
2780 	btrfs_discard_init(fs_info);
2781 
2782 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2783 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2784 
2785 	init_waitqueue_head(&fs_info->transaction_throttle);
2786 	init_waitqueue_head(&fs_info->transaction_wait);
2787 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2788 	init_waitqueue_head(&fs_info->async_submit_wait);
2789 	init_waitqueue_head(&fs_info->delayed_iputs_wait);
2790 
2791 	/* Usable values until the real ones are cached from the superblock */
2792 	fs_info->nodesize = 4096;
2793 	fs_info->sectorsize = 4096;
2794 	fs_info->stripesize = 4096;
2795 
2796 	spin_lock_init(&fs_info->swapfile_pins_lock);
2797 	fs_info->swapfile_pins = RB_ROOT;
2798 
2799 	fs_info->send_in_progress = 0;
2800 }
2801 
2802 static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2803 {
2804 	int ret;
2805 
2806 	fs_info->sb = sb;
2807 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2808 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2809 
2810 	ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL);
2811 	if (ret)
2812 		return ret;
2813 
2814 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2815 	if (ret)
2816 		return ret;
2817 
2818 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2819 					(1 + ilog2(nr_cpu_ids));
2820 
2821 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2822 	if (ret)
2823 		return ret;
2824 
2825 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2826 			GFP_KERNEL);
2827 	if (ret)
2828 		return ret;
2829 
2830 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2831 					GFP_KERNEL);
2832 	if (!fs_info->delayed_root)
2833 		return -ENOMEM;
2834 	btrfs_init_delayed_root(fs_info->delayed_root);
2835 
2836 	return btrfs_alloc_stripe_hash_table(fs_info);
2837 }
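
/*
 * Worked example for the batch size above: with 4K pages and
 * nr_cpu_ids == 8, dirty_metadata_batch = 4096 * (1 + ilog2(8)) =
 * 16384, so per-cpu deltas are only folded into the global counter in
 * 16K steps.
 */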
2838 
2839 static int btrfs_uuid_rescan_kthread(void *data)
2840 {
2841 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
2842 	int ret;
2843 
2844 	/*
2845 	 * 1st step is to iterate through the existing UUID tree and
2846 	 * to delete all entries that contain outdated data.
2847 	 * 2nd step is to add all missing entries to the UUID tree.
2848 	 */
2849 	ret = btrfs_uuid_tree_iterate(fs_info);
2850 	if (ret < 0) {
2851 		if (ret != -EINTR)
2852 			btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2853 				   ret);
2854 		up(&fs_info->uuid_tree_rescan_sem);
2855 		return ret;
2856 	}
2857 	return btrfs_uuid_scan_kthread(data);
2858 }
2859 
2860 static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2861 {
2862 	struct task_struct *task;
2863 
2864 	down(&fs_info->uuid_tree_rescan_sem);
2865 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2866 	if (IS_ERR(task)) {
2867 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2868 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
2869 		up(&fs_info->uuid_tree_rescan_sem);
2870 		return PTR_ERR(task);
2871 	}
2872 
2873 	return 0;
2874 }
2875 
2876 int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
2877 		      char *options)
2878 {
2879 	u32 sectorsize;
2880 	u32 nodesize;
2881 	u32 stripesize;
2882 	u64 generation;
2883 	u64 features;
2884 	u16 csum_type;
2885 	struct btrfs_super_block *disk_super;
2886 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2887 	struct btrfs_root *tree_root;
2888 	struct btrfs_root *chunk_root;
2889 	int ret;
2890 	int err = -EINVAL;
2891 	int clear_free_space_tree = 0;
2892 	int level;
2893 
2894 	ret = init_mount_fs_info(fs_info, sb);
2895 	if (ret) {
2896 		err = ret;
2897 		goto fail;
2898 	}
2899 
2900 	/* These need to be init'ed before we start creating inodes and such. */
2901 	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
2902 				     GFP_KERNEL);
2903 	fs_info->tree_root = tree_root;
2904 	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
2905 				      GFP_KERNEL);
2906 	fs_info->chunk_root = chunk_root;
2907 	if (!tree_root || !chunk_root) {
2908 		err = -ENOMEM;
2909 		goto fail;
2910 	}
2911 
2912 	fs_info->btree_inode = new_inode(sb);
2913 	if (!fs_info->btree_inode) {
2914 		err = -ENOMEM;
2915 		goto fail;
2916 	}
2917 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2918 	btrfs_init_btree_inode(fs_info);
2919 
2920 	invalidate_bdev(fs_devices->latest_bdev);
2921 
2922 	/*
2923 	 * Read super block and check the signature bytes only
2924 	 */
2925 	disk_super = btrfs_read_dev_super(fs_devices->latest_bdev);
2926 	if (IS_ERR(disk_super)) {
2927 		err = PTR_ERR(disk_super);
2928 		goto fail_alloc;
2929 	}
2930 
2931 	/*
2932 	 * Verify the type first; if that or the checksum value is
2933 	 * corrupted, we'll find out.
2934 	 */
2935 	csum_type = btrfs_super_csum_type(disk_super);
2936 	if (!btrfs_supported_super_csum(csum_type)) {
2937 		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
2938 			  csum_type);
2939 		err = -EINVAL;
2940 		btrfs_release_disk_super(disk_super);
2941 		goto fail_alloc;
2942 	}
2943 
2944 	ret = btrfs_init_csum_hash(fs_info, csum_type);
2945 	if (ret) {
2946 		err = ret;
2947 		btrfs_release_disk_super(disk_super);
2948 		goto fail_alloc;
2949 	}
2950 
2951 	/*
2952 	 * We want to check the superblock checksum, the type is stored inside.
2953 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2954 	 */
2955 	if (btrfs_check_super_csum(fs_info, (u8 *)disk_super)) {
2956 		btrfs_err(fs_info, "superblock checksum mismatch");
2957 		err = -EINVAL;
2958 		btrfs_release_disk_super(disk_super);
2959 		goto fail_alloc;
2960 	}
2961 
2962 	/*
2963 	 * super_copy is zeroed at allocation time and we never touch the
2964 	 * following bytes up to INFO_SIZE; the checksum is calculated from
2965 	 * the whole block of INFO_SIZE
2966 	 */
2967 	memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
2968 	btrfs_release_disk_super(disk_super);
2969 
2970 	disk_super = fs_info->super_copy;
2971 
2972 	ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
2973 		       BTRFS_FSID_SIZE));
2974 
2975 	if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
2976 		ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
2977 				fs_info->super_copy->metadata_uuid,
2978 				BTRFS_FSID_SIZE));
2979 	}
2980 
2981 	features = btrfs_super_flags(disk_super);
2982 	if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
2983 		features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
2984 		btrfs_set_super_flags(disk_super, features);
2985 		btrfs_info(fs_info,
2986 			"found metadata UUID change in progress flag, clearing");
2987 	}
2988 
2989 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2990 	       sizeof(*fs_info->super_for_commit));
2991 
2992 	ret = btrfs_validate_mount_super(fs_info);
2993 	if (ret) {
2994 		btrfs_err(fs_info, "superblock contains fatal errors");
2995 		err = -EINVAL;
2996 		goto fail_alloc;
2997 	}
2998 
2999 	if (!btrfs_super_root(disk_super))
3000 		goto fail_alloc;
3001 
3002 	/* check FS state, whether FS is broken. */
3003 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
3004 		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
3005 
3006 	/*
3007 	 * In the long term, we'll store the compression type in the super
3008 	 * block, and it'll be used for per file compression control.
3009 	 */
3010 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
3011 
3012 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
3013 	if (ret) {
3014 		err = ret;
3015 		goto fail_alloc;
3016 	}
3017 
3018 	features = btrfs_super_incompat_flags(disk_super) &
3019 		~BTRFS_FEATURE_INCOMPAT_SUPP;
3020 	if (features) {
3021 		btrfs_err(fs_info,
3022 		    "cannot mount because of unsupported optional features (%llx)",
3023 		    features);
3024 		err = -EINVAL;
3025 		goto fail_alloc;
3026 	}
3027 
3028 	features = btrfs_super_incompat_flags(disk_super);
3029 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3030 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3031 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3032 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3033 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3034 
3035 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
3036 		btrfs_info(fs_info, "has skinny extents");
3037 
3038 	/*
3039 	 * flag our filesystem as having big metadata blocks if
3040 	 * they are bigger than the page size
3041 	 */
3042 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
3043 		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
3044 			btrfs_info(fs_info,
3045 				"flagging fs with big metadata feature");
3046 		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3047 	}
3048 
3049 	nodesize = btrfs_super_nodesize(disk_super);
3050 	sectorsize = btrfs_super_sectorsize(disk_super);
3051 	stripesize = sectorsize;
3052 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3053 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
3054 
3055 	/* Cache block sizes */
3056 	fs_info->nodesize = nodesize;
3057 	fs_info->sectorsize = sectorsize;
3058 	fs_info->stripesize = stripesize;
3059 
3060 	/*
3061 	 * mixed block groups end up with duplicate but slightly offset
3062 	 * extent buffers for the same range.  This leads to corruption.
3063 	 */
3064 	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3065 	    (sectorsize != nodesize)) {
3066 		btrfs_err(fs_info,
3067 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3068 			nodesize, sectorsize);
3069 		goto fail_alloc;
3070 	}
3071 
3072 	/*
3073 	 * No need to take the lock because there is no other task that will
3074 	 * update the flag.
3075 	 */
3076 	btrfs_set_super_incompat_flags(disk_super, features);
3077 
3078 	features = btrfs_super_compat_ro_flags(disk_super) &
3079 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
3080 	if (!sb_rdonly(sb) && features) {
3081 		btrfs_err(fs_info,
3082 	"cannot mount read-write because of unsupported optional features (%llx)",
3083 		       features);
3084 		err = -EINVAL;
3085 		goto fail_alloc;
3086 	}
3087 
3088 	ret = btrfs_init_workqueues(fs_info, fs_devices);
3089 	if (ret) {
3090 		err = ret;
3091 		goto fail_sb_buffer;
3092 	}
3093 
3094 	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
3095 	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
3096 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3097 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
3098 
3099 	sb->s_blocksize = sectorsize;
3100 	sb->s_blocksize_bits = blksize_bits(sectorsize);
3101 	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3102 
3103 	mutex_lock(&fs_info->chunk_mutex);
3104 	ret = btrfs_read_sys_array(fs_info);
3105 	mutex_unlock(&fs_info->chunk_mutex);
3106 	if (ret) {
3107 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
3108 		goto fail_sb_buffer;
3109 	}
3110 
3111 	generation = btrfs_super_chunk_root_generation(disk_super);
3112 	level = btrfs_super_chunk_root_level(disk_super);
3113 
3114 	chunk_root->node = read_tree_block(fs_info,
3115 					   btrfs_super_chunk_root(disk_super),
3116 					   generation, level, NULL);
3117 	if (IS_ERR(chunk_root->node) ||
3118 	    !extent_buffer_uptodate(chunk_root->node)) {
3119 		btrfs_err(fs_info, "failed to read chunk root");
3120 		if (!IS_ERR(chunk_root->node))
3121 			free_extent_buffer(chunk_root->node);
3122 		chunk_root->node = NULL;
3123 		goto fail_tree_roots;
3124 	}
3125 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
3126 	chunk_root->commit_root = btrfs_root_node(chunk_root);
3127 
3128 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3129 			   offsetof(struct btrfs_header, chunk_tree_uuid),
3130 			   BTRFS_UUID_SIZE);
3131 
3132 	ret = btrfs_read_chunk_tree(fs_info);
3133 	if (ret) {
3134 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3135 		goto fail_tree_roots;
3136 	}
3137 
3138 	/*
3139 	 * Keep the devid that is marked to be the target device for the
3140 	 * device replace procedure
3141 	 */
3142 	btrfs_free_extra_devids(fs_devices, 0);
3143 
3144 	if (!fs_devices->latest_bdev) {
3145 		btrfs_err(fs_info, "failed to read devices");
3146 		goto fail_tree_roots;
3147 	}
3148 
3149 	ret = init_tree_roots(fs_info);
3150 	if (ret)
3151 		goto fail_tree_roots;
3152 
3153 	/*
3154 	 * If we have a uuid root and we're not being told to rescan we need to
3155 	 * check the generation here so we can set the
3156 	 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit.  Otherwise we could commit the
3157 	 * transaction during a balance or the log replay without updating the
3158 	 * uuid generation, and then if we crash we would rescan the uuid tree,
3159 	 * even though it was perfectly fine.
3160 	 */
3161 	if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3162 	    fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3163 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3164 
3165 	ret = btrfs_verify_dev_extents(fs_info);
3166 	if (ret) {
3167 		btrfs_err(fs_info,
3168 			  "failed to verify dev extents against chunks: %d",
3169 			  ret);
3170 		goto fail_block_groups;
3171 	}
3172 	ret = btrfs_recover_balance(fs_info);
3173 	if (ret) {
3174 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3175 		goto fail_block_groups;
3176 	}
3177 
3178 	ret = btrfs_init_dev_stats(fs_info);
3179 	if (ret) {
3180 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3181 		goto fail_block_groups;
3182 	}
3183 
3184 	ret = btrfs_init_dev_replace(fs_info);
3185 	if (ret) {
3186 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3187 		goto fail_block_groups;
3188 	}
3189 
3190 	btrfs_free_extra_devids(fs_devices, 1);
3191 
3192 	ret = btrfs_sysfs_add_fsid(fs_devices);
3193 	if (ret) {
3194 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3195 				ret);
3196 		goto fail_block_groups;
3197 	}
3198 
3199 	ret = btrfs_sysfs_add_mounted(fs_info);
3200 	if (ret) {
3201 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3202 		goto fail_fsdev_sysfs;
3203 	}
3204 
3205 	ret = btrfs_init_space_info(fs_info);
3206 	if (ret) {
3207 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3208 		goto fail_sysfs;
3209 	}
3210 
3211 	ret = btrfs_read_block_groups(fs_info);
3212 	if (ret) {
3213 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3214 		goto fail_sysfs;
3215 	}
3216 
3217 	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
3218 		btrfs_warn(fs_info,
3219 		"writable mount is not allowed due to too many missing devices");
3220 		goto fail_sysfs;
3221 	}
3222 
3223 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3224 					       "btrfs-cleaner");
3225 	if (IS_ERR(fs_info->cleaner_kthread))
3226 		goto fail_sysfs;
3227 
3228 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3229 						   tree_root,
3230 						   "btrfs-transaction");
3231 	if (IS_ERR(fs_info->transaction_kthread))
3232 		goto fail_cleaner;
3233 
3234 	if (!btrfs_test_opt(fs_info, NOSSD) &&
3235 	    !fs_info->fs_devices->rotating) {
3236 		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3237 	}
3238 
3239 	/*
3240 	 * Mount does not set all options immediately; we can do it now and do
3241 	 * not have to wait for transaction commit
3242 	 */
3243 	btrfs_apply_pending_changes(fs_info);
3244 
3245 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3246 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3247 		ret = btrfsic_mount(fs_info, fs_devices,
3248 				    btrfs_test_opt(fs_info,
3249 					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3250 				    1 : 0,
3251 				    fs_info->check_integrity_print_mask);
3252 		if (ret)
3253 			btrfs_warn(fs_info,
3254 				"failed to initialize integrity check module: %d",
3255 				ret);
3256 	}
3257 #endif
3258 	ret = btrfs_read_qgroup_config(fs_info);
3259 	if (ret)
3260 		goto fail_trans_kthread;
3261 
3262 	if (btrfs_build_ref_tree(fs_info))
3263 		btrfs_err(fs_info, "couldn't build ref tree");
3264 
3265 	/* Do not make disk changes on a broken FS or if nologreplay is given */
3266 	if (btrfs_super_log_root(disk_super) != 0 &&
3267 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3268 		btrfs_info(fs_info, "start tree-log replay");
3269 		ret = btrfs_replay_log(fs_info, fs_devices);
3270 		if (ret) {
3271 			err = ret;
3272 			goto fail_qgroup;
3273 		}
3274 	}
3275 
3276 	ret = btrfs_find_orphan_roots(fs_info);
3277 	if (ret)
3278 		goto fail_qgroup;
3279 
3280 	if (!sb_rdonly(sb)) {
3281 		ret = btrfs_cleanup_fs_roots(fs_info);
3282 		if (ret)
3283 			goto fail_qgroup;
3284 
3285 		mutex_lock(&fs_info->cleaner_mutex);
3286 		ret = btrfs_recover_relocation(tree_root);
3287 		mutex_unlock(&fs_info->cleaner_mutex);
3288 		if (ret < 0) {
3289 			btrfs_warn(fs_info, "failed to recover relocation: %d",
3290 					ret);
3291 			err = -EINVAL;
3292 			goto fail_qgroup;
3293 		}
3294 	}
3295 
3296 	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
3297 	if (IS_ERR(fs_info->fs_root)) {
3298 		err = PTR_ERR(fs_info->fs_root);
3299 		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3300 		fs_info->fs_root = NULL;
3301 		goto fail_qgroup;
3302 	}
3303 
3304 	if (sb_rdonly(sb))
3305 		return 0;
3306 
3307 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3308 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3309 		clear_free_space_tree = 1;
3310 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3311 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3312 		btrfs_warn(fs_info, "free space tree is invalid");
3313 		clear_free_space_tree = 1;
3314 	}
3315 
3316 	if (clear_free_space_tree) {
3317 		btrfs_info(fs_info, "clearing free space tree");
3318 		ret = btrfs_clear_free_space_tree(fs_info);
3319 		if (ret) {
3320 			btrfs_warn(fs_info,
3321 				   "failed to clear free space tree: %d", ret);
3322 			close_ctree(fs_info);
3323 			return ret;
3324 		}
3325 	}
3326 
3327 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3328 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3329 		btrfs_info(fs_info, "creating free space tree");
3330 		ret = btrfs_create_free_space_tree(fs_info);
3331 		if (ret) {
3332 			btrfs_warn(fs_info,
3333 				"failed to create free space tree: %d", ret);
3334 			close_ctree(fs_info);
3335 			return ret;
3336 		}
3337 	}
3338 
3339 	down_read(&fs_info->cleanup_work_sem);
3340 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3341 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3342 		up_read(&fs_info->cleanup_work_sem);
3343 		close_ctree(fs_info);
3344 		return ret;
3345 	}
3346 	up_read(&fs_info->cleanup_work_sem);
3347 
3348 	ret = btrfs_resume_balance_async(fs_info);
3349 	if (ret) {
3350 		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3351 		close_ctree(fs_info);
3352 		return ret;
3353 	}
3354 
3355 	ret = btrfs_resume_dev_replace_async(fs_info);
3356 	if (ret) {
3357 		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3358 		close_ctree(fs_info);
3359 		return ret;
3360 	}
3361 
3362 	btrfs_qgroup_rescan_resume(fs_info);
3363 	btrfs_discard_resume(fs_info);
3364 
3365 	if (!fs_info->uuid_root) {
3366 		btrfs_info(fs_info, "creating UUID tree");
3367 		ret = btrfs_create_uuid_tree(fs_info);
3368 		if (ret) {
3369 			btrfs_warn(fs_info,
3370 				"failed to create the UUID tree: %d", ret);
3371 			close_ctree(fs_info);
3372 			return ret;
3373 		}
3374 	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3375 		   fs_info->generation !=
3376 				btrfs_super_uuid_tree_generation(disk_super)) {
3377 		btrfs_info(fs_info, "checking UUID tree");
3378 		ret = btrfs_check_uuid_tree(fs_info);
3379 		if (ret) {
3380 			btrfs_warn(fs_info,
3381 				"failed to check the UUID tree: %d", ret);
3382 			close_ctree(fs_info);
3383 			return ret;
3384 		}
3385 	}
3386 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3387 
3388 	/*
3389 	 * backuproot only affects mount behavior, and if open_ctree succeeded,
3390 	 * there is no need to keep the flag.
3391 	 */
3392 	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3393 
3394 	return 0;
3395 
3396 fail_qgroup:
3397 	btrfs_free_qgroup_config(fs_info);
3398 fail_trans_kthread:
3399 	kthread_stop(fs_info->transaction_kthread);
3400 	btrfs_cleanup_transaction(fs_info);
3401 	btrfs_free_fs_roots(fs_info);
3402 fail_cleaner:
3403 	kthread_stop(fs_info->cleaner_kthread);
3404 
3405 	/*
3406 	 * make sure we're done with the btree inode before we stop our
3407 	 * kthreads
3408 	 */
3409 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3410 
3411 fail_sysfs:
3412 	btrfs_sysfs_remove_mounted(fs_info);
3413 
3414 fail_fsdev_sysfs:
3415 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3416 
3417 fail_block_groups:
3418 	btrfs_put_block_group_cache(fs_info);
3419 
3420 fail_tree_roots:
3421 	if (fs_info->data_reloc_root)
3422 		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
3423 	free_root_pointers(fs_info, true);
3424 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3425 
3426 fail_sb_buffer:
3427 	btrfs_stop_all_workers(fs_info);
3428 	btrfs_free_block_groups(fs_info);
3429 fail_alloc:
3430 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3431 
3432 	iput(fs_info->btree_inode);
3433 fail:
3434 	btrfs_close_devices(fs_info->fs_devices);
3435 	return err;
3436 }
3437 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3438 
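/*
 * Endio handler for the super block write bios submitted by
 * write_dev_supers(). On success the page is marked up to date; on failure
 * the page is flagged with an error and the device's write error statistics
 * are bumped. Either way, the reference taken for the bio is dropped and
 * the page is unlocked so that wait_dev_supers() can pick up the result.
 */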
3439 static void btrfs_end_super_write(struct bio *bio)
3440 {
3441 	struct btrfs_device *device = bio->bi_private;
3442 	struct bio_vec *bvec;
3443 	struct bvec_iter_all iter_all;
3444 	struct page *page;
3445 
3446 	bio_for_each_segment_all(bvec, bio, iter_all) {
3447 		page = bvec->bv_page;
3448 
3449 		if (bio->bi_status) {
3450 			btrfs_warn_rl_in_rcu(device->fs_info,
3451 				"lost page write due to IO error on %s (%d)",
3452 				rcu_str_deref(device->name),
3453 				blk_status_to_errno(bio->bi_status));
3454 			ClearPageUptodate(page);
3455 			SetPageError(page);
3456 			btrfs_dev_stat_inc_and_print(device,
3457 						     BTRFS_DEV_STAT_WRITE_ERRS);
3458 		} else {
3459 			SetPageUptodate(page);
3460 		}
3461 
3462 		put_page(page);
3463 		unlock_page(page);
3464 	}
3465 
3466 	bio_put(bio);
3467 }
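/*
 * Read a single super block copy @copy_num from @bdev through the page
 * cache and sanity check its bytenr and magic. Returns a pointer into the
 * cached page, or an ERR_PTR on failure; the caller is expected to drop it
 * with btrfs_release_disk_super().
 */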
3468 
3469 struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
3470 						   int copy_num)
3471 {
3472 	struct btrfs_super_block *super;
3473 	struct page *page;
3474 	u64 bytenr;
3475 	struct address_space *mapping = bdev->bd_inode->i_mapping;
3476 
3477 	bytenr = btrfs_sb_offset(copy_num);
3478 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3479 		return ERR_PTR(-EINVAL);
3480 
3481 	page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
3482 	if (IS_ERR(page))
3483 		return ERR_CAST(page);
3484 
3485 	super = page_address(page);
3486 	if (btrfs_super_bytenr(super) != bytenr ||
3487 		    btrfs_super_magic(super) != BTRFS_MAGIC) {
3488 		btrfs_release_disk_super(super);
3489 		return ERR_PTR(-EINVAL);
3490 	}
3491 
3492 	return super;
3493 }
3494 
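/*
 * Read the super block of a device and return the copy with the highest
 * generation. Note that only the first copy is currently examined (see the
 * comment in the loop below), so in practice this returns copy 0, or an
 * ERR_PTR if it could not be read.
 */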
3496 struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
3497 {
3498 	struct btrfs_super_block *super, *latest = NULL;
3499 	int i;
3500 	u64 transid = 0;
3501 
	/*
	 * We would like to check all the supers, but that would make a btrfs
	 * mount succeed after a mkfs from a different FS. So, for now, only
	 * the first super is checked; scanning the later supers (up to
	 * BTRFS_SUPER_MIRROR_MAX) would need a special mount option.
	 */
3507 	for (i = 0; i < 1; i++) {
3508 		super = btrfs_read_dev_one_super(bdev, i);
3509 		if (IS_ERR(super))
3510 			continue;
3511 
3512 		if (!latest || btrfs_super_generation(super) > transid) {
3513 			if (latest)
3514 				btrfs_release_disk_super(super);
3515 
3516 			latest = super;
3517 			transid = btrfs_super_generation(super);
3518 		}
3519 	}
3520 
3521 	return super;
3522 }
3523 
3524 /*
3525  * Write superblock @sb to the @device. Do not wait for completion, all the
3526  * pages we use for writing are locked.
3527  *
 * Write @max_mirrors copies of the superblock, where 0 means the default:
 * write all copies that fit within the expected device size at commit time.
 * Note that max_mirrors must be the same for the write and wait phases.
 *
 * Return 0 if at least one copy was submitted, -1 if no copy could be
 * written at all.
3533  */
3534 static int write_dev_supers(struct btrfs_device *device,
3535 			    struct btrfs_super_block *sb, int max_mirrors)
3536 {
3537 	struct btrfs_fs_info *fs_info = device->fs_info;
3538 	struct address_space *mapping = device->bdev->bd_inode->i_mapping;
3539 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3540 	int i;
3541 	int errors = 0;
3542 	u64 bytenr;
3543 
3544 	if (max_mirrors == 0)
3545 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3546 
3547 	shash->tfm = fs_info->csum_shash;
3548 
3549 	for (i = 0; i < max_mirrors; i++) {
3550 		struct page *page;
3551 		struct bio *bio;
3552 		struct btrfs_super_block *disk_super;
3553 
3554 		bytenr = btrfs_sb_offset(i);
3555 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3556 		    device->commit_total_bytes)
3557 			break;
3558 
3559 		btrfs_set_super_bytenr(sb, bytenr);
3560 
3561 		crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3562 				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
3563 				    sb->csum);
3564 
3565 		page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT,
3566 					   GFP_NOFS);
3567 		if (!page) {
3568 			btrfs_err(device->fs_info,
3569 			    "couldn't get super block page for bytenr %llu",
3570 			    bytenr);
3571 			errors++;
3572 			continue;
3573 		}
3574 
3575 		/* Bump the refcount for wait_dev_supers() */
3576 		get_page(page);
3577 
3578 		disk_super = page_address(page);
3579 		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3580 
3581 		/*
3582 		 * Directly use bios here instead of relying on the page cache
3583 		 * to do I/O, so we don't lose the ability to do integrity
3584 		 * checking.
3585 		 */
3586 		bio = bio_alloc(GFP_NOFS, 1);
3587 		bio_set_dev(bio, device->bdev);
3588 		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3589 		bio->bi_private = device;
3590 		bio->bi_end_io = btrfs_end_super_write;
3591 		__bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
3592 			       offset_in_page(bytenr));
3593 
3594 		/*
3595 		 * We FUA only the first super block.  The others we allow to
		 * go down lazily, and there's a short window where the on-disk
3597 		 * copies might still contain the older version.
3598 		 */
3599 		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO;
3600 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3601 			bio->bi_opf |= REQ_FUA;
3602 
3603 		btrfsic_submit_bio(bio);
3604 	}
3605 	return errors < i ? 0 : -1;
3606 }
3607 
3608 /*
 * Wait for write completion of the super blocks submitted by
 * write_dev_supers(); @max_mirrors must be the same as in the write phase.
 *
 * Return -1 when the primary super block write failed or when no copy
 * completed successfully, 0 otherwise.
3614  */
3615 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3616 {
3617 	int i;
3618 	int errors = 0;
3619 	bool primary_failed = false;
3620 	u64 bytenr;
3621 
3622 	if (max_mirrors == 0)
3623 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3624 
3625 	for (i = 0; i < max_mirrors; i++) {
3626 		struct page *page;
3627 
3628 		bytenr = btrfs_sb_offset(i);
3629 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3630 		    device->commit_total_bytes)
3631 			break;
3632 
3633 		page = find_get_page(device->bdev->bd_inode->i_mapping,
3634 				     bytenr >> PAGE_SHIFT);
3635 		if (!page) {
3636 			errors++;
3637 			if (i == 0)
3638 				primary_failed = true;
3639 			continue;
3640 		}
3641 		/* Page is submitted locked and unlocked once the IO completes */
3642 		wait_on_page_locked(page);
3643 		if (PageError(page)) {
3644 			errors++;
3645 			if (i == 0)
3646 				primary_failed = true;
3647 		}
3648 
3649 		/* Drop our reference */
3650 		put_page(page);
3651 
3652 		/* Drop the reference from the writing run */
3653 		put_page(page);
3654 	}
3655 
3656 	/* log error, force error return */
3657 	if (primary_failed) {
3658 		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3659 			  device->devid);
3660 		return -1;
3661 	}
3662 
3663 	return errors < i ? 0 : -1;
3664 }
3665 
3666 /*
 * Endio for write_dev_flush(): wake anyone waiting for the barrier once it
 * is done.
3669  */
3670 static void btrfs_end_empty_barrier(struct bio *bio)
3671 {
3672 	complete(bio->bi_private);
3673 }
3674 
3675 /*
3676  * Submit a flush request to the device if it supports it. Error handling is
3677  * done in the waiting counterpart.
3678  */
3679 static void write_dev_flush(struct btrfs_device *device)
3680 {
3681 	struct request_queue *q = bdev_get_queue(device->bdev);
3682 	struct bio *bio = device->flush_bio;
3683 
3684 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3685 		return;
3686 
3687 	bio_reset(bio);
3688 	bio->bi_end_io = btrfs_end_empty_barrier;
3689 	bio_set_dev(bio, device->bdev);
3690 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3691 	init_completion(&device->flush_wait);
3692 	bio->bi_private = &device->flush_wait;
3693 
3694 	btrfsic_submit_bio(bio);
3695 	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3696 }
3697 
3698 /*
3699  * If the flush bio has been submitted by write_dev_flush, wait for it.
3700  */
3701 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3702 {
3703 	struct bio *bio = device->flush_bio;
3704 
3705 	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3706 		return BLK_STS_OK;
3707 
3708 	clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3709 	wait_for_completion_io(&device->flush_wait);
3710 
3711 	return bio->bi_status;
3712 }
3713 
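/*
 * After flush errors were seen, check whether the filesystem can still be
 * mounted read-write with the remaining good devices.
 */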
3714 static int check_barrier_error(struct btrfs_fs_info *fs_info)
3715 {
3716 	if (!btrfs_check_rw_degradable(fs_info, NULL))
3717 		return -EIO;
3718 	return 0;
3719 }
3720 
3721 /*
3722  * send an empty flush down to each device in parallel,
3723  * then wait for them
3724  */
3725 static int barrier_all_devices(struct btrfs_fs_info *info)
3726 {
3727 	struct list_head *head;
3728 	struct btrfs_device *dev;
3729 	int errors_wait = 0;
3730 	blk_status_t ret;
3731 
3732 	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3733 	/* send down all the barriers */
3734 	head = &info->fs_devices->devices;
3735 	list_for_each_entry(dev, head, dev_list) {
3736 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3737 			continue;
3738 		if (!dev->bdev)
3739 			continue;
3740 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3741 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3742 			continue;
3743 
3744 		write_dev_flush(dev);
3745 		dev->last_flush_error = BLK_STS_OK;
3746 	}
3747 
3748 	/* wait for all the barriers */
3749 	list_for_each_entry(dev, head, dev_list) {
3750 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3751 			continue;
3752 		if (!dev->bdev) {
3753 			errors_wait++;
3754 			continue;
3755 		}
3756 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3757 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3758 			continue;
3759 
3760 		ret = wait_dev_flush(dev);
3761 		if (ret) {
3762 			dev->last_flush_error = ret;
3763 			btrfs_dev_stat_inc_and_print(dev,
3764 					BTRFS_DEV_STAT_FLUSH_ERRS);
3765 			errors_wait++;
3766 		}
3767 	}
3768 
3769 	if (errors_wait) {
		/*
		 * We need the status of all disks to determine the overall
		 * volume status, so the error checking is pushed out to a
		 * separate check.
		 */
3775 		return check_barrier_error(info);
3776 	}
3777 	return 0;
3778 }
3779 
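/*
 * Given the block group @flags, return how many device failures the least
 * tolerant of the raid profiles in use can survive, i.e. the minimum of
 * tolerated_failures over all profiles selected by @flags.
 */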
3780 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3781 {
3782 	int raid_type;
3783 	int min_tolerated = INT_MAX;
3784 
3785 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3786 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3787 		min_tolerated = min_t(int, min_tolerated,
3788 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3789 				    tolerated_failures);
3790 
3791 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3792 		if (raid_type == BTRFS_RAID_SINGLE)
3793 			continue;
3794 		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3795 			continue;
3796 		min_tolerated = min_t(int, min_tolerated,
3797 				    btrfs_raid_array[raid_type].
3798 				    tolerated_failures);
3799 	}
3800 
3801 	if (min_tolerated == INT_MAX) {
3802 		pr_warn("BTRFS: unknown raid flag: %llu", flags);
3803 		min_tolerated = 0;
3804 	}
3805 
3806 	return min_tolerated;
3807 }
3808 
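/*
 * Write the super block to all writeable devices in two phases: first
 * submit the (optionally barriered) writes to every device through
 * write_dev_supers(), then wait for all of them with wait_dev_supers().
 * Fails with -EIO when more devices error out than the filesystem can
 * tolerate and still remain mountable.
 */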
3809 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3810 {
3811 	struct list_head *head;
3812 	struct btrfs_device *dev;
3813 	struct btrfs_super_block *sb;
3814 	struct btrfs_dev_item *dev_item;
3815 	int ret;
3816 	int do_barriers;
3817 	int max_errors;
3818 	int total_errors = 0;
3819 	u64 flags;
3820 
3821 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3822 
3823 	/*
	 * max_mirrors == 0 indicates we're called from commit_transaction,
	 * not from fsync, where the tree roots in fs_info may not yet be
	 * consistent on disk.
3827 	 */
3828 	if (max_mirrors == 0)
3829 		backup_super_roots(fs_info);
3830 
3831 	sb = fs_info->super_for_commit;
3832 	dev_item = &sb->dev_item;
3833 
3834 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3835 	head = &fs_info->fs_devices->devices;
3836 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3837 
3838 	if (do_barriers) {
3839 		ret = barrier_all_devices(fs_info);
3840 		if (ret) {
			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3843 			btrfs_handle_fs_error(fs_info, ret,
3844 					      "errors while submitting device barriers.");
3845 			return ret;
3846 		}
3847 	}
3848 
3849 	list_for_each_entry(dev, head, dev_list) {
3850 		if (!dev->bdev) {
3851 			total_errors++;
3852 			continue;
3853 		}
3854 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3855 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3856 			continue;
3857 
3858 		btrfs_set_stack_device_generation(dev_item, 0);
3859 		btrfs_set_stack_device_type(dev_item, dev->type);
3860 		btrfs_set_stack_device_id(dev_item, dev->devid);
3861 		btrfs_set_stack_device_total_bytes(dev_item,
3862 						   dev->commit_total_bytes);
3863 		btrfs_set_stack_device_bytes_used(dev_item,
3864 						  dev->commit_bytes_used);
3865 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3866 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3867 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3868 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3869 		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
3870 		       BTRFS_FSID_SIZE);
3871 
3872 		flags = btrfs_super_flags(sb);
3873 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3874 
3875 		ret = btrfs_validate_write_super(fs_info, sb);
3876 		if (ret < 0) {
3877 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3878 			btrfs_handle_fs_error(fs_info, -EUCLEAN,
3879 				"unexpected superblock corruption detected");
3880 			return -EUCLEAN;
3881 		}
3882 
3883 		ret = write_dev_supers(dev, sb, max_mirrors);
3884 		if (ret)
3885 			total_errors++;
3886 	}
3887 	if (total_errors > max_errors) {
3888 		btrfs_err(fs_info, "%d errors while writing supers",
3889 			  total_errors);
3890 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3891 
3892 		/* FUA is masked off if unsupported and can't be the reason */
3893 		btrfs_handle_fs_error(fs_info, -EIO,
3894 				      "%d errors while writing supers",
3895 				      total_errors);
3896 		return -EIO;
3897 	}
3898 
3899 	total_errors = 0;
3900 	list_for_each_entry(dev, head, dev_list) {
3901 		if (!dev->bdev)
3902 			continue;
3903 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3904 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3905 			continue;
3906 
3907 		ret = wait_dev_supers(dev, max_mirrors);
3908 		if (ret)
3909 			total_errors++;
3910 	}
3911 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3912 	if (total_errors > max_errors) {
3913 		btrfs_handle_fs_error(fs_info, -EIO,
3914 				      "%d errors while writing supers",
3915 				      total_errors);
3916 		return -EIO;
3917 	}
3918 	return 0;
3919 }
3920 
3921 /* Drop a fs root from the radix tree and free it. */
3922 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3923 				  struct btrfs_root *root)
3924 {
3925 	bool drop_ref = false;
3926 
3927 	spin_lock(&fs_info->fs_roots_radix_lock);
3928 	radix_tree_delete(&fs_info->fs_roots_radix,
3929 			  (unsigned long)root->root_key.objectid);
3930 	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
3931 		drop_ref = true;
3932 	spin_unlock(&fs_info->fs_roots_radix_lock);
3933 
3934 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3935 		ASSERT(root->log_root == NULL);
3936 		if (root->reloc_root) {
3937 			btrfs_put_root(root->reloc_root);
3938 			root->reloc_root = NULL;
3939 		}
3940 	}
3941 
3942 	if (root->free_ino_pinned)
3943 		__btrfs_remove_free_space_cache(root->free_ino_pinned);
3944 	if (root->free_ino_ctl)
3945 		__btrfs_remove_free_space_cache(root->free_ino_ctl);
3946 	if (root->ino_cache_inode) {
3947 		iput(root->ino_cache_inode);
3948 		root->ino_cache_inode = NULL;
3949 	}
3950 	if (drop_ref)
3951 		btrfs_put_root(root);
3952 }
3953 
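/*
 * Walk all fs roots in the radix tree in batches of up to eight and run
 * orphan cleanup on each of them. Roots whose root item refcount is zero
 * are skipped, as those are already queued on the dead roots list.
 */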
3954 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3955 {
3956 	u64 root_objectid = 0;
3957 	struct btrfs_root *gang[8];
3958 	int i = 0;
3959 	int err = 0;
3960 	unsigned int ret = 0;
3961 
3962 	while (1) {
3963 		spin_lock(&fs_info->fs_roots_radix_lock);
3964 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3965 					     (void **)gang, root_objectid,
3966 					     ARRAY_SIZE(gang));
3967 		if (!ret) {
3968 			spin_unlock(&fs_info->fs_roots_radix_lock);
3969 			break;
3970 		}
3971 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
3972 
3973 		for (i = 0; i < ret; i++) {
			/* Avoid grabbing roots in dead_roots */
3975 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3976 				gang[i] = NULL;
3977 				continue;
3978 			}
			/* grab all the search results for later use */
3980 			gang[i] = btrfs_grab_root(gang[i]);
3981 		}
3982 		spin_unlock(&fs_info->fs_roots_radix_lock);
3983 
3984 		for (i = 0; i < ret; i++) {
3985 			if (!gang[i])
3986 				continue;
3987 			root_objectid = gang[i]->root_key.objectid;
3988 			err = btrfs_orphan_cleanup(gang[i]);
3989 			if (err)
3990 				break;
3991 			btrfs_put_root(gang[i]);
3992 		}
3993 		root_objectid++;
3994 	}
3995 
3996 	/* release the uncleaned roots due to error */
3997 	for (; i < ret; i++) {
3998 		if (gang[i])
3999 			btrfs_put_root(gang[i]);
4000 	}
4001 	return err;
4002 }
4003 
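/*
 * Run any pending delayed iputs, wait for in-flight cleanup work to finish
 * and then commit the current transaction, making sure everything ends up
 * on disk.
 */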
4004 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4005 {
4006 	struct btrfs_root *root = fs_info->tree_root;
4007 	struct btrfs_trans_handle *trans;
4008 
4009 	mutex_lock(&fs_info->cleaner_mutex);
4010 	btrfs_run_delayed_iputs(fs_info);
4011 	mutex_unlock(&fs_info->cleaner_mutex);
4012 	wake_up_process(fs_info->cleaner_kthread);
4013 
	/* wait until ongoing cleanup work is done */
4015 	down_write(&fs_info->cleanup_work_sem);
4016 	up_write(&fs_info->cleanup_work_sem);
4017 
4018 	trans = btrfs_join_transaction(root);
4019 	if (IS_ERR(trans))
4020 		return PTR_ERR(trans);
4021 	return btrfs_commit_transaction(trans);
4022 }
4023 
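/*
 * Tear down the filesystem: park the cleaner, wait for qgroup rescan, uuid
 * scan, balance, scrub and dev-replace to stop, commit (or error out) the
 * last transaction, stop the kthreads and then release sysfs entries,
 * block groups, roots and devices.
 */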
4024 void __cold close_ctree(struct btrfs_fs_info *fs_info)
4025 {
4026 	int ret;
4027 
4028 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4029 	/*
4030 	 * We don't want the cleaner to start new transactions, add more delayed
4031 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4032 	 * because that frees the task_struct, and the transaction kthread might
4033 	 * still try to wake up the cleaner.
4034 	 */
4035 	kthread_park(fs_info->cleaner_kthread);
4036 
4037 	/* wait for the qgroup rescan worker to stop */
4038 	btrfs_qgroup_wait_for_completion(fs_info, false);
4039 
4040 	/* wait for the uuid_scan task to finish */
4041 	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
4043 	up(&fs_info->uuid_tree_rescan_sem);
4044 
4045 	/* pause restriper - we want to resume on mount */
4046 	btrfs_pause_balance(fs_info);
4047 
4048 	btrfs_dev_replace_suspend_for_unmount(fs_info);
4049 
4050 	btrfs_scrub_cancel(fs_info);
4051 
4052 	/* wait for any defraggers to finish */
4053 	wait_event(fs_info->transaction_wait,
4054 		   (atomic_read(&fs_info->defrag_running) == 0));
4055 
4056 	/* clear out the rbtree of defraggable inodes */
4057 	btrfs_cleanup_defrag_inodes(fs_info);
4058 
4059 	cancel_work_sync(&fs_info->async_reclaim_work);
4060 
4061 	/* Cancel or finish ongoing discard work */
4062 	btrfs_discard_cleanup(fs_info);
4063 
4064 	if (!sb_rdonly(fs_info->sb)) {
4065 		/*
4066 		 * The cleaner kthread is stopped, so do one final pass over
4067 		 * unused block groups.
4068 		 */
4069 		btrfs_delete_unused_bgs(fs_info);
4070 
4071 		/*
4072 		 * There might be existing delayed inode workers still running
4073 		 * and holding an empty delayed inode item. We must wait for
4074 		 * them to complete first because they can create a transaction.
4075 		 * This happens when someone calls btrfs_balance_delayed_items()
4076 		 * and then a transaction commit runs the same delayed nodes
4077 		 * before any delayed worker has done something with the nodes.
4078 		 * We must wait for any worker here and not at transaction
4079 		 * commit time since that could cause a deadlock.
4080 		 * This is a very rare case.
4081 		 */
4082 		btrfs_flush_workqueue(fs_info->delayed_workers);
4083 
4084 		ret = btrfs_commit_super(fs_info);
4085 		if (ret)
4086 			btrfs_err(fs_info, "commit super ret %d", ret);
4087 	}
4088 
4089 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
4090 	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
4091 		btrfs_error_commit_super(fs_info);
4092 
4093 	kthread_stop(fs_info->transaction_kthread);
4094 	kthread_stop(fs_info->cleaner_kthread);
4095 
4096 	ASSERT(list_empty(&fs_info->delayed_iputs));
4097 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4098 
4099 	if (btrfs_check_quota_leak(fs_info)) {
4100 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4101 		btrfs_err(fs_info, "qgroup reserved space leaked");
4102 	}
4103 
4104 	btrfs_free_qgroup_config(fs_info);
4105 	ASSERT(list_empty(&fs_info->delalloc_roots));
4106 
4107 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4108 		btrfs_info(fs_info, "at unmount delalloc count %lld",
4109 		       percpu_counter_sum(&fs_info->delalloc_bytes));
4110 	}
4111 
4112 	if (percpu_counter_sum(&fs_info->dio_bytes))
4113 		btrfs_info(fs_info, "at unmount dio bytes count %lld",
4114 			   percpu_counter_sum(&fs_info->dio_bytes));
4115 
4116 	btrfs_sysfs_remove_mounted(fs_info);
4117 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4118 
4119 	btrfs_put_block_group_cache(fs_info);
4120 
4121 	/*
	 * We must make sure there are no read requests left to submit after
	 * we stop all the workers.
4124 	 */
4125 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4126 	btrfs_stop_all_workers(fs_info);
4127 
4128 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4129 	free_root_pointers(fs_info, true);
4130 	btrfs_free_fs_roots(fs_info);
4131 
4132 	/*
4133 	 * We must free the block groups after dropping the fs_roots as we could
4134 	 * have had an IO error and have left over tree log blocks that aren't
4135 	 * cleaned up until the fs roots are freed.  This makes the block group
4136 	 * accounting appear to be wrong because there's pending reserved bytes,
4137 	 * so make sure we do the block group cleanup afterwards.
4138 	 */
4139 	btrfs_free_block_groups(fs_info);
4140 
4141 	iput(fs_info->btree_inode);
4142 
4143 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4144 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
4145 		btrfsic_unmount(fs_info->fs_devices);
4146 #endif
4147 
4148 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
4149 	btrfs_close_devices(fs_info->fs_devices);
4150 }
4151 
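/*
 * Check that the extent buffer @buf is up to date and that its generation
 * matches @parent_transid. Returns 1 if the buffer is usable, 0 if not,
 * and -EAGAIN when @atomic is set and the check would have to block.
 */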
4152 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4153 			  int atomic)
4154 {
4155 	int ret;
4156 	struct inode *btree_inode = buf->pages[0]->mapping->host;
4157 
4158 	ret = extent_buffer_uptodate(buf);
4159 	if (!ret)
4160 		return ret;
4161 
4162 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4163 				    parent_transid, atomic);
4164 	if (ret == -EAGAIN)
4165 		return ret;
4166 	return !ret;
4167 }
4168 
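/*
 * Mark an extent buffer dirty, accounting the newly dirtied bytes towards
 * dirty_metadata_bytes. Warns if the buffer's generation does not match
 * the currently running transaction.
 */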
4169 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4170 {
4171 	struct btrfs_fs_info *fs_info;
4172 	struct btrfs_root *root;
4173 	u64 transid = btrfs_header_generation(buf);
4174 	int was_dirty;
4175 
4176 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4177 	/*
4178 	 * This is a fast path so only do this check if we have sanity tests
4179 	 * enabled.  Normal people shouldn't be using unmapped buffers as dirty
4180 	 * outside of the sanity tests.
4181 	 */
4182 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4183 		return;
4184 #endif
4185 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4186 	fs_info = root->fs_info;
4187 	btrfs_assert_tree_locked(buf);
4188 	if (transid != fs_info->generation)
4189 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4190 			buf->start, transid, fs_info->generation);
4191 	was_dirty = set_extent_buffer_dirty(buf);
4192 	if (!was_dirty)
4193 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4194 					 buf->len,
4195 					 fs_info->dirty_metadata_batch);
4196 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4197 	/*
4198 	 * Since btrfs_mark_buffer_dirty() can be called with item pointer set
4199 	 * but item data not updated.
4200 	 * So here we should only check item pointers, not item data.
4201 	 */
4202 	if (btrfs_header_level(buf) == 0 &&
4203 	    btrfs_check_leaf_relaxed(buf)) {
4204 		btrfs_print_leaf(buf);
4205 		ASSERT(0);
4206 	}
4207 #endif
4208 }
4209 
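/*
 * Throttle the caller when too many dirty btree metadata bytes have
 * accumulated, optionally flushing the delayed items first. Does nothing
 * in PF_MEMALLOC context to avoid recursing into memory reclaim.
 */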
4210 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4211 					int flush_delayed)
4212 {
4213 	/*
	 * It looks as though older kernels can get into trouble with this
	 * code; they end up stuck in balance_dirty_pages() forever.
4216 	 */
4217 	int ret;
4218 
4219 	if (current->flags & PF_MEMALLOC)
4220 		return;
4221 
4222 	if (flush_delayed)
4223 		btrfs_balance_delayed_items(fs_info);
4224 
4225 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4226 				     BTRFS_DIRTY_METADATA_THRESH,
4227 				     fs_info->dirty_metadata_batch);
	if (ret > 0)
		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4231 }
4232 
4233 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4234 {
4235 	__btrfs_btree_balance_dirty(fs_info, 1);
4236 }
4237 
4238 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4239 {
4240 	__btrfs_btree_balance_dirty(fs_info, 0);
4241 }
4242 
4243 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
4244 		      struct btrfs_key *first_key)
4245 {
4246 	return btree_read_extent_buffer_pages(buf, parent_transid,
4247 					      level, first_key);
4248 }
4249 
4250 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4251 {
4252 	/* cleanup FS via transaction */
4253 	btrfs_cleanup_transaction(fs_info);
4254 
4255 	mutex_lock(&fs_info->cleaner_mutex);
4256 	btrfs_run_delayed_iputs(fs_info);
4257 	mutex_unlock(&fs_info->cleaner_mutex);
4258 
4259 	down_write(&fs_info->cleanup_work_sem);
4260 	up_write(&fs_info->cleanup_work_sem);
4261 }
4262 
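/*
 * Free the log trees of all fs roots as well as the log root tree itself.
 * Used while cleaning up aborted transactions, where the logs will never
 * be committed.
 */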
4263 static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4264 {
4265 	struct btrfs_root *gang[8];
4266 	u64 root_objectid = 0;
4267 	int ret;
4268 
4269 	spin_lock(&fs_info->fs_roots_radix_lock);
4270 	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4271 					     (void **)gang, root_objectid,
4272 					     ARRAY_SIZE(gang))) != 0) {
4273 		int i;
4274 
4275 		for (i = 0; i < ret; i++)
4276 			gang[i] = btrfs_grab_root(gang[i]);
4277 		spin_unlock(&fs_info->fs_roots_radix_lock);
4278 
4279 		for (i = 0; i < ret; i++) {
4280 			if (!gang[i])
4281 				continue;
4282 			root_objectid = gang[i]->root_key.objectid;
4283 			btrfs_free_log(NULL, gang[i]);
4284 			btrfs_put_root(gang[i]);
4285 		}
4286 		root_objectid++;
4287 		spin_lock(&fs_info->fs_roots_radix_lock);
4288 	}
4289 	spin_unlock(&fs_info->fs_roots_radix_lock);
4290 	btrfs_free_log_root_tree(NULL, fs_info);
4291 }
4292 
4293 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4294 {
4295 	struct btrfs_ordered_extent *ordered;
4296 
4297 	spin_lock(&root->ordered_extent_lock);
4298 	/*
	 * This will just short circuit the ordered completion code, which will
4300 	 * make sure the ordered extent gets properly cleaned up.
4301 	 */
4302 	list_for_each_entry(ordered, &root->ordered_extents,
4303 			    root_extent_list)
4304 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4305 	spin_unlock(&root->ordered_extent_lock);
4306 }
4307 
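/*
 * Flag every ordered extent on every ordered root with an IO error and
 * then wait for all ordered work to run, so that nothing completes as if
 * it had succeeded after the filesystem was aborted or flipped read-only.
 */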
4308 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4309 {
4310 	struct btrfs_root *root;
4311 	struct list_head splice;
4312 
4313 	INIT_LIST_HEAD(&splice);
4314 
4315 	spin_lock(&fs_info->ordered_root_lock);
4316 	list_splice_init(&fs_info->ordered_roots, &splice);
4317 	while (!list_empty(&splice)) {
4318 		root = list_first_entry(&splice, struct btrfs_root,
4319 					ordered_root);
4320 		list_move_tail(&root->ordered_root,
4321 			       &fs_info->ordered_roots);
4322 
4323 		spin_unlock(&fs_info->ordered_root_lock);
4324 		btrfs_destroy_ordered_extents(root);
4325 
4326 		cond_resched();
4327 		spin_lock(&fs_info->ordered_root_lock);
4328 	}
4329 	spin_unlock(&fs_info->ordered_root_lock);
4330 
4331 	/*
4332 	 * We need this here because if we've been flipped read-only we won't
4333 	 * get sync() from the umount, so we need to make sure any ordered
4334 	 * extents that haven't had their dirty pages IO start writeout yet
4335 	 * actually get run and error out properly.
4336 	 */
4337 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4338 }
4339 
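/*
 * Drop all delayed refs of an aborted transaction without running them.
 * For heads with must_insert_reserved set, the reserved bytes are moved
 * back to pinned and then unpinned through the error path so the space
 * accounting stays consistent.
 */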
4340 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4341 				      struct btrfs_fs_info *fs_info)
4342 {
4343 	struct rb_node *node;
4344 	struct btrfs_delayed_ref_root *delayed_refs;
4345 	struct btrfs_delayed_ref_node *ref;
4346 	int ret = 0;
4347 
4348 	delayed_refs = &trans->delayed_refs;
4349 
4350 	spin_lock(&delayed_refs->lock);
4351 	if (atomic_read(&delayed_refs->num_entries) == 0) {
4352 		spin_unlock(&delayed_refs->lock);
4353 		btrfs_debug(fs_info, "delayed_refs has NO entry");
4354 		return ret;
4355 	}
4356 
4357 	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4358 		struct btrfs_delayed_ref_head *head;
4359 		struct rb_node *n;
4360 		bool pin_bytes = false;
4361 
4362 		head = rb_entry(node, struct btrfs_delayed_ref_head,
4363 				href_node);
4364 		if (btrfs_delayed_ref_lock(delayed_refs, head))
4365 			continue;
4366 
4367 		spin_lock(&head->lock);
4368 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4369 			ref = rb_entry(n, struct btrfs_delayed_ref_node,
4370 				       ref_node);
4371 			ref->in_tree = 0;
4372 			rb_erase_cached(&ref->ref_node, &head->ref_tree);
4373 			RB_CLEAR_NODE(&ref->ref_node);
4374 			if (!list_empty(&ref->add_list))
4375 				list_del(&ref->add_list);
4376 			atomic_dec(&delayed_refs->num_entries);
4377 			btrfs_put_delayed_ref(ref);
4378 		}
4379 		if (head->must_insert_reserved)
4380 			pin_bytes = true;
4381 		btrfs_free_delayed_extent_op(head->extent_op);
4382 		btrfs_delete_ref_head(delayed_refs, head);
4383 		spin_unlock(&head->lock);
4384 		spin_unlock(&delayed_refs->lock);
4385 		mutex_unlock(&head->mutex);
4386 
4387 		if (pin_bytes) {
4388 			struct btrfs_block_group *cache;
4389 
4390 			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
4391 			BUG_ON(!cache);
4392 
4393 			spin_lock(&cache->space_info->lock);
4394 			spin_lock(&cache->lock);
4395 			cache->pinned += head->num_bytes;
4396 			btrfs_space_info_update_bytes_pinned(fs_info,
4397 				cache->space_info, head->num_bytes);
4398 			cache->reserved -= head->num_bytes;
4399 			cache->space_info->bytes_reserved -= head->num_bytes;
4400 			spin_unlock(&cache->lock);
4401 			spin_unlock(&cache->space_info->lock);
4402 			percpu_counter_add_batch(
4403 				&cache->space_info->total_bytes_pinned,
4404 				head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
4405 
4406 			btrfs_put_block_group(cache);
4407 
4408 			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
4409 				head->bytenr + head->num_bytes - 1);
4410 		}
4411 		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4412 		btrfs_put_delayed_ref_head(head);
4413 		cond_resched();
4414 		spin_lock(&delayed_refs->lock);
4415 	}
4416 	btrfs_qgroup_destroy_extent_records(trans);
4417 
4418 	spin_unlock(&delayed_refs->lock);
4419 
4420 	return ret;
4421 }
4422 
4423 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4424 {
4425 	struct btrfs_inode *btrfs_inode;
4426 	struct list_head splice;
4427 
4428 	INIT_LIST_HEAD(&splice);
4429 
4430 	spin_lock(&root->delalloc_lock);
4431 	list_splice_init(&root->delalloc_inodes, &splice);
4432 
4433 	while (!list_empty(&splice)) {
4434 		struct inode *inode = NULL;
4435 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4436 					       delalloc_inodes);
4437 		__btrfs_del_delalloc_inode(root, btrfs_inode);
4438 		spin_unlock(&root->delalloc_lock);
4439 
4440 		/*
		 * Make sure we get a live inode and that it won't disappear
		 * while we are working on it.
4443 		 */
4444 		inode = igrab(&btrfs_inode->vfs_inode);
4445 		if (inode) {
4446 			invalidate_inode_pages2(inode->i_mapping);
4447 			iput(inode);
4448 		}
4449 		spin_lock(&root->delalloc_lock);
4450 	}
4451 	spin_unlock(&root->delalloc_lock);
4452 }
4453 
4454 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4455 {
4456 	struct btrfs_root *root;
4457 	struct list_head splice;
4458 
4459 	INIT_LIST_HEAD(&splice);
4460 
4461 	spin_lock(&fs_info->delalloc_root_lock);
4462 	list_splice_init(&fs_info->delalloc_roots, &splice);
4463 	while (!list_empty(&splice)) {
4464 		root = list_first_entry(&splice, struct btrfs_root,
4465 					 delalloc_root);
4466 		root = btrfs_grab_root(root);
4467 		BUG_ON(!root);
4468 		spin_unlock(&fs_info->delalloc_root_lock);
4469 
4470 		btrfs_destroy_delalloc_inodes(root);
4471 		btrfs_put_root(root);
4472 
4473 		spin_lock(&fs_info->delalloc_root_lock);
4474 	}
4475 	spin_unlock(&fs_info->delalloc_root_lock);
4476 }
4477 
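/*
 * Clear @mark from @dirty_pages and release every extent buffer found in
 * the cleared ranges, waiting for any writeback in flight first. Used when
 * tearing down a transaction that will never be committed.
 */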
4478 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4479 					struct extent_io_tree *dirty_pages,
4480 					int mark)
4481 {
4482 	int ret;
4483 	struct extent_buffer *eb;
4484 	u64 start = 0;
4485 	u64 end;
4486 
4487 	while (1) {
4488 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4489 					    mark, NULL);
4490 		if (ret)
4491 			break;
4492 
4493 		clear_extent_bits(dirty_pages, start, end, mark);
4494 		while (start <= end) {
4495 			eb = find_extent_buffer(fs_info, start);
4496 			start += fs_info->nodesize;
4497 			if (!eb)
4498 				continue;
4499 			wait_on_extent_buffer_writeback(eb);
4500 
4501 			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4502 					       &eb->bflags))
4503 				clear_extent_buffer_dirty(eb);
4504 			free_extent_buffer_stale(eb);
4505 		}
4506 	}
4507 
4508 	return ret;
4509 }
4510 
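/*
 * Unpin all extent ranges tracked in @unpin after a transaction abort.
 * The unused_bg_unpin_mutex is held around each range to avoid racing
 * with btrfs_finish_extent_commit() over the same extents.
 */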
4511 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4512 				       struct extent_io_tree *unpin)
4513 {
4514 	u64 start;
4515 	u64 end;
4516 	int ret;
4517 
4518 	while (1) {
4519 		struct extent_state *cached_state = NULL;
4520 
4521 		/*
		 * btrfs_finish_extent_commit() may get the same range as ours
		 * between find_first_extent_bit and clear_extent_dirty. Hence,
		 * hold the unused_bg_unpin_mutex to avoid double unpinning of
		 * the same extent range.
4526 		 */
4527 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4528 		ret = find_first_extent_bit(unpin, 0, &start, &end,
4529 					    EXTENT_DIRTY, &cached_state);
4530 		if (ret) {
4531 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4532 			break;
4533 		}
4534 
4535 		clear_extent_dirty(unpin, start, end, &cached_state);
4536 		free_extent_state(cached_state);
4537 		btrfs_error_unpin_extent_range(fs_info, start, end);
4538 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4539 		cond_resched();
4540 	}
4541 
4542 	return 0;
4543 }
4544 
4545 static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4546 {
4547 	struct inode *inode;
4548 
4549 	inode = cache->io_ctl.inode;
4550 	if (inode) {
4551 		invalidate_inode_pages2(inode->i_mapping);
4552 		BTRFS_I(inode)->generation = 0;
4553 		cache->io_ctl.inode = NULL;
4554 		iput(inode);
4555 	}
4556 	ASSERT(cache->io_ctl.pages == NULL);
4557 	btrfs_put_block_group(cache);
4558 }
4559 
4560 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4561 			     struct btrfs_fs_info *fs_info)
4562 {
4563 	struct btrfs_block_group *cache;
4564 
4565 	spin_lock(&cur_trans->dirty_bgs_lock);
4566 	while (!list_empty(&cur_trans->dirty_bgs)) {
4567 		cache = list_first_entry(&cur_trans->dirty_bgs,
4568 					 struct btrfs_block_group,
4569 					 dirty_list);
4570 
4571 		if (!list_empty(&cache->io_list)) {
4572 			spin_unlock(&cur_trans->dirty_bgs_lock);
4573 			list_del_init(&cache->io_list);
4574 			btrfs_cleanup_bg_io(cache);
4575 			spin_lock(&cur_trans->dirty_bgs_lock);
4576 		}
4577 
4578 		list_del_init(&cache->dirty_list);
4579 		spin_lock(&cache->lock);
4580 		cache->disk_cache_state = BTRFS_DC_ERROR;
4581 		spin_unlock(&cache->lock);
4582 
4583 		spin_unlock(&cur_trans->dirty_bgs_lock);
4584 		btrfs_put_block_group(cache);
4585 		btrfs_delayed_refs_rsv_release(fs_info, 1);
4586 		spin_lock(&cur_trans->dirty_bgs_lock);
4587 	}
4588 	spin_unlock(&cur_trans->dirty_bgs_lock);
4589 
4590 	/*
	 * Refer to the definition of the io_bgs member for details on why it
	 * is safe to use it without any locking.
4593 	 */
4594 	while (!list_empty(&cur_trans->io_bgs)) {
4595 		cache = list_first_entry(&cur_trans->io_bgs,
4596 					 struct btrfs_block_group,
4597 					 io_list);
4598 
4599 		list_del_init(&cache->io_list);
4600 		spin_lock(&cache->lock);
4601 		cache->disk_cache_state = BTRFS_DC_ERROR;
4602 		spin_unlock(&cache->lock);
4603 		btrfs_cleanup_bg_io(cache);
4604 	}
4605 }
4606 
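/*
 * Clean up a single aborted transaction: error out its dirty block groups,
 * destroy its delayed refs, inodes and marked/pinned extents, and step its
 * state through COMMIT_START, UNBLOCKED and COMPLETED so that all waiters
 * get woken up.
 */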
4607 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4608 				   struct btrfs_fs_info *fs_info)
4609 {
4610 	struct btrfs_device *dev, *tmp;
4611 
4612 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4613 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4614 	ASSERT(list_empty(&cur_trans->io_bgs));
4615 
4616 	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4617 				 post_commit_list) {
4618 		list_del_init(&dev->post_commit_list);
4619 	}
4620 
4621 	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4622 
4623 	cur_trans->state = TRANS_STATE_COMMIT_START;
4624 	wake_up(&fs_info->transaction_blocked_wait);
4625 
4626 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4627 	wake_up(&fs_info->transaction_wait);
4628 
4629 	btrfs_destroy_delayed_inodes(fs_info);
4630 
4631 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4632 				     EXTENT_DIRTY);
4633 	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
4634 
	cur_trans->state = TRANS_STATE_COMPLETED;
4636 	wake_up(&cur_trans->commit_wait);
4637 }
4638 
4639 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4640 {
4641 	struct btrfs_transaction *t;
4642 
4643 	mutex_lock(&fs_info->transaction_kthread_mutex);
4644 
4645 	spin_lock(&fs_info->trans_lock);
4646 	while (!list_empty(&fs_info->trans_list)) {
4647 		t = list_first_entry(&fs_info->trans_list,
4648 				     struct btrfs_transaction, list);
4649 		if (t->state >= TRANS_STATE_COMMIT_START) {
4650 			refcount_inc(&t->use_count);
4651 			spin_unlock(&fs_info->trans_lock);
4652 			btrfs_wait_for_commit(fs_info, t->transid);
4653 			btrfs_put_transaction(t);
4654 			spin_lock(&fs_info->trans_lock);
4655 			continue;
4656 		}
4657 		if (t == fs_info->running_transaction) {
4658 			t->state = TRANS_STATE_COMMIT_DOING;
4659 			spin_unlock(&fs_info->trans_lock);
4660 			/*
4661 			 * We wait for 0 num_writers since we don't hold a trans
4662 			 * handle open currently for this transaction.
4663 			 */
4664 			wait_event(t->writer_wait,
4665 				   atomic_read(&t->num_writers) == 0);
4666 		} else {
4667 			spin_unlock(&fs_info->trans_lock);
4668 		}
4669 		btrfs_cleanup_one_transaction(t, fs_info);
4670 
4671 		spin_lock(&fs_info->trans_lock);
4672 		if (t == fs_info->running_transaction)
4673 			fs_info->running_transaction = NULL;
4674 		list_del_init(&t->list);
4675 		spin_unlock(&fs_info->trans_lock);
4676 
4677 		btrfs_put_transaction(t);
4678 		trace_btrfs_transaction_commit(fs_info->tree_root);
4679 		spin_lock(&fs_info->trans_lock);
4680 	}
4681 	spin_unlock(&fs_info->trans_lock);
4682 	btrfs_destroy_all_ordered_extents(fs_info);
4683 	btrfs_destroy_delayed_inodes(fs_info);
4684 	btrfs_assert_delayed_root_empty(fs_info);
4685 	btrfs_destroy_all_delalloc_inodes(fs_info);
4686 	btrfs_drop_all_logs(fs_info);
4687 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4688 
4689 	return 0;
4690 }
4691 
4692 static const struct extent_io_ops btree_extent_io_ops = {
4693 	/* mandatory callbacks */
4694 	.submit_bio_hook = btree_submit_bio_hook,
4695 	.readpage_end_io_hook = btree_readpage_end_io_hook,
4696 };
4697