xref: /openbmc/linux/fs/btrfs/super.c (revision 34facb04)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/blkdev.h>
7 #include <linux/module.h>
8 #include <linux/fs.h>
9 #include <linux/pagemap.h>
10 #include <linux/highmem.h>
11 #include <linux/time.h>
12 #include <linux/init.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/backing-dev.h>
16 #include <linux/mount.h>
17 #include <linux/writeback.h>
18 #include <linux/statfs.h>
19 #include <linux/compat.h>
20 #include <linux/parser.h>
21 #include <linux/ctype.h>
22 #include <linux/namei.h>
23 #include <linux/miscdevice.h>
24 #include <linux/magic.h>
25 #include <linux/slab.h>
26 #include <linux/cleancache.h>
27 #include <linux/ratelimit.h>
28 #include <linux/crc32c.h>
29 #include <linux/btrfs.h>
30 #include "delayed-inode.h"
31 #include "ctree.h"
32 #include "disk-io.h"
33 #include "transaction.h"
34 #include "btrfs_inode.h"
35 #include "print-tree.h"
36 #include "props.h"
37 #include "xattr.h"
38 #include "volumes.h"
39 #include "export.h"
40 #include "compression.h"
41 #include "rcu-string.h"
42 #include "dev-replace.h"
43 #include "free-space-cache.h"
44 #include "backref.h"
45 #include "space-info.h"
46 #include "sysfs.h"
47 #include "tests/btrfs-tests.h"
48 #include "block-group.h"
49 #include "discard.h"
50 
51 #include "qgroup.h"
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/btrfs.h>
54 
55 static const struct super_operations btrfs_super_ops;
56 
57 /*
58  * Types for mounting the default subvolume and a subvolume explicitly
59  * requested by subvol=/path. That way the callchain is straightforward and we
60  * don't have to play tricks with the mount options and recursive calls to
61  * btrfs_mount.
62  *
63  * The new btrfs_root_fs_type also serves as a tag for the bdev_holder.
64  */
65 static struct file_system_type btrfs_fs_type;
66 static struct file_system_type btrfs_root_fs_type;
67 
68 static int btrfs_remount(struct super_block *sb, int *flags, char *data);
69 
70 const char * __attribute_const__ btrfs_decode_error(int errno)
71 {
72 	const char *errstr = "unknown";
73 
74 	switch (errno) {
75 	case -ENOENT:		/* -2 */
76 		errstr = "No such entry";
77 		break;
78 	case -EIO:		/* -5 */
79 		errstr = "IO failure";
80 		break;
81 	case -ENOMEM:		/* -12 */
82 		errstr = "Out of memory";
83 		break;
84 	case -EEXIST:		/* -17 */
85 		errstr = "Object already exists";
86 		break;
87 	case -ENOSPC:		/* -28 */
88 		errstr = "No space left";
89 		break;
90 	case -EROFS:		/* -30 */
91 		errstr = "Readonly filesystem";
92 		break;
93 	case -EOPNOTSUPP:	/* -95 */
94 		errstr = "Operation not supported";
95 		break;
96 	case -EUCLEAN:		/* -117 */
97 		errstr = "Filesystem corrupted";
98 		break;
99 	case -EDQUOT:		/* -122 */
100 		errstr = "Quota exceeded";
101 		break;
102 	}
103 
104 	return errstr;
105 }
106 
107 /*
108  * __btrfs_handle_fs_error decodes expected errors from the caller and
109  * invokes the appropriate error response.
110  */
111 __cold
112 void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
113 		       unsigned int line, int errno, const char *fmt, ...)
114 {
115 	struct super_block *sb = fs_info->sb;
116 #ifdef CONFIG_PRINTK
117 	const char *errstr;
118 #endif
119 
120 	/*
121 	 * Special case: if the error is EROFS, and we're already
122 	 * under SB_RDONLY, then it is safe here.
123 	 */
124 	if (errno == -EROFS && sb_rdonly(sb))
125 		return;
126 
127 #ifdef CONFIG_PRINTK
128 	errstr = btrfs_decode_error(errno);
129 	if (fmt) {
130 		struct va_format vaf;
131 		va_list args;
132 
133 		va_start(args, fmt);
134 		vaf.fmt = fmt;
135 		vaf.va = &args;
136 
137 		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
138 			sb->s_id, function, line, errno, errstr, &vaf);
139 		va_end(args);
140 	} else {
141 		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
142 			sb->s_id, function, line, errno, errstr);
143 	}
144 #endif
145 
146 	/*
147 	 * Today we only save the error info to memory.  Long term we'll
148 	 * also send it down to the disk.
149 	 */
150 	set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
151 
152 	/* Don't go through full error handling during mount */
153 	if (!(sb->s_flags & SB_BORN))
154 		return;
155 
156 	if (sb_rdonly(sb))
157 		return;
158 
159 	btrfs_discard_stop(fs_info);
160 
161 	/* btrfs handles errors by forcing the filesystem readonly */
162 	sb->s_flags |= SB_RDONLY;
163 	btrfs_info(fs_info, "forced readonly");
164 	/*
165 	 * Note that a running device replace operation is not canceled here
166 	 * although there is no way to update the progress. It would add the
167 	 * risk of a deadlock, therefore the canceling is omitted. The only
168 	 * penalty is that some I/O remains active until the procedure
169 	 * completes. The next time when the filesystem is mounted writable
170 	 * again, the device replace operation continues.
171 	 */
172 }
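
/*
 * Note: callers normally invoke __btrfs_handle_fs_error() through the
 * btrfs_handle_fs_error() macro, which supplies __func__ and __LINE__ so
 * the message above identifies the exact call site.
 */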
173 
174 #ifdef CONFIG_PRINTK
175 static const char * const logtypes[] = {
176 	"emergency",
177 	"alert",
178 	"critical",
179 	"error",
180 	"warning",
181 	"notice",
182 	"info",
183 	"debug",
184 };
185 
186 
187 /*
188  * Use one ratelimit state per log level so that a flood of less important
189  * messages doesn't cause more important ones to be dropped.
190  */
191 static struct ratelimit_state printk_limits[] = {
192 	RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
193 	RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
194 	RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
195 	RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
196 	RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
197 	RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
198 	RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
199 	RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
200 };
201 
202 void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
203 {
204 	char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
205 	struct va_format vaf;
206 	va_list args;
207 	int kern_level;
208 	const char *type = logtypes[4];
209 	struct ratelimit_state *ratelimit = &printk_limits[4];
210 
211 	va_start(args, fmt);
212 
213 	while ((kern_level = printk_get_level(fmt)) != 0) {
214 		size_t size = printk_skip_level(fmt) - fmt;
215 
216 		if (kern_level >= '0' && kern_level <= '7') {
217 			memcpy(lvl, fmt, size);
218 			lvl[size] = '\0';
219 			type = logtypes[kern_level - '0'];
220 			ratelimit = &printk_limits[kern_level - '0'];
221 		}
222 		fmt += size;
223 	}
224 
225 	vaf.fmt = fmt;
226 	vaf.va = &args;
227 
228 	if (__ratelimit(ratelimit))
229 		printk("%sBTRFS %s (device %s): %pV\n", lvl, type,
230 			fs_info ? fs_info->sb->s_id : "<unknown>", &vaf);
231 
232 	va_end(args);
233 }
234 #endif
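
/*
 * The btrfs_crit()/btrfs_err()/btrfs_info() helpers (defined in ctree.h)
 * prepend a KERN_* level to the format string; btrfs_printk() above strips
 * it with printk_get_level() to select the matching logtypes[] name and
 * per-level ratelimit state.
 */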
235 
236 /*
237  * We only mark the transaction aborted and then set the file system read-only.
238  * This will prevent new transactions from starting or trying to join this
239  * one.
240  *
241  * This means that error recovery at the call site is limited to freeing
242  * any local memory allocations and passing the error code up without
243  * further cleanup. The transaction should complete as it normally would
244  * in the call path but will return -EIO.
245  *
246  * We'll complete the cleanup in btrfs_end_transaction and
247  * btrfs_commit_transaction.
248  */
249 __cold
250 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
251 			       const char *function,
252 			       unsigned int line, int errno)
253 {
254 	struct btrfs_fs_info *fs_info = trans->fs_info;
255 
256 	WRITE_ONCE(trans->aborted, errno);
257 	/* Nothing used. The other threads that have joined this
258 	 * transaction may be able to continue. */
259 	if (!trans->dirty && list_empty(&trans->new_bgs)) {
260 		const char *errstr;
261 
262 		errstr = btrfs_decode_error(errno);
263 		btrfs_warn(fs_info,
264 		           "%s:%d: Aborting unused transaction(%s).",
265 		           function, line, errstr);
266 		return;
267 	}
268 	WRITE_ONCE(trans->transaction->aborted, errno);
269 	/* Wake up anybody who may be waiting on this transaction */
270 	wake_up(&fs_info->transaction_wait);
271 	wake_up(&fs_info->transaction_blocked_wait);
272 	__btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
273 }
274 /*
275  * __btrfs_panic decodes unexpected, fatal errors from the caller,
276  * issues an alert, and either panics or BUGs, depending on mount options.
277  */
278 __cold
279 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
280 		   unsigned int line, int errno, const char *fmt, ...)
281 {
282 	char *s_id = "<unknown>";
283 	const char *errstr;
284 	struct va_format vaf = { .fmt = fmt };
285 	va_list args;
286 
287 	if (fs_info)
288 		s_id = fs_info->sb->s_id;
289 
290 	va_start(args, fmt);
291 	vaf.va = &args;
292 
293 	errstr = btrfs_decode_error(errno);
294 	if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
295 		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
296 			s_id, function, line, &vaf, errno, errstr);
297 
298 	btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
299 		   function, line, &vaf, errno, errstr);
300 	va_end(args);
301 	/* Caller calls BUG() */
302 }
303 
304 static void btrfs_put_super(struct super_block *sb)
305 {
306 	close_ctree(btrfs_sb(sb));
307 }
308 
309 enum {
310 	Opt_acl, Opt_noacl,
311 	Opt_clear_cache,
312 	Opt_commit_interval,
313 	Opt_compress,
314 	Opt_compress_force,
315 	Opt_compress_force_type,
316 	Opt_compress_type,
317 	Opt_degraded,
318 	Opt_device,
319 	Opt_fatal_errors,
320 	Opt_flushoncommit, Opt_noflushoncommit,
321 	Opt_inode_cache, Opt_noinode_cache,
322 	Opt_max_inline,
323 	Opt_barrier, Opt_nobarrier,
324 	Opt_datacow, Opt_nodatacow,
325 	Opt_datasum, Opt_nodatasum,
326 	Opt_defrag, Opt_nodefrag,
327 	Opt_discard, Opt_nodiscard,
328 	Opt_discard_mode,
329 	Opt_nologreplay,
330 	Opt_norecovery,
331 	Opt_ratio,
332 	Opt_rescan_uuid_tree,
333 	Opt_skip_balance,
334 	Opt_space_cache, Opt_no_space_cache,
335 	Opt_space_cache_version,
336 	Opt_ssd, Opt_nossd,
337 	Opt_ssd_spread, Opt_nossd_spread,
338 	Opt_subvol,
339 	Opt_subvol_empty,
340 	Opt_subvolid,
341 	Opt_thread_pool,
342 	Opt_treelog, Opt_notreelog,
343 	Opt_usebackuproot,
344 	Opt_user_subvol_rm_allowed,
345 
346 	/* Deprecated options */
347 	Opt_alloc_start,
348 	Opt_recovery,
349 	Opt_subvolrootid,
350 
351 	/* Debugging options */
352 	Opt_check_integrity,
353 	Opt_check_integrity_including_extent_data,
354 	Opt_check_integrity_print_mask,
355 	Opt_enospc_debug, Opt_noenospc_debug,
356 #ifdef CONFIG_BTRFS_DEBUG
357 	Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
358 #endif
359 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
360 	Opt_ref_verify,
361 #endif
362 	Opt_err,
363 };
364 
365 static const match_table_t tokens = {
366 	{Opt_acl, "acl"},
367 	{Opt_noacl, "noacl"},
368 	{Opt_clear_cache, "clear_cache"},
369 	{Opt_commit_interval, "commit=%u"},
370 	{Opt_compress, "compress"},
371 	{Opt_compress_type, "compress=%s"},
372 	{Opt_compress_force, "compress-force"},
373 	{Opt_compress_force_type, "compress-force=%s"},
374 	{Opt_degraded, "degraded"},
375 	{Opt_device, "device=%s"},
376 	{Opt_fatal_errors, "fatal_errors=%s"},
377 	{Opt_flushoncommit, "flushoncommit"},
378 	{Opt_noflushoncommit, "noflushoncommit"},
379 	{Opt_inode_cache, "inode_cache"},
380 	{Opt_noinode_cache, "noinode_cache"},
381 	{Opt_max_inline, "max_inline=%s"},
382 	{Opt_barrier, "barrier"},
383 	{Opt_nobarrier, "nobarrier"},
384 	{Opt_datacow, "datacow"},
385 	{Opt_nodatacow, "nodatacow"},
386 	{Opt_datasum, "datasum"},
387 	{Opt_nodatasum, "nodatasum"},
388 	{Opt_defrag, "autodefrag"},
389 	{Opt_nodefrag, "noautodefrag"},
390 	{Opt_discard, "discard"},
391 	{Opt_discard_mode, "discard=%s"},
392 	{Opt_nodiscard, "nodiscard"},
393 	{Opt_nologreplay, "nologreplay"},
394 	{Opt_norecovery, "norecovery"},
395 	{Opt_ratio, "metadata_ratio=%u"},
396 	{Opt_rescan_uuid_tree, "rescan_uuid_tree"},
397 	{Opt_skip_balance, "skip_balance"},
398 	{Opt_space_cache, "space_cache"},
399 	{Opt_no_space_cache, "nospace_cache"},
400 	{Opt_space_cache_version, "space_cache=%s"},
401 	{Opt_ssd, "ssd"},
402 	{Opt_nossd, "nossd"},
403 	{Opt_ssd_spread, "ssd_spread"},
404 	{Opt_nossd_spread, "nossd_spread"},
405 	{Opt_subvol, "subvol=%s"},
406 	{Opt_subvol_empty, "subvol="},
407 	{Opt_subvolid, "subvolid=%s"},
408 	{Opt_thread_pool, "thread_pool=%u"},
409 	{Opt_treelog, "treelog"},
410 	{Opt_notreelog, "notreelog"},
411 	{Opt_usebackuproot, "usebackuproot"},
412 	{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
413 
414 	/* Deprecated options */
415 	{Opt_alloc_start, "alloc_start=%s"},
416 	{Opt_recovery, "recovery"},
417 	{Opt_subvolrootid, "subvolrootid=%d"},
418 
419 	/* Debugging options */
420 	{Opt_check_integrity, "check_int"},
421 	{Opt_check_integrity_including_extent_data, "check_int_data"},
422 	{Opt_check_integrity_print_mask, "check_int_print_mask=%u"},
423 	{Opt_enospc_debug, "enospc_debug"},
424 	{Opt_noenospc_debug, "noenospc_debug"},
425 #ifdef CONFIG_BTRFS_DEBUG
426 	{Opt_fragment_data, "fragment=data"},
427 	{Opt_fragment_metadata, "fragment=metadata"},
428 	{Opt_fragment_all, "fragment=all"},
429 #endif
430 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
431 	{Opt_ref_verify, "ref_verify"},
432 #endif
433 	{Opt_err, NULL},
434 };
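
/*
 * For example, a mount string such as
 * "compress=zstd:3,discard=async,subvol=home" is split on commas below and
 * each piece is matched against this table by match_token().
 */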
435 
436 /*
437  * Regular mount options parser.  Everything that is needed only when
438  * reading in a new superblock is parsed here.
439  * XXX JDM: This needs to be cleaned up for remount.
440  */
441 int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
442 			unsigned long new_flags)
443 {
444 	substring_t args[MAX_OPT_ARGS];
445 	char *p, *num;
446 	u64 cache_gen;
447 	int intarg;
448 	int ret = 0;
449 	char *compress_type;
450 	bool compress_force = false;
451 	enum btrfs_compression_type saved_compress_type;
452 	bool saved_compress_force;
453 	int no_compress = 0;
454 
455 	cache_gen = btrfs_super_cache_generation(info->super_copy);
456 	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
457 		btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
458 	else if (cache_gen)
459 		btrfs_set_opt(info->mount_opt, SPACE_CACHE);
460 
461 	/*
462 	 * Even if the options are empty, we still need to do the extra check
463 	 * against the new flags.
464 	 */
465 	if (!options)
466 		goto check;
467 
468 	while ((p = strsep(&options, ",")) != NULL) {
469 		int token;
470 		if (!*p)
471 			continue;
472 
473 		token = match_token(p, tokens, args);
474 		switch (token) {
475 		case Opt_degraded:
476 			btrfs_info(info, "allowing degraded mounts");
477 			btrfs_set_opt(info->mount_opt, DEGRADED);
478 			break;
479 		case Opt_subvol:
480 		case Opt_subvol_empty:
481 		case Opt_subvolid:
482 		case Opt_subvolrootid:
483 		case Opt_device:
484 			/*
485 			 * These are parsed by btrfs_parse_subvol_options or
486 			 * btrfs_parse_device_options and can be ignored here.
487 			 */
488 			break;
489 		case Opt_nodatasum:
490 			btrfs_set_and_info(info, NODATASUM,
491 					   "setting nodatasum");
492 			break;
493 		case Opt_datasum:
494 			if (btrfs_test_opt(info, NODATASUM)) {
495 				if (btrfs_test_opt(info, NODATACOW))
496 					btrfs_info(info,
497 						   "setting datasum, datacow enabled");
498 				else
499 					btrfs_info(info, "setting datasum");
500 			}
501 			btrfs_clear_opt(info->mount_opt, NODATACOW);
502 			btrfs_clear_opt(info->mount_opt, NODATASUM);
503 			break;
504 		case Opt_nodatacow:
505 			if (!btrfs_test_opt(info, NODATACOW)) {
506 				if (!btrfs_test_opt(info, COMPRESS) ||
507 				    !btrfs_test_opt(info, FORCE_COMPRESS)) {
508 					btrfs_info(info,
509 						   "setting nodatacow, compression disabled");
510 				} else {
511 					btrfs_info(info, "setting nodatacow");
512 				}
513 			}
514 			btrfs_clear_opt(info->mount_opt, COMPRESS);
515 			btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
516 			btrfs_set_opt(info->mount_opt, NODATACOW);
517 			btrfs_set_opt(info->mount_opt, NODATASUM);
518 			break;
519 		case Opt_datacow:
520 			btrfs_clear_and_info(info, NODATACOW,
521 					     "setting datacow");
522 			break;
523 		case Opt_compress_force:
524 		case Opt_compress_force_type:
525 			compress_force = true;
526 			/* Fallthrough */
527 		case Opt_compress:
528 		case Opt_compress_type:
529 			saved_compress_type = btrfs_test_opt(info,
530 							     COMPRESS) ?
531 				info->compress_type : BTRFS_COMPRESS_NONE;
532 			saved_compress_force =
533 				btrfs_test_opt(info, FORCE_COMPRESS);
534 			if (token == Opt_compress ||
535 			    token == Opt_compress_force ||
536 			    strncmp(args[0].from, "zlib", 4) == 0) {
537 				compress_type = "zlib";
538 
539 				info->compress_type = BTRFS_COMPRESS_ZLIB;
540 				info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
541 				/*
542 				 * args[0] contains uninitialized data since
543 				 * for these tokens we don't expect any
544 				 * parameter.
545 				 */
546 				if (token != Opt_compress &&
547 				    token != Opt_compress_force)
548 					info->compress_level =
549 					  btrfs_compress_str2level(
550 							BTRFS_COMPRESS_ZLIB,
551 							args[0].from + 4);
552 				btrfs_set_opt(info->mount_opt, COMPRESS);
553 				btrfs_clear_opt(info->mount_opt, NODATACOW);
554 				btrfs_clear_opt(info->mount_opt, NODATASUM);
555 				no_compress = 0;
556 			} else if (strncmp(args[0].from, "lzo", 3) == 0) {
557 				compress_type = "lzo";
558 				info->compress_type = BTRFS_COMPRESS_LZO;
559 				btrfs_set_opt(info->mount_opt, COMPRESS);
560 				btrfs_clear_opt(info->mount_opt, NODATACOW);
561 				btrfs_clear_opt(info->mount_opt, NODATASUM);
562 				btrfs_set_fs_incompat(info, COMPRESS_LZO);
563 				no_compress = 0;
564 			} else if (strncmp(args[0].from, "zstd", 4) == 0) {
565 				compress_type = "zstd";
566 				info->compress_type = BTRFS_COMPRESS_ZSTD;
567 				info->compress_level =
568 					btrfs_compress_str2level(
569 							 BTRFS_COMPRESS_ZSTD,
570 							 args[0].from + 4);
571 				btrfs_set_opt(info->mount_opt, COMPRESS);
572 				btrfs_clear_opt(info->mount_opt, NODATACOW);
573 				btrfs_clear_opt(info->mount_opt, NODATASUM);
574 				btrfs_set_fs_incompat(info, COMPRESS_ZSTD);
575 				no_compress = 0;
576 			} else if (strncmp(args[0].from, "no", 2) == 0) {
577 				compress_type = "no";
578 				btrfs_clear_opt(info->mount_opt, COMPRESS);
579 				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
580 				compress_force = false;
581 				no_compress++;
582 			} else {
583 				ret = -EINVAL;
584 				goto out;
585 			}
586 
587 			if (compress_force) {
588 				btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
589 			} else {
590 				/*
591 				 * If we remount from compress-force=xxx to
592 				 * compress=xxx, we need to clear the FORCE_COMPRESS
593 				 * flag, otherwise there is no way for users
594 				 * to disable forced compression separately.
595 				 */
596 				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
597 			}
598 			if ((btrfs_test_opt(info, COMPRESS) &&
599 			     (info->compress_type != saved_compress_type ||
600 			      compress_force != saved_compress_force)) ||
601 			    (!btrfs_test_opt(info, COMPRESS) &&
602 			     no_compress == 1)) {
603 				btrfs_info(info, "%s %s compression, level %d",
604 					   (compress_force) ? "force" : "use",
605 					   compress_type, info->compress_level);
606 			}
607 			compress_force = false;
608 			break;
609 		case Opt_ssd:
610 			btrfs_set_and_info(info, SSD,
611 					   "enabling ssd optimizations");
612 			btrfs_clear_opt(info->mount_opt, NOSSD);
613 			break;
614 		case Opt_ssd_spread:
615 			btrfs_set_and_info(info, SSD,
616 					   "enabling ssd optimizations");
617 			btrfs_set_and_info(info, SSD_SPREAD,
618 					   "using spread ssd allocation scheme");
619 			btrfs_clear_opt(info->mount_opt, NOSSD);
620 			break;
621 		case Opt_nossd:
622 			btrfs_set_opt(info->mount_opt, NOSSD);
623 			btrfs_clear_and_info(info, SSD,
624 					     "not using ssd optimizations");
625 			/* Fallthrough */
626 		case Opt_nossd_spread:
627 			btrfs_clear_and_info(info, SSD_SPREAD,
628 					     "not using spread ssd allocation scheme");
629 			break;
630 		case Opt_barrier:
631 			btrfs_clear_and_info(info, NOBARRIER,
632 					     "turning on barriers");
633 			break;
634 		case Opt_nobarrier:
635 			btrfs_set_and_info(info, NOBARRIER,
636 					   "turning off barriers");
637 			break;
638 		case Opt_thread_pool:
639 			ret = match_int(&args[0], &intarg);
640 			if (ret) {
641 				goto out;
642 			} else if (intarg == 0) {
643 				ret = -EINVAL;
644 				goto out;
645 			}
646 			info->thread_pool_size = intarg;
647 			break;
648 		case Opt_max_inline:
649 			num = match_strdup(&args[0]);
650 			if (num) {
651 				info->max_inline = memparse(num, NULL);
652 				kfree(num);
653 
654 				if (info->max_inline) {
655 					info->max_inline = min_t(u64,
656 						info->max_inline,
657 						info->sectorsize);
658 				}
659 				btrfs_info(info, "max_inline at %llu",
660 					   info->max_inline);
661 			} else {
662 				ret = -ENOMEM;
663 				goto out;
664 			}
665 			break;
666 		case Opt_alloc_start:
667 			btrfs_info(info,
668 				"option alloc_start is obsolete, ignored");
669 			break;
670 		case Opt_acl:
671 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
672 			info->sb->s_flags |= SB_POSIXACL;
673 			break;
674 #else
675 			btrfs_err(info, "support for ACL not compiled in!");
676 			ret = -EINVAL;
677 			goto out;
678 #endif
679 		case Opt_noacl:
680 			info->sb->s_flags &= ~SB_POSIXACL;
681 			break;
682 		case Opt_notreelog:
683 			btrfs_set_and_info(info, NOTREELOG,
684 					   "disabling tree log");
685 			break;
686 		case Opt_treelog:
687 			btrfs_clear_and_info(info, NOTREELOG,
688 					     "enabling tree log");
689 			break;
690 		case Opt_norecovery:
691 		case Opt_nologreplay:
692 			btrfs_set_and_info(info, NOLOGREPLAY,
693 					   "disabling log replay at mount time");
694 			break;
695 		case Opt_flushoncommit:
696 			btrfs_set_and_info(info, FLUSHONCOMMIT,
697 					   "turning on flush-on-commit");
698 			break;
699 		case Opt_noflushoncommit:
700 			btrfs_clear_and_info(info, FLUSHONCOMMIT,
701 					     "turning off flush-on-commit");
702 			break;
703 		case Opt_ratio:
704 			ret = match_int(&args[0], &intarg);
705 			if (ret)
706 				goto out;
707 			info->metadata_ratio = intarg;
708 			btrfs_info(info, "metadata ratio %u",
709 				   info->metadata_ratio);
710 			break;
711 		case Opt_discard:
712 		case Opt_discard_mode:
713 			if (token == Opt_discard ||
714 			    strcmp(args[0].from, "sync") == 0) {
715 				btrfs_clear_opt(info->mount_opt, DISCARD_ASYNC);
716 				btrfs_set_and_info(info, DISCARD_SYNC,
717 						   "turning on sync discard");
718 			} else if (strcmp(args[0].from, "async") == 0) {
719 				btrfs_clear_opt(info->mount_opt, DISCARD_SYNC);
720 				btrfs_set_and_info(info, DISCARD_ASYNC,
721 						   "turning on async discard");
722 			} else {
723 				ret = -EINVAL;
724 				goto out;
725 			}
726 			break;
727 		case Opt_nodiscard:
728 			btrfs_clear_and_info(info, DISCARD_SYNC,
729 					     "turning off discard");
730 			btrfs_clear_and_info(info, DISCARD_ASYNC,
731 					     "turning off async discard");
732 			break;
733 		case Opt_space_cache:
734 		case Opt_space_cache_version:
735 			if (token == Opt_space_cache ||
736 			    strcmp(args[0].from, "v1") == 0) {
737 				btrfs_clear_opt(info->mount_opt,
738 						FREE_SPACE_TREE);
739 				btrfs_set_and_info(info, SPACE_CACHE,
740 					   "enabling disk space caching");
741 			} else if (strcmp(args[0].from, "v2") == 0) {
742 				btrfs_clear_opt(info->mount_opt,
743 						SPACE_CACHE);
744 				btrfs_set_and_info(info, FREE_SPACE_TREE,
745 						   "enabling free space tree");
746 			} else {
747 				ret = -EINVAL;
748 				goto out;
749 			}
750 			break;
751 		case Opt_rescan_uuid_tree:
752 			btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
753 			break;
754 		case Opt_no_space_cache:
755 			if (btrfs_test_opt(info, SPACE_CACHE)) {
756 				btrfs_clear_and_info(info, SPACE_CACHE,
757 					     "disabling disk space caching");
758 			}
759 			if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
760 				btrfs_clear_and_info(info, FREE_SPACE_TREE,
761 					     "disabling free space tree");
762 			}
763 			break;
764 		case Opt_inode_cache:
765 			btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
766 					   "enabling inode map caching");
767 			break;
768 		case Opt_noinode_cache:
769 			btrfs_clear_pending_and_info(info, INODE_MAP_CACHE,
770 					     "disabling inode map caching");
771 			break;
772 		case Opt_clear_cache:
773 			btrfs_set_and_info(info, CLEAR_CACHE,
774 					   "force clearing of disk cache");
775 			break;
776 		case Opt_user_subvol_rm_allowed:
777 			btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
778 			break;
779 		case Opt_enospc_debug:
780 			btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
781 			break;
782 		case Opt_noenospc_debug:
783 			btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
784 			break;
785 		case Opt_defrag:
786 			btrfs_set_and_info(info, AUTO_DEFRAG,
787 					   "enabling auto defrag");
788 			break;
789 		case Opt_nodefrag:
790 			btrfs_clear_and_info(info, AUTO_DEFRAG,
791 					     "disabling auto defrag");
792 			break;
793 		case Opt_recovery:
794 			btrfs_warn(info,
795 				   "'recovery' is deprecated, use 'usebackuproot' instead");
796 			/* fall through */
797 		case Opt_usebackuproot:
798 			btrfs_info(info,
799 				   "trying to use backup root at mount time");
800 			btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
801 			break;
802 		case Opt_skip_balance:
803 			btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
804 			break;
805 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
806 		case Opt_check_integrity_including_extent_data:
807 			btrfs_info(info,
808 				   "enabling check integrity including extent data");
809 			btrfs_set_opt(info->mount_opt,
810 				      CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
811 			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
812 			break;
813 		case Opt_check_integrity:
814 			btrfs_info(info, "enabling check integrity");
815 			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
816 			break;
817 		case Opt_check_integrity_print_mask:
818 			ret = match_int(&args[0], &intarg);
819 			if (ret)
820 				goto out;
821 			info->check_integrity_print_mask = intarg;
822 			btrfs_info(info, "check_integrity_print_mask 0x%x",
823 				   info->check_integrity_print_mask);
824 			break;
825 #else
826 		case Opt_check_integrity_including_extent_data:
827 		case Opt_check_integrity:
828 		case Opt_check_integrity_print_mask:
829 			btrfs_err(info,
830 				  "support for check_integrity* not compiled in!");
831 			ret = -EINVAL;
832 			goto out;
833 #endif
834 		case Opt_fatal_errors:
835 			if (strcmp(args[0].from, "panic") == 0)
836 				btrfs_set_opt(info->mount_opt,
837 					      PANIC_ON_FATAL_ERROR);
838 			else if (strcmp(args[0].from, "bug") == 0)
839 				btrfs_clear_opt(info->mount_opt,
840 					      PANIC_ON_FATAL_ERROR);
841 			else {
842 				ret = -EINVAL;
843 				goto out;
844 			}
845 			break;
846 		case Opt_commit_interval:
847 			intarg = 0;
848 			ret = match_int(&args[0], &intarg);
849 			if (ret)
850 				goto out;
851 			if (intarg == 0) {
852 				btrfs_info(info,
853 					   "using default commit interval %us",
854 					   BTRFS_DEFAULT_COMMIT_INTERVAL);
855 				intarg = BTRFS_DEFAULT_COMMIT_INTERVAL;
856 			} else if (intarg > 300) {
857 				btrfs_warn(info, "excessive commit interval %d",
858 					   intarg);
859 			}
860 			info->commit_interval = intarg;
861 			break;
862 #ifdef CONFIG_BTRFS_DEBUG
863 		case Opt_fragment_all:
864 			btrfs_info(info, "fragmenting all space");
865 			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
866 			btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
867 			break;
868 		case Opt_fragment_metadata:
869 			btrfs_info(info, "fragmenting metadata");
870 			btrfs_set_opt(info->mount_opt,
871 				      FRAGMENT_METADATA);
872 			break;
873 		case Opt_fragment_data:
874 			btrfs_info(info, "fragmenting data");
875 			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
876 			break;
877 #endif
878 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
879 		case Opt_ref_verify:
880 			btrfs_info(info, "doing ref verification");
881 			btrfs_set_opt(info->mount_opt, REF_VERIFY);
882 			break;
883 #endif
884 		case Opt_err:
885 			btrfs_err(info, "unrecognized mount option '%s'", p);
886 			ret = -EINVAL;
887 			goto out;
888 		default:
889 			break;
890 		}
891 	}
892 check:
893 	/*
894 	 * Extra check for the parsed options against the new mount flags
895 	 */
896 	if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
897 		btrfs_err(info,
898 			  "nologreplay must be used with ro mount option");
899 		ret = -EINVAL;
900 	}
901 out:
902 	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
903 	    !btrfs_test_opt(info, FREE_SPACE_TREE) &&
904 	    !btrfs_test_opt(info, CLEAR_CACHE)) {
905 		btrfs_err(info, "cannot disable free space tree");
906 		ret = -EINVAL;
907 
908 	}
909 	if (!ret && btrfs_test_opt(info, SPACE_CACHE))
910 		btrfs_info(info, "disk space caching is enabled");
911 	if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
912 		btrfs_info(info, "using free space tree");
913 	return ret;
914 }
915 
916 /*
917  * Parse mount options that are required early in the mount process.
918  *
919  * All other options will be parsed much later in the mount process and
920  * only when we need to allocate a new super block.
921  */
922 static int btrfs_parse_device_options(const char *options, fmode_t flags,
923 				      void *holder)
924 {
925 	substring_t args[MAX_OPT_ARGS];
926 	char *device_name, *opts, *orig, *p;
927 	struct btrfs_device *device = NULL;
928 	int error = 0;
929 
930 	lockdep_assert_held(&uuid_mutex);
931 
932 	if (!options)
933 		return 0;
934 
935 	/*
936 	 * strsep changes the string, duplicate it because btrfs_parse_options
937 	 * gets called later
938 	 */
939 	opts = kstrdup(options, GFP_KERNEL);
940 	if (!opts)
941 		return -ENOMEM;
942 	orig = opts;
943 
944 	while ((p = strsep(&opts, ",")) != NULL) {
945 		int token;
946 
947 		if (!*p)
948 			continue;
949 
950 		token = match_token(p, tokens, args);
951 		if (token == Opt_device) {
952 			device_name = match_strdup(&args[0]);
953 			if (!device_name) {
954 				error = -ENOMEM;
955 				goto out;
956 			}
957 			device = btrfs_scan_one_device(device_name, flags,
958 					holder);
959 			kfree(device_name);
960 			if (IS_ERR(device)) {
961 				error = PTR_ERR(device);
962 				goto out;
963 			}
964 		}
965 	}
966 
967 out:
968 	kfree(orig);
969 	return error;
970 }
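
/*
 * For example, mounting with "-o device=/dev/sdb,device=/dev/sdc" makes the
 * loop above scan and register each additional member device before the
 * filesystem is assembled in open_ctree().
 */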
971 
972 /*
973  * Parse mount options that are related to subvolume id
974  *
975  * The value is later passed to mount_subvol()
976  */
977 static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
978 		u64 *subvol_objectid)
979 {
980 	substring_t args[MAX_OPT_ARGS];
981 	char *opts, *orig, *p;
982 	int error = 0;
983 	u64 subvolid;
984 
985 	if (!options)
986 		return 0;
987 
988 	/*
989 	 * strsep changes the string, duplicate it because
990 	 * btrfs_parse_device_options gets called later
991 	 */
992 	opts = kstrdup(options, GFP_KERNEL);
993 	if (!opts)
994 		return -ENOMEM;
995 	orig = opts;
996 
997 	while ((p = strsep(&opts, ",")) != NULL) {
998 		int token;
999 		if (!*p)
1000 			continue;
1001 
1002 		token = match_token(p, tokens, args);
1003 		switch (token) {
1004 		case Opt_subvol:
1005 			kfree(*subvol_name);
1006 			*subvol_name = match_strdup(&args[0]);
1007 			if (!*subvol_name) {
1008 				error = -ENOMEM;
1009 				goto out;
1010 			}
1011 			break;
1012 		case Opt_subvolid:
1013 			error = match_u64(&args[0], &subvolid);
1014 			if (error)
1015 				goto out;
1016 
1017 			/* we want the original fs_tree */
1018 			if (subvolid == 0)
1019 				subvolid = BTRFS_FS_TREE_OBJECTID;
1020 
1021 			*subvol_objectid = subvolid;
1022 			break;
1023 		case Opt_subvolrootid:
1024 			pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
1025 			break;
1026 		default:
1027 			break;
1028 		}
1029 	}
1030 
1031 out:
1032 	kfree(orig);
1033 	return error;
1034 }
1035 
1036 char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1037 					  u64 subvol_objectid)
1038 {
1039 	struct btrfs_root *root = fs_info->tree_root;
1040 	struct btrfs_root *fs_root = NULL;
1041 	struct btrfs_root_ref *root_ref;
1042 	struct btrfs_inode_ref *inode_ref;
1043 	struct btrfs_key key;
1044 	struct btrfs_path *path = NULL;
1045 	char *name = NULL, *ptr;
1046 	u64 dirid;
1047 	int len;
1048 	int ret;
1049 
1050 	path = btrfs_alloc_path();
1051 	if (!path) {
1052 		ret = -ENOMEM;
1053 		goto err;
1054 	}
1055 	path->leave_spinning = 1;
1056 
1057 	name = kmalloc(PATH_MAX, GFP_KERNEL);
1058 	if (!name) {
1059 		ret = -ENOMEM;
1060 		goto err;
1061 	}
1062 	ptr = name + PATH_MAX - 1;
1063 	ptr[0] = '\0';
1064 
1065 	/*
1066 	 * Walk up the subvolume trees in the tree of tree roots by root
1067 	 * backrefs until we hit the top-level subvolume.
1068 	 */
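	/*
	 * The path is assembled right to left into the tail of the buffer:
	 * e.g. for a subvolume reachable at "snap/2020/root" the loop writes
	 * "/root", then "/2020/root", then "/snap/2020/root".
	 */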
1069 	while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
1070 		key.objectid = subvol_objectid;
1071 		key.type = BTRFS_ROOT_BACKREF_KEY;
1072 		key.offset = (u64)-1;
1073 
1074 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1075 		if (ret < 0) {
1076 			goto err;
1077 		} else if (ret > 0) {
1078 			ret = btrfs_previous_item(root, path, subvol_objectid,
1079 						  BTRFS_ROOT_BACKREF_KEY);
1080 			if (ret < 0) {
1081 				goto err;
1082 			} else if (ret > 0) {
1083 				ret = -ENOENT;
1084 				goto err;
1085 			}
1086 		}
1087 
1088 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1089 		subvol_objectid = key.offset;
1090 
1091 		root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1092 					  struct btrfs_root_ref);
1093 		len = btrfs_root_ref_name_len(path->nodes[0], root_ref);
1094 		ptr -= len + 1;
1095 		if (ptr < name) {
1096 			ret = -ENAMETOOLONG;
1097 			goto err;
1098 		}
1099 		read_extent_buffer(path->nodes[0], ptr + 1,
1100 				   (unsigned long)(root_ref + 1), len);
1101 		ptr[0] = '/';
1102 		dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
1103 		btrfs_release_path(path);
1104 
1105 		fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true);
1106 		if (IS_ERR(fs_root)) {
1107 			ret = PTR_ERR(fs_root);
1108 			fs_root = NULL;
1109 			goto err;
1110 		}
1111 
1112 		/*
1113 		 * Walk up the filesystem tree by inode refs until we hit the
1114 		 * root directory.
1115 		 */
1116 		while (dirid != BTRFS_FIRST_FREE_OBJECTID) {
1117 			key.objectid = dirid;
1118 			key.type = BTRFS_INODE_REF_KEY;
1119 			key.offset = (u64)-1;
1120 
1121 			ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1122 			if (ret < 0) {
1123 				goto err;
1124 			} else if (ret > 0) {
1125 				ret = btrfs_previous_item(fs_root, path, dirid,
1126 							  BTRFS_INODE_REF_KEY);
1127 				if (ret < 0) {
1128 					goto err;
1129 				} else if (ret > 0) {
1130 					ret = -ENOENT;
1131 					goto err;
1132 				}
1133 			}
1134 
1135 			btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1136 			dirid = key.offset;
1137 
1138 			inode_ref = btrfs_item_ptr(path->nodes[0],
1139 						   path->slots[0],
1140 						   struct btrfs_inode_ref);
1141 			len = btrfs_inode_ref_name_len(path->nodes[0],
1142 						       inode_ref);
1143 			ptr -= len + 1;
1144 			if (ptr < name) {
1145 				ret = -ENAMETOOLONG;
1146 				goto err;
1147 			}
1148 			read_extent_buffer(path->nodes[0], ptr + 1,
1149 					   (unsigned long)(inode_ref + 1), len);
1150 			ptr[0] = '/';
1151 			btrfs_release_path(path);
1152 		}
1153 		btrfs_put_root(fs_root);
1154 		fs_root = NULL;
1155 	}
1156 
1157 	btrfs_free_path(path);
1158 	if (ptr == name + PATH_MAX - 1) {
1159 		name[0] = '/';
1160 		name[1] = '\0';
1161 	} else {
1162 		memmove(name, ptr, name + PATH_MAX - ptr);
1163 	}
1164 	return name;
1165 
1166 err:
1167 	btrfs_put_root(fs_root);
1168 	btrfs_free_path(path);
1169 	kfree(name);
1170 	return ERR_PTR(ret);
1171 }
1172 
1173 static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
1174 {
1175 	struct btrfs_root *root = fs_info->tree_root;
1176 	struct btrfs_dir_item *di;
1177 	struct btrfs_path *path;
1178 	struct btrfs_key location;
1179 	u64 dir_id;
1180 
1181 	path = btrfs_alloc_path();
1182 	if (!path)
1183 		return -ENOMEM;
1184 	path->leave_spinning = 1;
1185 
1186 	/*
1187 	 * Find the "default" dir item which points to the root item that we
1188 	 * will mount by default if we haven't been given a specific subvolume
1189 	 * to mount.
1190 	 */
1191 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
1192 	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
1193 	if (IS_ERR(di)) {
1194 		btrfs_free_path(path);
1195 		return PTR_ERR(di);
1196 	}
1197 	if (!di) {
1198 		/*
1199 		 * Ok the default dir item isn't there.  This is weird since
1200 		 * it's always been there, but don't freak out, just try and
1201 		 * mount the top-level subvolume.
1202 		 */
1203 		btrfs_free_path(path);
1204 		*objectid = BTRFS_FS_TREE_OBJECTID;
1205 		return 0;
1206 	}
1207 
1208 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1209 	btrfs_free_path(path);
1210 	*objectid = location.objectid;
1211 	return 0;
1212 }
1213 
1214 static int btrfs_fill_super(struct super_block *sb,
1215 			    struct btrfs_fs_devices *fs_devices,
1216 			    void *data)
1217 {
1218 	struct inode *inode;
1219 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1220 	int err;
1221 
1222 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1223 	sb->s_magic = BTRFS_SUPER_MAGIC;
1224 	sb->s_op = &btrfs_super_ops;
1225 	sb->s_d_op = &btrfs_dentry_operations;
1226 	sb->s_export_op = &btrfs_export_ops;
1227 	sb->s_xattr = btrfs_xattr_handlers;
1228 	sb->s_time_gran = 1;
1229 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
1230 	sb->s_flags |= SB_POSIXACL;
1231 #endif
1232 	sb->s_flags |= SB_I_VERSION;
1233 	sb->s_iflags |= SB_I_CGROUPWB;
1234 
1235 	err = super_setup_bdi(sb);
1236 	if (err) {
1237 		btrfs_err(fs_info, "super_setup_bdi failed");
1238 		return err;
1239 	}
1240 
1241 	err = open_ctree(sb, fs_devices, (char *)data);
1242 	if (err) {
1243 		btrfs_err(fs_info, "open_ctree failed");
1244 		return err;
1245 	}
1246 
1247 	inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
1248 	if (IS_ERR(inode)) {
1249 		err = PTR_ERR(inode);
1250 		goto fail_close;
1251 	}
1252 
1253 	sb->s_root = d_make_root(inode);
1254 	if (!sb->s_root) {
1255 		err = -ENOMEM;
1256 		goto fail_close;
1257 	}
1258 
1259 	cleancache_init_fs(sb);
1260 	sb->s_flags |= SB_ACTIVE;
1261 	return 0;
1262 
1263 fail_close:
1264 	close_ctree(fs_info);
1265 	return err;
1266 }
1267 
1268 int btrfs_sync_fs(struct super_block *sb, int wait)
1269 {
1270 	struct btrfs_trans_handle *trans;
1271 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1272 	struct btrfs_root *root = fs_info->tree_root;
1273 
1274 	trace_btrfs_sync_fs(fs_info, wait);
1275 
1276 	if (!wait) {
1277 		filemap_flush(fs_info->btree_inode->i_mapping);
1278 		return 0;
1279 	}
1280 
1281 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1282 
1283 	trans = btrfs_attach_transaction_barrier(root);
1284 	if (IS_ERR(trans)) {
1285 		/* no transaction, don't bother */
1286 		if (PTR_ERR(trans) == -ENOENT) {
1287 			/*
1288 			 * Exit unless we have some pending changes
1289 			 * that need to go through commit
1290 			 */
1291 			if (fs_info->pending_changes == 0)
1292 				return 0;
1293 			/*
1294 			 * A non-blocking test if the fs is frozen. We must not
1295 			 * start a new transaction here otherwise a deadlock
1296 			 * happens. The pending operations are delayed to the
1297 			 * next commit after thawing.
1298 			 */
1299 			if (sb_start_write_trylock(sb))
1300 				sb_end_write(sb);
1301 			else
1302 				return 0;
1303 			trans = btrfs_start_transaction(root, 0);
1304 		}
1305 		if (IS_ERR(trans))
1306 			return PTR_ERR(trans);
1307 	}
1308 	return btrfs_commit_transaction(trans);
1309 }
1310 
1311 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1312 {
1313 	struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1314 	const char *compress_type;
1315 
1316 	if (btrfs_test_opt(info, DEGRADED))
1317 		seq_puts(seq, ",degraded");
1318 	if (btrfs_test_opt(info, NODATASUM))
1319 		seq_puts(seq, ",nodatasum");
1320 	if (btrfs_test_opt(info, NODATACOW))
1321 		seq_puts(seq, ",nodatacow");
1322 	if (btrfs_test_opt(info, NOBARRIER))
1323 		seq_puts(seq, ",nobarrier");
1324 	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1325 		seq_printf(seq, ",max_inline=%llu", info->max_inline);
1326 	if (info->thread_pool_size !=  min_t(unsigned long,
1327 					     num_online_cpus() + 2, 8))
1328 		seq_printf(seq, ",thread_pool=%u", info->thread_pool_size);
1329 	if (btrfs_test_opt(info, COMPRESS)) {
1330 		compress_type = btrfs_compress_type2str(info->compress_type);
1331 		if (btrfs_test_opt(info, FORCE_COMPRESS))
1332 			seq_printf(seq, ",compress-force=%s", compress_type);
1333 		else
1334 			seq_printf(seq, ",compress=%s", compress_type);
1335 		if (info->compress_level)
1336 			seq_printf(seq, ":%d", info->compress_level);
1337 	}
1338 	if (btrfs_test_opt(info, NOSSD))
1339 		seq_puts(seq, ",nossd");
1340 	if (btrfs_test_opt(info, SSD_SPREAD))
1341 		seq_puts(seq, ",ssd_spread");
1342 	else if (btrfs_test_opt(info, SSD))
1343 		seq_puts(seq, ",ssd");
1344 	if (btrfs_test_opt(info, NOTREELOG))
1345 		seq_puts(seq, ",notreelog");
1346 	if (btrfs_test_opt(info, NOLOGREPLAY))
1347 		seq_puts(seq, ",nologreplay");
1348 	if (btrfs_test_opt(info, FLUSHONCOMMIT))
1349 		seq_puts(seq, ",flushoncommit");
1350 	if (btrfs_test_opt(info, DISCARD_SYNC))
1351 		seq_puts(seq, ",discard");
1352 	if (btrfs_test_opt(info, DISCARD_ASYNC))
1353 		seq_puts(seq, ",discard=async");
1354 	if (!(info->sb->s_flags & SB_POSIXACL))
1355 		seq_puts(seq, ",noacl");
1356 	if (btrfs_test_opt(info, SPACE_CACHE))
1357 		seq_puts(seq, ",space_cache");
1358 	else if (btrfs_test_opt(info, FREE_SPACE_TREE))
1359 		seq_puts(seq, ",space_cache=v2");
1360 	else
1361 		seq_puts(seq, ",nospace_cache");
1362 	if (btrfs_test_opt(info, RESCAN_UUID_TREE))
1363 		seq_puts(seq, ",rescan_uuid_tree");
1364 	if (btrfs_test_opt(info, CLEAR_CACHE))
1365 		seq_puts(seq, ",clear_cache");
1366 	if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
1367 		seq_puts(seq, ",user_subvol_rm_allowed");
1368 	if (btrfs_test_opt(info, ENOSPC_DEBUG))
1369 		seq_puts(seq, ",enospc_debug");
1370 	if (btrfs_test_opt(info, AUTO_DEFRAG))
1371 		seq_puts(seq, ",autodefrag");
1372 	if (btrfs_test_opt(info, INODE_MAP_CACHE))
1373 		seq_puts(seq, ",inode_cache");
1374 	if (btrfs_test_opt(info, SKIP_BALANCE))
1375 		seq_puts(seq, ",skip_balance");
1376 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1377 	if (btrfs_test_opt(info, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA))
1378 		seq_puts(seq, ",check_int_data");
1379 	else if (btrfs_test_opt(info, CHECK_INTEGRITY))
1380 		seq_puts(seq, ",check_int");
1381 	if (info->check_integrity_print_mask)
1382 		seq_printf(seq, ",check_int_print_mask=%d",
1383 				info->check_integrity_print_mask);
1384 #endif
1385 	if (info->metadata_ratio)
1386 		seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
1387 	if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
1388 		seq_puts(seq, ",fatal_errors=panic");
1389 	if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
1390 		seq_printf(seq, ",commit=%u", info->commit_interval);
1391 #ifdef CONFIG_BTRFS_DEBUG
1392 	if (btrfs_test_opt(info, FRAGMENT_DATA))
1393 		seq_puts(seq, ",fragment=data");
1394 	if (btrfs_test_opt(info, FRAGMENT_METADATA))
1395 		seq_puts(seq, ",fragment=metadata");
1396 #endif
1397 	if (btrfs_test_opt(info, REF_VERIFY))
1398 		seq_puts(seq, ",ref_verify");
1399 	seq_printf(seq, ",subvolid=%llu",
1400 		  BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1401 	seq_puts(seq, ",subvol=");
1402 	seq_dentry(seq, dentry, " \t\n\\");
1403 	return 0;
1404 }
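
/*
 * The option string built above is what appears for a btrfs mount in
 * /proc/mounts and /proc/self/mountinfo.
 */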
1405 
1406 static int btrfs_test_super(struct super_block *s, void *data)
1407 {
1408 	struct btrfs_fs_info *p = data;
1409 	struct btrfs_fs_info *fs_info = btrfs_sb(s);
1410 
1411 	return fs_info->fs_devices == p->fs_devices;
1412 }
1413 
1414 static int btrfs_set_super(struct super_block *s, void *data)
1415 {
1416 	int err = set_anon_super(s, data);
1417 	if (!err)
1418 		s->s_fs_info = data;
1419 	return err;
1420 }
1421 
1422 /*
1423  * Subvolumes are identified by inode number 256 (BTRFS_FIRST_FREE_OBJECTID).
1424  */
1425 static inline int is_subvolume_inode(struct inode *inode)
1426 {
1427 	if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
1428 		return 1;
1429 	return 0;
1430 }
1431 
1432 static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1433 				   struct vfsmount *mnt)
1434 {
1435 	struct dentry *root;
1436 	int ret;
1437 
1438 	if (!subvol_name) {
1439 		if (!subvol_objectid) {
1440 			ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
1441 							  &subvol_objectid);
1442 			if (ret) {
1443 				root = ERR_PTR(ret);
1444 				goto out;
1445 			}
1446 		}
1447 		subvol_name = btrfs_get_subvol_name_from_objectid(
1448 					btrfs_sb(mnt->mnt_sb), subvol_objectid);
1449 		if (IS_ERR(subvol_name)) {
1450 			root = ERR_CAST(subvol_name);
1451 			subvol_name = NULL;
1452 			goto out;
1453 		}
1454 
1455 	}
1456 
1457 	root = mount_subtree(mnt, subvol_name);
1458 	/* mount_subtree() drops our reference on the vfsmount. */
1459 	mnt = NULL;
1460 
1461 	if (!IS_ERR(root)) {
1462 		struct super_block *s = root->d_sb;
1463 		struct btrfs_fs_info *fs_info = btrfs_sb(s);
1464 		struct inode *root_inode = d_inode(root);
1465 		u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
1466 
1467 		ret = 0;
1468 		if (!is_subvolume_inode(root_inode)) {
1469 			btrfs_err(fs_info, "'%s' is not a valid subvolume",
1470 			       subvol_name);
1471 			ret = -EINVAL;
1472 		}
1473 		if (subvol_objectid && root_objectid != subvol_objectid) {
1474 			/*
1475 			 * This will also catch a race condition where a
1476 			 * subvolume which was passed by ID is renamed and
1477 			 * another subvolume is renamed over the old location.
1478 			 */
1479 			btrfs_err(fs_info,
1480 				  "subvol '%s' does not match subvolid %llu",
1481 				  subvol_name, subvol_objectid);
1482 			ret = -EINVAL;
1483 		}
1484 		if (ret) {
1485 			dput(root);
1486 			root = ERR_PTR(ret);
1487 			deactivate_locked_super(s);
1488 		}
1489 	}
1490 
1491 out:
1492 	mntput(mnt);
1493 	kfree(subvol_name);
1494 	return root;
1495 }
1496 
1497 /*
1498  * Find a superblock for the given device / mount point.
1499  *
1500  * Note: This is based on mount_bdev from fs/super.c with a few additions
1501  *       for multiple device setup.  Make sure to keep it in sync.
1502  */
1503 static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
1504 		int flags, const char *device_name, void *data)
1505 {
1506 	struct block_device *bdev = NULL;
1507 	struct super_block *s;
1508 	struct btrfs_device *device = NULL;
1509 	struct btrfs_fs_devices *fs_devices = NULL;
1510 	struct btrfs_fs_info *fs_info = NULL;
1511 	void *new_sec_opts = NULL;
1512 	fmode_t mode = FMODE_READ;
1513 	int error = 0;
1514 
1515 	if (!(flags & SB_RDONLY))
1516 		mode |= FMODE_WRITE;
1517 
1518 	if (data) {
1519 		error = security_sb_eat_lsm_opts(data, &new_sec_opts);
1520 		if (error)
1521 			return ERR_PTR(error);
1522 	}
1523 
1524 	/*
1525 	 * Setup a dummy root and fs_info for test/set super.  This is because
1526 	 * we don't actually fill this stuff out until open_ctree, but we need it
1527 	 * for searching existing supers; open_ctree will then properly initialize
1528 	 * the file system specific settings later.  btrfs_init_fs_info initializes
1529 	 * the static elements of the fs_info (locks and such) to make cleanup easier
1530 	 * if we find a superblock with our given fs_devices later on at sget() time.
1531 	 */
1532 	fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
1533 	if (!fs_info) {
1534 		error = -ENOMEM;
1535 		goto error_sec_opts;
1536 	}
1537 	btrfs_init_fs_info(fs_info);
1538 
1539 	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1540 	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1541 	if (!fs_info->super_copy || !fs_info->super_for_commit) {
1542 		error = -ENOMEM;
1543 		goto error_fs_info;
1544 	}
1545 
1546 	mutex_lock(&uuid_mutex);
1547 	error = btrfs_parse_device_options(data, mode, fs_type);
1548 	if (error) {
1549 		mutex_unlock(&uuid_mutex);
1550 		goto error_fs_info;
1551 	}
1552 
1553 	device = btrfs_scan_one_device(device_name, mode, fs_type);
1554 	if (IS_ERR(device)) {
1555 		mutex_unlock(&uuid_mutex);
1556 		error = PTR_ERR(device);
1557 		goto error_fs_info;
1558 	}
1559 
1560 	fs_devices = device->fs_devices;
1561 	fs_info->fs_devices = fs_devices;
1562 
1563 	error = btrfs_open_devices(fs_devices, mode, fs_type);
1564 	mutex_unlock(&uuid_mutex);
1565 	if (error)
1566 		goto error_fs_info;
1567 
1568 	if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
1569 		error = -EACCES;
1570 		goto error_close_devices;
1571 	}
1572 
1573 	bdev = fs_devices->latest_bdev;
1574 	s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
1575 		 fs_info);
1576 	if (IS_ERR(s)) {
1577 		error = PTR_ERR(s);
1578 		goto error_close_devices;
1579 	}
1580 
1581 	if (s->s_root) {
1582 		btrfs_close_devices(fs_devices);
1583 		btrfs_free_fs_info(fs_info);
1584 		if ((flags ^ s->s_flags) & SB_RDONLY)
1585 			error = -EBUSY;
1586 	} else {
1587 		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1588 		btrfs_sb(s)->bdev_holder = fs_type;
1589 		if (!strstr(crc32c_impl(), "generic"))
1590 			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
1591 		error = btrfs_fill_super(s, fs_devices, data);
1592 	}
1593 	if (!error)
1594 		error = security_sb_set_mnt_opts(s, new_sec_opts, 0, NULL);
1595 	security_free_mnt_opts(&new_sec_opts);
1596 	if (error) {
1597 		deactivate_locked_super(s);
1598 		return ERR_PTR(error);
1599 	}
1600 
1601 	return dget(s->s_root);
1602 
1603 error_close_devices:
1604 	btrfs_close_devices(fs_devices);
1605 error_fs_info:
1606 	btrfs_free_fs_info(fs_info);
1607 error_sec_opts:
1608 	security_free_mnt_opts(&new_sec_opts);
1609 	return ERR_PTR(error);
1610 }
1611 
1612 /*
1613  * Mount function which is called by VFS layer.
1614  *
1615  * In order to allow mounting a subvolume directly, btrfs uses mount_subtree()
1616  * which needs vfsmount* of device's root (/).  This means device's root has to
1617  * be mounted internally in any case.
1618  *
1619  * Operation flow:
1620  *   1. Parse subvol id related options for later use in mount_subvol().
1621  *
1622  *   2. Mount device's root (/) by calling vfs_kern_mount().
1623  *
1624  *      NOTE: vfs_kern_mount() is used by VFS to call btrfs_mount() in the
1625  *      first place. In order to avoid calling btrfs_mount() again, we use
1626  *      different file_system_type which is not registered to VFS by
1627  *      register_filesystem() (btrfs_root_fs_type). As a result,
1628  *      btrfs_mount_root() is called. The return value will be used by
1629  *      mount_subtree() in mount_subvol().
1630  *
1631  *   3. Call mount_subvol() to get the dentry of the subvolume. Since
1632  *      "btrfs subvolume set-default" exists, mount_subvol() is always called.
1633  */
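/*
 * For example, "mount -o subvol=data /dev/sdb /mnt" first mounts the device
 * root internally via btrfs_root_fs_type and then grafts the dentry of the
 * "data" subvolume onto /mnt through mount_subtree() in mount_subvol().
 */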
1634 static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1635 		const char *device_name, void *data)
1636 {
1637 	struct vfsmount *mnt_root;
1638 	struct dentry *root;
1639 	char *subvol_name = NULL;
1640 	u64 subvol_objectid = 0;
1641 	int error = 0;
1642 
1643 	error = btrfs_parse_subvol_options(data, &subvol_name,
1644 					&subvol_objectid);
1645 	if (error) {
1646 		kfree(subvol_name);
1647 		return ERR_PTR(error);
1648 	}
1649 
1650 	/* mount device's root (/) */
1651 	mnt_root = vfs_kern_mount(&btrfs_root_fs_type, flags, device_name, data);
1652 	if (PTR_ERR_OR_ZERO(mnt_root) == -EBUSY) {
1653 		if (flags & SB_RDONLY) {
1654 			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1655 				flags & ~SB_RDONLY, device_name, data);
1656 		} else {
1657 			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1658 				flags | SB_RDONLY, device_name, data);
1659 			if (IS_ERR(mnt_root)) {
1660 				root = ERR_CAST(mnt_root);
1661 				kfree(subvol_name);
1662 				goto out;
1663 			}
1664 
1665 			down_write(&mnt_root->mnt_sb->s_umount);
1666 			error = btrfs_remount(mnt_root->mnt_sb, &flags, NULL);
1667 			up_write(&mnt_root->mnt_sb->s_umount);
1668 			if (error < 0) {
1669 				root = ERR_PTR(error);
1670 				mntput(mnt_root);
1671 				kfree(subvol_name);
1672 				goto out;
1673 			}
1674 		}
1675 	}
1676 	if (IS_ERR(mnt_root)) {
1677 		root = ERR_CAST(mnt_root);
1678 		kfree(subvol_name);
1679 		goto out;
1680 	}
1681 
1682 	/* mount_subvol() will free subvol_name and mnt_root */
1683 	root = mount_subvol(subvol_name, subvol_objectid, mnt_root);
1684 
1685 out:
1686 	return root;
1687 }
1688 
1689 static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
1690 				     u32 new_pool_size, u32 old_pool_size)
1691 {
1692 	if (new_pool_size == old_pool_size)
1693 		return;
1694 
1695 	fs_info->thread_pool_size = new_pool_size;
1696 
1697 	btrfs_info(fs_info, "resize thread pool %d -> %d",
1698 	       old_pool_size, new_pool_size);
1699 
1700 	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
1701 	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
1702 	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
1703 	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
1704 	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
1705 	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
1706 				new_pool_size);
1707 	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
1708 	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
1709 	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
1710 	btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size);
1711 	btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers,
1712 				new_pool_size);
1713 }
1714 
1715 static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info)
1716 {
1717 	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1718 }
1719 
1720 static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
1721 				       unsigned long old_opts, int flags)
1722 {
1723 	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1724 	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
1725 	     (flags & SB_RDONLY))) {
1726 		/* wait for any defraggers to finish */
1727 		wait_event(fs_info->transaction_wait,
1728 			   (atomic_read(&fs_info->defrag_running) == 0));
1729 		if (flags & SB_RDONLY)
1730 			sync_filesystem(fs_info->sb);
1731 	}
1732 }
1733 
1734 static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
1735 					 unsigned long old_opts)
1736 {
1737 	/*
1738 	 * We need to clean up all defraggable inodes if autodefrag has been
1739 	 * turned off or the filesystem is now read-only.
1740 	 */
1741 	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1742 	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) {
1743 		btrfs_cleanup_defrag_inodes(fs_info);
1744 	}
1745 
1746 	/* If we toggled discard=async, resume or clean up the discard work */
1747 	if (!btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1748 	    btrfs_test_opt(fs_info, DISCARD_ASYNC))
1749 		btrfs_discard_resume(fs_info);
1750 	else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1751 		 !btrfs_test_opt(fs_info, DISCARD_ASYNC))
1752 		btrfs_discard_cleanup(fs_info);
1753 
1754 	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1755 }
1756 
1757 static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1758 {
1759 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1760 	struct btrfs_root *root = fs_info->tree_root;
1761 	unsigned old_flags = sb->s_flags;
1762 	unsigned long old_opts = fs_info->mount_opt;
1763 	unsigned long old_compress_type = fs_info->compress_type;
1764 	u64 old_max_inline = fs_info->max_inline;
1765 	u32 old_thread_pool_size = fs_info->thread_pool_size;
1766 	u32 old_metadata_ratio = fs_info->metadata_ratio;
1767 	int ret;
1768 
1769 	sync_filesystem(sb);
1770 	btrfs_remount_prepare(fs_info);
1771 
1772 	if (data) {
1773 		void *new_sec_opts = NULL;
1774 
1775 		ret = security_sb_eat_lsm_opts(data, &new_sec_opts);
1776 		if (!ret)
1777 			ret = security_sb_remount(sb, new_sec_opts);
1778 		security_free_mnt_opts(&new_sec_opts);
1779 		if (ret)
1780 			goto restore;
1781 	}
1782 
1783 	ret = btrfs_parse_options(fs_info, data, *flags);
1784 	if (ret)
1785 		goto restore;
1786 
1787 	btrfs_remount_begin(fs_info, old_opts, *flags);
1788 	btrfs_resize_thread_pool(fs_info,
1789 		fs_info->thread_pool_size, old_thread_pool_size);
1790 
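	/* Nothing more to do if the ro/rw state is not changing. */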
1791 	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
1792 		goto out;
1793 
1794 	if (*flags & SB_RDONLY) {
1795 		/*
1796 		 * This also happens on 'umount -rf' or on shutdown, when the
1797 		 * filesystem is busy.
1798 		 */
1799 		cancel_work_sync(&fs_info->async_reclaim_work);
1800 
1801 		btrfs_discard_cleanup(fs_info);
1802 
1803 		/* wait for the uuid_scan task to finish */
1804 		down(&fs_info->uuid_tree_rescan_sem);
1805 		/* avoid complaints from lockdep et al. */
1806 		up(&fs_info->uuid_tree_rescan_sem);
1807 
1808 		sb->s_flags |= SB_RDONLY;
1809 
1810 		/*
1811 		 * Setting SB_RDONLY will put the cleaner thread to
1812 		 * sleep at the next loop if it's already active.
1813 		 * If it's already asleep, we'll leave unused block
1814 		 * groups on disk until we're mounted read-write again
1815 		 * unless we clean them up here.
1816 		 */
1817 		btrfs_delete_unused_bgs(fs_info);
1818 
1819 		btrfs_dev_replace_suspend_for_unmount(fs_info);
1820 		btrfs_scrub_cancel(fs_info);
1821 		btrfs_pause_balance(fs_info);
1822 
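		/*
		 * Commit the last transaction (this also writes out the super
		 * blocks) so the filesystem is consistent on disk while it
		 * stays read-only.
		 */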
1823 		ret = btrfs_commit_super(fs_info);
1824 		if (ret)
1825 			goto restore;
1826 	} else {
1827 		if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1828 			btrfs_err(fs_info,
1829 				"Remounting read-write after error is not allowed");
1830 			ret = -EINVAL;
1831 			goto restore;
1832 		}
1833 		if (fs_info->fs_devices->rw_devices == 0) {
1834 			ret = -EACCES;
1835 			goto restore;
1836 		}
1837 
1838 		if (!btrfs_check_rw_degradable(fs_info, NULL)) {
1839 			btrfs_warn(fs_info,
1840 		"too many missing devices, writable remount is not allowed");
1841 			ret = -EACCES;
1842 			goto restore;
1843 		}
1844 
1845 		if (btrfs_super_log_root(fs_info->super_copy) != 0) {
1846 			btrfs_warn(fs_info,
1847 		"mount required to replay tree-log, cannot remount read-write");
1848 			ret = -EINVAL;
1849 			goto restore;
1850 		}
1851 
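		/*
		 * Process pending orphan items on the fs roots before allowing
		 * writes again.
		 */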
1852 		ret = btrfs_cleanup_fs_roots(fs_info);
1853 		if (ret)
1854 			goto restore;
1855 
1856 		/* recover relocation */
1857 		mutex_lock(&fs_info->cleaner_mutex);
1858 		ret = btrfs_recover_relocation(root);
1859 		mutex_unlock(&fs_info->cleaner_mutex);
1860 		if (ret)
1861 			goto restore;
1862 
1863 		ret = btrfs_resume_balance_async(fs_info);
1864 		if (ret)
1865 			goto restore;
1866 
1867 		ret = btrfs_resume_dev_replace_async(fs_info);
1868 		if (ret) {
1869 			btrfs_warn(fs_info, "failed to resume dev_replace");
1870 			goto restore;
1871 		}
1872 
1873 		btrfs_qgroup_rescan_resume(fs_info);
1874 
1875 		if (!fs_info->uuid_root) {
1876 			btrfs_info(fs_info, "creating UUID tree");
1877 			ret = btrfs_create_uuid_tree(fs_info);
1878 			if (ret) {
1879 				btrfs_warn(fs_info,
1880 					   "failed to create the UUID tree %d",
1881 					   ret);
1882 				goto restore;
1883 			}
1884 		}
1885 		sb->s_flags &= ~SB_RDONLY;
1886 
1887 		set_bit(BTRFS_FS_OPEN, &fs_info->flags);
1888 	}
1889 out:
1890 	wake_up_process(fs_info->transaction_kthread);
1891 	btrfs_remount_cleanup(fs_info, old_opts);
1892 	return 0;
1893 
1894 restore:
1895 	/* We've hit an error - don't reset SB_RDONLY */
1896 	if (sb_rdonly(sb))
1897 		old_flags |= SB_RDONLY;
1898 	sb->s_flags = old_flags;
1899 	fs_info->mount_opt = old_opts;
1900 	fs_info->compress_type = old_compress_type;
1901 	fs_info->max_inline = old_max_inline;
1902 	btrfs_resize_thread_pool(fs_info,
1903 		old_thread_pool_size, fs_info->thread_pool_size);
1904 	fs_info->metadata_ratio = old_metadata_ratio;
1905 	btrfs_remount_cleanup(fs_info, old_opts);
1906 	return ret;
1907 }
1908 
1909 /* Used to sort the devices by max_avail (descending sort) */
1910 static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
1911 				       const void *dev_info2)
1912 {
1913 	if (((struct btrfs_device_info *)dev_info1)->max_avail >
1914 	    ((struct btrfs_device_info *)dev_info2)->max_avail)
1915 		return -1;
1916 	else if (((struct btrfs_device_info *)dev_info1)->max_avail <
1917 		 ((struct btrfs_device_info *)dev_info2)->max_avail)
1918 		return 1;
1919 	else
1920 		return 0;
1921 }
1922 
1923 /*
1924  * Sort the devices by max_avail, which holds the maximum free extent size
1925  * of each device (descending sort).
1926  */
1927 static inline void btrfs_descending_sort_devices(
1928 					struct btrfs_device_info *devices,
1929 					size_t nr_devices)
1930 {
1931 	sort(devices, nr_devices, sizeof(struct btrfs_device_info),
1932 	     btrfs_cmp_device_free_bytes, NULL);
1933 }
1934 
1935 /*
1936  * Helper to calculate the free space on the devices that can be used to
1937  * store file data.
1938  */
1939 static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
1940 					      u64 *free_bytes)
1941 {
1942 	struct btrfs_device_info *devices_info;
1943 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
1944 	struct btrfs_device *device;
1945 	u64 type;
1946 	u64 avail_space;
1947 	u64 min_stripe_size;
1948 	int num_stripes = 1;
1949 	int i = 0, nr_devices;
1950 	const struct btrfs_raid_attr *rattr;
1951 
1952 	/*
1953 	 * We aren't under the device list lock, so this is racy-ish, but good
1954 	 * enough for our purposes.
1955 	 */
1956 	nr_devices = fs_info->fs_devices->open_devices;
1957 	if (!nr_devices) {
1958 		smp_mb();
1959 		nr_devices = fs_info->fs_devices->open_devices;
1960 		ASSERT(nr_devices);
1961 		if (!nr_devices) {
1962 			*free_bytes = 0;
1963 			return 0;
1964 		}
1965 	}
1966 
1967 	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
1968 			       GFP_KERNEL);
1969 	if (!devices_info)
1970 		return -ENOMEM;
1971 
1972 	/* Calculate the minimum number of stripes for data space allocation */
1973 	type = btrfs_data_alloc_profile(fs_info);
1974 	rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)];
1975 
1976 	if (type & BTRFS_BLOCK_GROUP_RAID0)
1977 		num_stripes = nr_devices;
1978 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
1979 		num_stripes = 2;
1980 	else if (type & BTRFS_BLOCK_GROUP_RAID1C3)
1981 		num_stripes = 3;
1982 	else if (type & BTRFS_BLOCK_GROUP_RAID1C4)
1983 		num_stripes = 4;
1984 	else if (type & BTRFS_BLOCK_GROUP_RAID10)
1985 		num_stripes = 4;
1986 
1987 	/* Adjust for more than 1 stripe per device */
1988 	min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN;
1989 
1990 	rcu_read_lock();
1991 	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
1992 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
1993 						&device->dev_state) ||
1994 		    !device->bdev ||
1995 		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
1996 			continue;
1997 
1998 		if (i >= nr_devices)
1999 			break;
2000 
2001 		avail_space = device->total_bytes - device->bytes_used;
2002 
2003 		/* align with stripe_len */
2004 		avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
2005 
2006 		/*
2007 		 * In order to avoid overwriting the superblock on the drive,
2008 		 * btrfs starts at an offset of at least 1MB when doing chunk
2009 		 * allocation.
2010 		 *
2011 		 * This ensures we have at least min_stripe_size free space
2012 		 * after excluding 1MB.
2013 		 */
2014 		if (avail_space <= SZ_1M + min_stripe_size)
2015 			continue;
2016 
2017 		avail_space -= SZ_1M;
2018 
2019 		devices_info[i].dev = device;
2020 		devices_info[i].max_avail = avail_space;
2021 
2022 		i++;
2023 	}
2024 	rcu_read_unlock();
2025 
2026 	nr_devices = i;
2027 
2028 	btrfs_descending_sort_devices(devices_info, nr_devices);
2029 
2030 	i = nr_devices - 1;
2031 	avail_space = 0;
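	/*
	 * Simulate the chunk allocator: repeatedly take the remaining device
	 * with the least free space, count a stripe set of num_stripes devices
	 * limited by that device's free space, and deduct the same amount from
	 * the other members of the set.
	 */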
2032 	while (nr_devices >= rattr->devs_min) {
2033 		num_stripes = min(num_stripes, nr_devices);
2034 
2035 		if (devices_info[i].max_avail >= min_stripe_size) {
2036 			int j;
2037 			u64 alloc_size;
2038 
2039 			avail_space += devices_info[i].max_avail * num_stripes;
2040 			alloc_size = devices_info[i].max_avail;
2041 			for (j = i + 1 - num_stripes; j <= i; j++)
2042 				devices_info[j].max_avail -= alloc_size;
2043 		}
2044 		i--;
2045 		nr_devices--;
2046 	}
2047 
2048 	kfree(devices_info);
2049 	*free_bytes = avail_space;
2050 	return 0;
2051 }
2052 
2053 /*
2054  * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
2055  *
2056  * If there's a redundant raid level at DATA block groups, use the respective
2057  * multiplier to scale the sizes.
2058  *
2059  * Unused device space usage is based on simulating the chunk allocator
2060  * algorithm that respects the device sizes and order of allocations.  This is
2061  * a close approximation of the actual use but there are other factors that may
2062  * change the result (like a new metadata chunk).
2063  *
2064  * If metadata is exhausted, f_bavail will be 0.
2065  */
2066 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
2067 {
2068 	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
2069 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2070 	struct btrfs_space_info *found;
2071 	u64 total_used = 0;
2072 	u64 total_free_data = 0;
2073 	u64 total_free_meta = 0;
2074 	int bits = dentry->d_sb->s_blocksize_bits;
2075 	__be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
2076 	unsigned factor = 1;
2077 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
2078 	int ret;
2079 	u64 thresh = 0;
2080 	int mixed = 0;
2081 
2082 	rcu_read_lock();
2083 	list_for_each_entry_rcu(found, &fs_info->space_info, list) {
2084 		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
2085 			int i;
2086 
2087 			total_free_data += found->disk_total - found->disk_used;
2088 			total_free_data -=
2089 				btrfs_account_ro_block_groups_free_space(found);
2090 
2091 			for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2092 				if (!list_empty(&found->block_groups[i]))
2093 					factor = btrfs_bg_type_to_factor(
2094 						btrfs_raid_array[i].bg_flag);
2095 			}
2096 		}
2097 
2098 		/*
2099 		 * Metadata in mixed block group profiles is accounted in data
2100 		 */
2101 		if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
2102 			if (found->flags & BTRFS_BLOCK_GROUP_DATA)
2103 				mixed = 1;
2104 			else
2105 				total_free_meta += found->disk_total -
2106 					found->disk_used;
2107 		}
2108 
2109 		total_used += found->disk_used;
2110 	}
2111 
2112 	rcu_read_unlock();
2113 
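	/*
	 * The factor is the redundancy multiplier of the data profile, e.g. 2
	 * for RAID1 or DUP, so raw device bytes are scaled down to the space
	 * usable for data (an illustrative example: 2TiB of raw space with
	 * RAID1 data reports roughly 1TiB in f_blocks).
	 */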
2114 	buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
2115 	buf->f_blocks >>= bits;
2116 	buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
2117 
2118 	/* Account the global block reserve as used; it's in logical size already */
2119 	spin_lock(&block_rsv->lock);
2120 	/* Mixed block group accounting is not byte-accurate, avoid overflow */
2121 	if (buf->f_bfree >= block_rsv->size >> bits)
2122 		buf->f_bfree -= block_rsv->size >> bits;
2123 	else
2124 		buf->f_bfree = 0;
2125 	spin_unlock(&block_rsv->lock);
2126 
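	/*
	 * f_bavail is the free space inside already allocated data chunks plus
	 * an estimate of how much data could still be allocated from the
	 * unallocated device space, both scaled down by the redundancy factor
	 * and converted to blocks.
	 */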
2127 	buf->f_bavail = div_u64(total_free_data, factor);
2128 	ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
2129 	if (ret)
2130 		return ret;
2131 	buf->f_bavail += div_u64(total_free_data, factor);
2132 	buf->f_bavail = buf->f_bavail >> bits;
2133 
2134 	/*
2135 	 * We calculate the remaining metadata space minus the global reserve. If
2136 	 * this is (supposedly) smaller than zero, there's no space. But this
2137 	 * does not hold in practice: the exhausted state can occur while there
2138 	 * is still some positive delta. So we apply some guesswork and compare
2139 	 * the delta to a 4M threshold.  (The practically observed delta was ~2M.)
2140 	 *
2141 	 * We probably cannot calculate the exact threshold value because it
2142 	 * depends on the internal reservations requested by various
2143 	 * operations, so operations that consume a small amount of metadata may
2144 	 * succeed even when Avail is reported as zero. But this is better than
2145 	 * the other way around.
2146 	 */
2147 	thresh = SZ_4M;
2148 
2149 	/*
2150 	 * We only want to claim there's no available space if we can no longer
2151 	 * allocate chunks for our metadata profile and our global reserve will
2152 	 * not fit in the free metadata space.  If we aren't ->full then we
2153 	 * still can allocate chunks and thus are fine using the currently
2154 	 * calculated f_bavail.
2155 	 */
2156 	if (!mixed && block_rsv->space_info->full &&
2157 	    total_free_meta - thresh < block_rsv->size)
2158 		buf->f_bavail = 0;
2159 
2160 	buf->f_type = BTRFS_SUPER_MAGIC;
2161 	buf->f_bsize = dentry->d_sb->s_blocksize;
2162 	buf->f_namelen = BTRFS_NAME_LEN;
2163 
2164 	/* We treat it as constant endianness (it doesn't matter _which_)
2165 	 * because we want the fsid to come out the same whether mounted
2166 	 * on a big-endian or little-endian host. */
2167 	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
2168 	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
2169 	/* Mask in the root object ID too, to disambiguate subvols */
2170 	buf->f_fsid.val[0] ^=
2171 		BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
2172 	buf->f_fsid.val[1] ^=
2173 		BTRFS_I(d_inode(dentry))->root->root_key.objectid;
2174 
2175 	return 0;
2176 }
2177 
2178 static void btrfs_kill_super(struct super_block *sb)
2179 {
2180 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2181 	kill_anon_super(sb);
2182 	btrfs_free_fs_info(fs_info);
2183 }
2184 
2185 static struct file_system_type btrfs_fs_type = {
2186 	.owner		= THIS_MODULE,
2187 	.name		= "btrfs",
2188 	.mount		= btrfs_mount,
2189 	.kill_sb	= btrfs_kill_super,
2190 	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2191 };
2192 
2193 static struct file_system_type btrfs_root_fs_type = {
2194 	.owner		= THIS_MODULE,
2195 	.name		= "btrfs",
2196 	.mount		= btrfs_mount_root,
2197 	.kill_sb	= btrfs_kill_super,
2198 	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2199 };
2200 
2201 MODULE_ALIAS_FS("btrfs");
2202 
2203 static int btrfs_control_open(struct inode *inode, struct file *file)
2204 {
2205 	/*
2206 	 * The control file's private_data is used to hold the transaction when
2207 	 * it is started and to keep track of whether a transaction is already
2208 	 * in progress.
2209 	 */
2210 	file->private_data = NULL;
2211 	return 0;
2212 }
2213 
2214 /*
2215  * Used by /dev/btrfs-control for device ioctls.
2216  */
2217 static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2218 				unsigned long arg)
2219 {
2220 	struct btrfs_ioctl_vol_args *vol;
2221 	struct btrfs_device *device = NULL;
2222 	int ret = -ENOTTY;
2223 
2224 	if (!capable(CAP_SYS_ADMIN))
2225 		return -EPERM;
2226 
2227 	vol = memdup_user((void __user *)arg, sizeof(*vol));
2228 	if (IS_ERR(vol))
2229 		return PTR_ERR(vol);
2230 	vol->name[BTRFS_PATH_NAME_MAX] = '\0';
2231 
2232 	switch (cmd) {
2233 	case BTRFS_IOC_SCAN_DEV:
2234 		mutex_lock(&uuid_mutex);
2235 		device = btrfs_scan_one_device(vol->name, FMODE_READ,
2236 					       &btrfs_root_fs_type);
2237 		ret = PTR_ERR_OR_ZERO(device);
2238 		mutex_unlock(&uuid_mutex);
2239 		break;
2240 	case BTRFS_IOC_FORGET_DEV:
2241 		ret = btrfs_forget_devices(vol->name);
2242 		break;
2243 	case BTRFS_IOC_DEVICES_READY:
2244 		mutex_lock(&uuid_mutex);
2245 		device = btrfs_scan_one_device(vol->name, FMODE_READ,
2246 					       &btrfs_root_fs_type);
2247 		if (IS_ERR(device)) {
2248 			mutex_unlock(&uuid_mutex);
2249 			ret = PTR_ERR(device);
2250 			break;
2251 		}
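		/*
		 * Report 0 when all devices of the filesystem have been
		 * scanned (num_devices == total_devices), 1 otherwise.
		 */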
2252 		ret = !(device->fs_devices->num_devices ==
2253 			device->fs_devices->total_devices);
2254 		mutex_unlock(&uuid_mutex);
2255 		break;
2256 	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
2257 		ret = btrfs_ioctl_get_supported_features((void __user *)arg);
2258 		break;
2259 	}
2260 
2261 	kfree(vol);
2262 	return ret;
2263 }
2264 
2265 static int btrfs_freeze(struct super_block *sb)
2266 {
2267 	struct btrfs_trans_handle *trans;
2268 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2269 	struct btrfs_root *root = fs_info->tree_root;
2270 
2271 	set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2272 	/*
2273 	 * We don't need a barrier here; we'll wait for any transaction that
2274 	 * could be in progress on other threads (and do delayed iputs that
2275 	 * we want to avoid on a frozen filesystem), or do the commit
2276 	 * ourselves.
2277 	 */
2278 	trans = btrfs_attach_transaction_barrier(root);
2279 	if (IS_ERR(trans)) {
2280 		/* no transaction, don't bother */
2281 		if (PTR_ERR(trans) == -ENOENT)
2282 			return 0;
2283 		return PTR_ERR(trans);
2284 	}
2285 	return btrfs_commit_transaction(trans);
2286 }
2287 
2288 static int btrfs_unfreeze(struct super_block *sb)
2289 {
2290 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2291 
2292 	clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2293 	return 0;
2294 }
2295 
2296 static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2297 {
2298 	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2299 	struct btrfs_fs_devices *cur_devices;
2300 	struct btrfs_device *dev, *first_dev = NULL;
2301 	struct list_head *head;
2302 
2303 	/*
2304 	 * Lightweight locking of the devices. We should not need
2305 	 * device_list_mutex here as we only read the device data and the list
2306 	 * is protected by RCU.  Even if a device is deleted during the list
2307 	 * traversal, we'll get valid data; the freeing callback will wait at
2308 	 * least until the rcu_read_unlock.
2309 	 */
2310 	rcu_read_lock();
2311 	cur_devices = fs_info->fs_devices;
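	/*
	 * Walk the main device list and any seed device lists chained via
	 * ->seed, and pick the present device with the lowest devid as the
	 * name to report.
	 */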
2312 	while (cur_devices) {
2313 		head = &cur_devices->devices;
2314 		list_for_each_entry_rcu(dev, head, dev_list) {
2315 			if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
2316 				continue;
2317 			if (!dev->name)
2318 				continue;
2319 			if (!first_dev || dev->devid < first_dev->devid)
2320 				first_dev = dev;
2321 		}
2322 		cur_devices = cur_devices->seed;
2323 	}
2324 
2325 	if (first_dev)
2326 		seq_escape(m, rcu_str_deref(first_dev->name), " \t\n\\");
2327 	else
2328 		WARN_ON(1);
2329 	rcu_read_unlock();
2330 	return 0;
2331 }
2332 
2333 static const struct super_operations btrfs_super_ops = {
2334 	.drop_inode	= btrfs_drop_inode,
2335 	.evict_inode	= btrfs_evict_inode,
2336 	.put_super	= btrfs_put_super,
2337 	.sync_fs	= btrfs_sync_fs,
2338 	.show_options	= btrfs_show_options,
2339 	.show_devname	= btrfs_show_devname,
2340 	.alloc_inode	= btrfs_alloc_inode,
2341 	.destroy_inode	= btrfs_destroy_inode,
2342 	.free_inode	= btrfs_free_inode,
2343 	.statfs		= btrfs_statfs,
2344 	.remount_fs	= btrfs_remount,
2345 	.freeze_fs	= btrfs_freeze,
2346 	.unfreeze_fs	= btrfs_unfreeze,
2347 };
2348 
2349 static const struct file_operations btrfs_ctl_fops = {
2350 	.open = btrfs_control_open,
2351 	.unlocked_ioctl	 = btrfs_control_ioctl,
2352 	.compat_ioctl = compat_ptr_ioctl,
2353 	.owner	 = THIS_MODULE,
2354 	.llseek = noop_llseek,
2355 };
2356 
2357 static struct miscdevice btrfs_misc = {
2358 	.minor		= BTRFS_MINOR,
2359 	.name		= "btrfs-control",
2360 	.fops		= &btrfs_ctl_fops
2361 };
2362 
2363 MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
2364 MODULE_ALIAS("devname:btrfs-control");
2365 
2366 static int __init btrfs_interface_init(void)
2367 {
2368 	return misc_register(&btrfs_misc);
2369 }
2370 
2371 static __cold void btrfs_interface_exit(void)
2372 {
2373 	misc_deregister(&btrfs_misc);
2374 }
2375 
2376 static void __init btrfs_print_mod_info(void)
2377 {
2378 	static const char options[] = ""
2379 #ifdef CONFIG_BTRFS_DEBUG
2380 			", debug=on"
2381 #endif
2382 #ifdef CONFIG_BTRFS_ASSERT
2383 			", assert=on"
2384 #endif
2385 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2386 			", integrity-checker=on"
2387 #endif
2388 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
2389 			", ref-verify=on"
2390 #endif
2391 			;
2392 	pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
2393 }
2394 
2395 static int __init init_btrfs_fs(void)
2396 {
2397 	int err;
2398 
2399 	btrfs_props_init();
2400 
2401 	err = btrfs_init_sysfs();
2402 	if (err)
2403 		return err;
2404 
2405 	btrfs_init_compress();
2406 
2407 	err = btrfs_init_cachep();
2408 	if (err)
2409 		goto free_compress;
2410 
2411 	err = extent_io_init();
2412 	if (err)
2413 		goto free_cachep;
2414 
2415 	err = extent_state_cache_init();
2416 	if (err)
2417 		goto free_extent_io;
2418 
2419 	err = extent_map_init();
2420 	if (err)
2421 		goto free_extent_state_cache;
2422 
2423 	err = ordered_data_init();
2424 	if (err)
2425 		goto free_extent_map;
2426 
2427 	err = btrfs_delayed_inode_init();
2428 	if (err)
2429 		goto free_ordered_data;
2430 
2431 	err = btrfs_auto_defrag_init();
2432 	if (err)
2433 		goto free_delayed_inode;
2434 
2435 	err = btrfs_delayed_ref_init();
2436 	if (err)
2437 		goto free_auto_defrag;
2438 
2439 	err = btrfs_prelim_ref_init();
2440 	if (err)
2441 		goto free_delayed_ref;
2442 
2443 	err = btrfs_end_io_wq_init();
2444 	if (err)
2445 		goto free_prelim_ref;
2446 
2447 	err = btrfs_interface_init();
2448 	if (err)
2449 		goto free_end_io_wq;
2450 
2451 	btrfs_init_lockdep();
2452 
2453 	btrfs_print_mod_info();
2454 
2455 	err = btrfs_run_sanity_tests();
2456 	if (err)
2457 		goto unregister_ioctl;
2458 
2459 	err = register_filesystem(&btrfs_fs_type);
2460 	if (err)
2461 		goto unregister_ioctl;
2462 
2463 	return 0;
2464 
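/*
 * The error labels below undo the initialization steps in reverse order;
 * each label frees everything set up before the step that failed.
 */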
2465 unregister_ioctl:
2466 	btrfs_interface_exit();
2467 free_end_io_wq:
2468 	btrfs_end_io_wq_exit();
2469 free_prelim_ref:
2470 	btrfs_prelim_ref_exit();
2471 free_delayed_ref:
2472 	btrfs_delayed_ref_exit();
2473 free_auto_defrag:
2474 	btrfs_auto_defrag_exit();
2475 free_delayed_inode:
2476 	btrfs_delayed_inode_exit();
2477 free_ordered_data:
2478 	ordered_data_exit();
2479 free_extent_map:
2480 	extent_map_exit();
2481 free_extent_state_cache:
2482 	extent_state_cache_exit();
2483 free_extent_io:
2484 	extent_io_exit();
2485 free_cachep:
2486 	btrfs_destroy_cachep();
2487 free_compress:
2488 	btrfs_exit_compress();
2489 	btrfs_exit_sysfs();
2490 
2491 	return err;
2492 }
2493 
2494 static void __exit exit_btrfs_fs(void)
2495 {
2496 	btrfs_destroy_cachep();
2497 	btrfs_delayed_ref_exit();
2498 	btrfs_auto_defrag_exit();
2499 	btrfs_delayed_inode_exit();
2500 	btrfs_prelim_ref_exit();
2501 	ordered_data_exit();
2502 	extent_map_exit();
2503 	extent_state_cache_exit();
2504 	extent_io_exit();
2505 	btrfs_interface_exit();
2506 	btrfs_end_io_wq_exit();
2507 	unregister_filesystem(&btrfs_fs_type);
2508 	btrfs_exit_sysfs();
2509 	btrfs_cleanup_fs_uuids();
2510 	btrfs_exit_compress();
2511 }
2512 
2513 late_initcall(init_btrfs_fs);
2514 module_exit(exit_btrfs_fs);
2515 
2516 MODULE_LICENSE("GPL");
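/*
 * Soft dependencies: ask modprobe to load the checksum implementations before
 * btrfs so the algorithms are available at mount time.
 */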
2517 MODULE_SOFTDEP("pre: crc32c");
2518 MODULE_SOFTDEP("pre: xxhash64");
2519 MODULE_SOFTDEP("pre: sha256");
2520 MODULE_SOFTDEP("pre: blake2b-256");
2521