xref: /openbmc/linux/fs/xfs/xfs_super.c (revision 479965a2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 #include "xfs_pwork.h"
39 #include "xfs_ag.h"
40 #include "xfs_defer.h"
41 #include "xfs_attr_item.h"
42 #include "xfs_xattr.h"
43 #include "xfs_iunlink_item.h"
44 #include "xfs_dahash_test.h"
45 #include "scrub/stats.h"
46 
47 #include <linux/magic.h>
48 #include <linux/fs_context.h>
49 #include <linux/fs_parser.h>
50 
51 static const struct super_operations xfs_super_operations;
52 
53 static struct dentry *xfs_debugfs;	/* top-level xfs debugfs dir */
54 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
55 #ifdef DEBUG
56 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
57 #endif
58 
59 #ifdef CONFIG_HOTPLUG_CPU
60 static LIST_HEAD(xfs_mount_list);
61 static DEFINE_SPINLOCK(xfs_mount_list_lock);
62 
63 static inline void xfs_mount_list_add(struct xfs_mount *mp)
64 {
65 	spin_lock(&xfs_mount_list_lock);
66 	list_add(&mp->m_mount_list, &xfs_mount_list);
67 	spin_unlock(&xfs_mount_list_lock);
68 }
69 
70 static inline void xfs_mount_list_del(struct xfs_mount *mp)
71 {
72 	spin_lock(&xfs_mount_list_lock);
73 	list_del(&mp->m_mount_list);
74 	spin_unlock(&xfs_mount_list_lock);
75 }
76 #else /* !CONFIG_HOTPLUG_CPU */
77 static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
78 static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
79 #endif
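
/*
 * Editor's note: this mount list and lock are consumed by a CPU hotplug
 * "dead" callback registered at module init (later in this file), which
 * walks every mount and migrates per-cpu inodegc state off the departing
 * CPU.  Schematically, eliding the lock-drop dance of the real callback:
 *
 *	spin_lock(&xfs_mount_list_lock);
 *	list_for_each_entry(mp, &xfs_mount_list, m_mount_list)
 *		xfs_inodegc_cpu_dead(mp, cpu);
 *	spin_unlock(&xfs_mount_list_lock);
 */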
80 
81 enum xfs_dax_mode {
82 	XFS_DAX_INODE = 0,
83 	XFS_DAX_ALWAYS = 1,
84 	XFS_DAX_NEVER = 2,
85 };
86 
87 static void
88 xfs_mount_set_dax_mode(
89 	struct xfs_mount	*mp,
90 	enum xfs_dax_mode	mode)
91 {
92 	switch (mode) {
93 	case XFS_DAX_INODE:
94 		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
95 		break;
96 	case XFS_DAX_ALWAYS:
97 		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
98 		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
99 		break;
100 	case XFS_DAX_NEVER:
101 		mp->m_features |= XFS_FEAT_DAX_NEVER;
102 		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
103 		break;
104 	}
105 }
106 
107 static const struct constant_table dax_param_enums[] = {
108 	{"inode",	XFS_DAX_INODE },
109 	{"always",	XFS_DAX_ALWAYS },
110 	{"never",	XFS_DAX_NEVER },
111 	{}
112 };
113 
114 /*
115  * Table driven mount option parser.
116  */
117 enum {
118 	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
119 	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
120 	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
121 	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
122 	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
123 	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
124 	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
125 	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
126 	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
127 };
128 
129 static const struct fs_parameter_spec xfs_fs_parameters[] = {
130 	fsparam_u32("logbufs",		Opt_logbufs),
131 	fsparam_string("logbsize",	Opt_logbsize),
132 	fsparam_string("logdev",	Opt_logdev),
133 	fsparam_string("rtdev",		Opt_rtdev),
134 	fsparam_flag("wsync",		Opt_wsync),
135 	fsparam_flag("noalign",		Opt_noalign),
136 	fsparam_flag("swalloc",		Opt_swalloc),
137 	fsparam_u32("sunit",		Opt_sunit),
138 	fsparam_u32("swidth",		Opt_swidth),
139 	fsparam_flag("nouuid",		Opt_nouuid),
140 	fsparam_flag("grpid",		Opt_grpid),
141 	fsparam_flag("nogrpid",		Opt_nogrpid),
142 	fsparam_flag("bsdgroups",	Opt_bsdgroups),
143 	fsparam_flag("sysvgroups",	Opt_sysvgroups),
144 	fsparam_string("allocsize",	Opt_allocsize),
145 	fsparam_flag("norecovery",	Opt_norecovery),
146 	fsparam_flag("inode64",		Opt_inode64),
147 	fsparam_flag("inode32",		Opt_inode32),
148 	fsparam_flag("ikeep",		Opt_ikeep),
149 	fsparam_flag("noikeep",		Opt_noikeep),
150 	fsparam_flag("largeio",		Opt_largeio),
151 	fsparam_flag("nolargeio",	Opt_nolargeio),
152 	fsparam_flag("attr2",		Opt_attr2),
153 	fsparam_flag("noattr2",		Opt_noattr2),
154 	fsparam_flag("filestreams",	Opt_filestreams),
155 	fsparam_flag("quota",		Opt_quota),
156 	fsparam_flag("noquota",		Opt_noquota),
157 	fsparam_flag("usrquota",	Opt_usrquota),
158 	fsparam_flag("grpquota",	Opt_grpquota),
159 	fsparam_flag("prjquota",	Opt_prjquota),
160 	fsparam_flag("uquota",		Opt_uquota),
161 	fsparam_flag("gquota",		Opt_gquota),
162 	fsparam_flag("pquota",		Opt_pquota),
163 	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
164 	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
165 	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
166 	fsparam_flag("qnoenforce",	Opt_qnoenforce),
167 	fsparam_flag("discard",		Opt_discard),
168 	fsparam_flag("nodiscard",	Opt_nodiscard),
169 	fsparam_flag("dax",		Opt_dax),
170 	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
171 	{}
172 };
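
/*
 * Editor's note: "dax" is deliberately listed twice above.  A bare
 * "-o dax" matches the fsparam_flag entry (Opt_dax) and is treated as
 * dax=always in xfs_fs_parse_param() below, while "-o dax=inode",
 * "-o dax=always" and "-o dax=never" match the fsparam_enum entry
 * (Opt_dax_enum) and deliver one of the dax_param_enums values in
 * result.uint_32.
 */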
173 
174 struct proc_xfs_info {
175 	uint64_t	flag;
176 	char		*str;
177 };
178 
179 static int
180 xfs_fs_show_options(
181 	struct seq_file		*m,
182 	struct dentry		*root)
183 {
184 	static struct proc_xfs_info xfs_info_set[] = {
185 		/* the few simple ones we can get from the mount struct */
186 		{ XFS_FEAT_IKEEP,		",ikeep" },
187 		{ XFS_FEAT_WSYNC,		",wsync" },
188 		{ XFS_FEAT_NOALIGN,		",noalign" },
189 		{ XFS_FEAT_SWALLOC,		",swalloc" },
190 		{ XFS_FEAT_NOUUID,		",nouuid" },
191 		{ XFS_FEAT_NORECOVERY,		",norecovery" },
192 		{ XFS_FEAT_ATTR2,		",attr2" },
193 		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
194 		{ XFS_FEAT_GRPID,		",grpid" },
195 		{ XFS_FEAT_DISCARD,		",discard" },
196 		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
197 		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
198 		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
199 		{ 0, NULL }
200 	};
201 	struct xfs_mount	*mp = XFS_M(root->d_sb);
202 	struct proc_xfs_info	*xfs_infop;
203 
204 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
205 		if (mp->m_features & xfs_infop->flag)
206 			seq_puts(m, xfs_infop->str);
207 	}
208 
209 	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
210 
211 	if (xfs_has_allocsize(mp))
212 		seq_printf(m, ",allocsize=%dk",
213 			   (1 << mp->m_allocsize_log) >> 10);
214 
215 	if (mp->m_logbufs > 0)
216 		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
217 	if (mp->m_logbsize > 0)
218 		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
219 
220 	if (mp->m_logname)
221 		seq_show_option(m, "logdev", mp->m_logname);
222 	if (mp->m_rtname)
223 		seq_show_option(m, "rtdev", mp->m_rtname);
224 
225 	if (mp->m_dalign > 0)
226 		seq_printf(m, ",sunit=%d",
227 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
228 	if (mp->m_swidth > 0)
229 		seq_printf(m, ",swidth=%d",
230 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
231 
232 	if (mp->m_qflags & XFS_UQUOTA_ENFD)
233 		seq_puts(m, ",usrquota");
234 	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
235 		seq_puts(m, ",uqnoenforce");
236 
237 	if (mp->m_qflags & XFS_PQUOTA_ENFD)
238 		seq_puts(m, ",prjquota");
239 	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
240 		seq_puts(m, ",pqnoenforce");
241 
242 	if (mp->m_qflags & XFS_GQUOTA_ENFD)
243 		seq_puts(m, ",grpquota");
244 	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
245 		seq_puts(m, ",gqnoenforce");
246 
247 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
248 		seq_puts(m, ",noquota");
249 
250 	return 0;
251 }
252 
253 static bool
254 xfs_set_inode_alloc_perag(
255 	struct xfs_perag	*pag,
256 	xfs_ino_t		ino,
257 	xfs_agnumber_t		max_metadata)
258 {
259 	if (!xfs_is_inode32(pag->pag_mount)) {
260 		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
261 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
262 		return false;
263 	}
264 
265 	if (ino > XFS_MAXINUMBER_32) {
266 		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
267 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
268 		return false;
269 	}
270 
271 	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
272 	if (pag->pag_agno < max_metadata)
273 		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
274 	else
275 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
276 	return true;
277 }
278 
279 /*
280  * Set parameters for inode allocation heuristics, taking into account
281  * filesystem size and inode32/inode64 mount options; i.e. specifically
282  * whether or not XFS_FEAT_SMALL_INUMS is set.
283  *
284  * Inode allocation patterns are altered only if inode32 is requested
285  * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
286  * If altered, XFS_OPSTATE_INODE32 is set as well.
287  *
288  * An agcount independent of that in the mount structure is provided
289  * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
290  * to the potentially higher ag count.
291  *
292  * Returns the maximum AG index which may contain inodes.
293  */
294 xfs_agnumber_t
295 xfs_set_inode_alloc(
296 	struct xfs_mount *mp,
297 	xfs_agnumber_t	agcount)
298 {
299 	xfs_agnumber_t	index;
300 	xfs_agnumber_t	maxagi = 0;
301 	xfs_sb_t	*sbp = &mp->m_sb;
302 	xfs_agnumber_t	max_metadata;
303 	xfs_agino_t	agino;
304 	xfs_ino_t	ino;
305 
306 	/*
307 	 * Calculate how much should be reserved for inodes to meet
308 	 * the max inode percentage.  Used only for inode32.
309 	 */
310 	if (M_IGEO(mp)->maxicount) {
311 		uint64_t	icount;
312 
313 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
314 		do_div(icount, 100);
315 		icount += sbp->sb_agblocks - 1;
316 		do_div(icount, sbp->sb_agblocks);
317 		max_metadata = icount;
318 	} else {
319 		max_metadata = agcount;
320 	}
321 
322 	/* Get the last possible inode in the filesystem */
323 	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
324 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
325 
326 	/*
327 	 * If user asked for no more than 32-bit inodes, and the fs is
328 	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
329 	 * the allocator to accommodate the request.
330 	 */
331 	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
332 		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
333 	else
334 		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
335 
336 	for (index = 0; index < agcount; index++) {
337 		struct xfs_perag	*pag;
338 
339 		ino = XFS_AGINO_TO_INO(mp, index, agino);
340 
341 		pag = xfs_perag_get(mp, index);
342 		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
343 			maxagi++;
344 		xfs_perag_put(pag);
345 	}
346 
347 	return xfs_is_inode32(mp) ? maxagi : agcount;
348 }
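
/*
 * Editor's note: a worked example of the max_metadata computation above,
 * with made-up geometry.  Assume sb_dblocks = 2,000,000 blocks,
 * sb_imax_pct = 25 and sb_agblocks = 100,000:
 *
 *	icount = 2,000,000 * 25 / 100          = 500,000 blocks
 *	max_metadata = ceil(500,000 / 100,000) =       5 AGs
 *
 * On an inode32 mount, only the first five AGs would then be flagged as
 * preferring metadata (and hence inode chunks), keeping inode numbers
 * within 32 bits while leaving the higher AGs for file data.
 */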
349 
350 static int
351 xfs_setup_dax_always(
352 	struct xfs_mount	*mp)
353 {
354 	if (!mp->m_ddev_targp->bt_daxdev &&
355 	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
356 		xfs_alert(mp,
357 			"DAX unsupported by block device. Turning off DAX.");
358 		goto disable_dax;
359 	}
360 
361 	if (mp->m_super->s_blocksize != PAGE_SIZE) {
362 		xfs_alert(mp,
363 			"DAX not supported for blocksize. Turning off DAX.");
364 		goto disable_dax;
365 	}
366 
367 	if (xfs_has_reflink(mp) &&
368 	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
369 		xfs_alert(mp,
370 			"DAX and reflink cannot work with multi-partitions!");
371 		return -EINVAL;
372 	}
373 
374 	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
375 	return 0;
376 
377 disable_dax:
378 	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
379 	return 0;
380 }
381 
382 STATIC int
383 xfs_blkdev_get(
384 	xfs_mount_t		*mp,
385 	const char		*name,
386 	struct block_device	**bdevp)
387 {
388 	int			error = 0;
389 
390 	*bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE,
391 				    mp->m_super, &fs_holder_ops);
392 	if (IS_ERR(*bdevp)) {
393 		error = PTR_ERR(*bdevp);
394 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
395 	}
396 
397 	return error;
398 }
399 
400 STATIC void
401 xfs_shutdown_devices(
402 	struct xfs_mount	*mp)
403 {
404 	/*
405 	 * Udev is triggered whenever anyone closes a block device or unmounts
406 	 * a file systemm on a block device.
407 	 * The default udev rules invoke blkid to read the fs super and create
408 	 * symlinks to the bdev under /dev/disk.  For this, it uses buffered
409 	 * reads through the page cache.
410 	 *
411 	 * xfs_db also uses buffered reads to examine metadata.  There is no
412 	 * coordination between xfs_db and udev, which means that they can run
413 	 * concurrently.  Note there is no coordination between the kernel and
414 	 * blkid either.
415 	 *
416 	 * On a system with 64k pages, the page cache can cache the superblock
417 	 * and the root inode (and hence the root directory) with the same 64k
418 	 * page.  If udev spawns blkid after the mkfs and the system is busy
419 	 * enough that it is still running when xfs_db starts up, they'll both
420 	 * read from the same page in the pagecache.
421 	 *
422 	 * The unmount writes updated inode metadata to disk directly.  The XFS
423 	 * buffer cache does not use the bdev pagecache, so it needs to
424 	 * invalidate that pagecache on unmount.  If the above scenario occurs,
425 	 * the pagecache no longer reflects what's on disk, xfs_db reads the
426 	 * stale metadata, and fails to find /a.  Most of the time the lookup
427 	 * succeeds because closing a bdev invalidates the page cache, but
428 	 * when processes race, everyone loses.
429 	 */
430 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
431 		blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
432 		invalidate_bdev(mp->m_logdev_targp->bt_bdev);
433 	}
434 	if (mp->m_rtdev_targp) {
435 		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
436 		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
437 	}
438 	blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
439 	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
440 }
441 
442 /*
443  * The file system configurations are:
444  *	(1) device (partition) with data and internal log
445  *	(2) logical volume with data and log subvolumes.
446  *	(3) logical volume with data, log, and realtime subvolumes.
447  *
448  * We only have to handle opening the log and realtime volumes here if
449  * they are present.  The data subvolume has already been opened by
450  * get_sb_bdev() and is stored in sb->s_bdev.
451  */
452 STATIC int
453 xfs_open_devices(
454 	struct xfs_mount	*mp)
455 {
456 	struct super_block	*sb = mp->m_super;
457 	struct block_device	*ddev = sb->s_bdev;
458 	struct block_device	*logdev = NULL, *rtdev = NULL;
459 	int			error;
460 
461 	/*
462 	 * blkdev_put() can't be called under s_umount; see the comment
463 	 * in get_tree_bdev() for more details.
464 	 */
465 	up_write(&sb->s_umount);
466 
467 	/*
468 	 * Open real time and log devices - order is important.
469 	 */
470 	if (mp->m_logname) {
471 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
472 		if (error)
473 			goto out_relock;
474 	}
475 
476 	if (mp->m_rtname) {
477 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
478 		if (error)
479 			goto out_close_logdev;
480 
481 		if (rtdev == ddev || rtdev == logdev) {
482 			xfs_warn(mp,
483 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
484 			error = -EINVAL;
485 			goto out_close_rtdev;
486 		}
487 	}
488 
489 	/*
490 	 * Setup xfs_mount buffer target pointers
491 	 */
492 	error = -ENOMEM;
493 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
494 	if (!mp->m_ddev_targp)
495 		goto out_close_rtdev;
496 
497 	if (rtdev) {
498 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
499 		if (!mp->m_rtdev_targp)
500 			goto out_free_ddev_targ;
501 	}
502 
503 	if (logdev && logdev != ddev) {
504 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
505 		if (!mp->m_logdev_targp)
506 			goto out_free_rtdev_targ;
507 	} else {
508 		mp->m_logdev_targp = mp->m_ddev_targp;
509 	}
510 
511 	error = 0;
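	/*
	 * Success and error paths both converge on out_relock: s_umount
	 * was dropped above and must be retaken before we return.
	 */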
512 out_relock:
513 	down_write(&sb->s_umount);
514 	return error;
515 
516  out_free_rtdev_targ:
517 	if (mp->m_rtdev_targp)
518 		xfs_free_buftarg(mp->m_rtdev_targp);
519  out_free_ddev_targ:
520 	xfs_free_buftarg(mp->m_ddev_targp);
521  out_close_rtdev:
522 	if (rtdev)
523 		blkdev_put(rtdev, sb);
524  out_close_logdev:
525 	if (logdev && logdev != ddev)
526 		blkdev_put(logdev, sb);
527 	goto out_relock;
528 }
529 
530 /*
531  * Setup xfs_mount buffer target pointers based on superblock
532  */
533 STATIC int
534 xfs_setup_devices(
535 	struct xfs_mount	*mp)
536 {
537 	int			error;
538 
539 	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
540 	if (error)
541 		return error;
542 
543 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
544 		unsigned int	log_sector_size = BBSIZE;
545 
546 		if (xfs_has_sector(mp))
547 			log_sector_size = mp->m_sb.sb_logsectsize;
548 		error = xfs_setsize_buftarg(mp->m_logdev_targp,
549 					    log_sector_size);
550 		if (error)
551 			return error;
552 	}
553 	if (mp->m_rtdev_targp) {
554 		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
555 					    mp->m_sb.sb_sectsize);
556 		if (error)
557 			return error;
558 	}
559 
560 	return 0;
561 }
562 
563 STATIC int
564 xfs_init_mount_workqueues(
565 	struct xfs_mount	*mp)
566 {
567 	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
568 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
569 			1, mp->m_super->s_id);
570 	if (!mp->m_buf_workqueue)
571 		goto out;
572 
573 	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
574 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
575 			0, mp->m_super->s_id);
576 	if (!mp->m_unwritten_workqueue)
577 		goto out_destroy_buf;
578 
579 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
580 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
581 			0, mp->m_super->s_id);
582 	if (!mp->m_reclaim_workqueue)
583 		goto out_destroy_unwritten;
584 
585 	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
586 			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
587 			0, mp->m_super->s_id);
588 	if (!mp->m_blockgc_wq)
589 		goto out_destroy_reclaim;
590 
591 	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
592 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
593 			1, mp->m_super->s_id);
594 	if (!mp->m_inodegc_wq)
595 		goto out_destroy_blockgc;
596 
597 	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
598 			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
599 	if (!mp->m_sync_workqueue)
600 		goto out_destroy_inodegc;
601 
602 	return 0;
603 
604 out_destroy_inodegc:
605 	destroy_workqueue(mp->m_inodegc_wq);
606 out_destroy_blockgc:
607 	destroy_workqueue(mp->m_blockgc_wq);
608 out_destroy_reclaim:
609 	destroy_workqueue(mp->m_reclaim_workqueue);
610 out_destroy_unwritten:
611 	destroy_workqueue(mp->m_unwritten_workqueue);
612 out_destroy_buf:
613 	destroy_workqueue(mp->m_buf_workqueue);
614 out:
615 	return -ENOMEM;
616 }
617 
618 STATIC void
619 xfs_destroy_mount_workqueues(
620 	struct xfs_mount	*mp)
621 {
622 	destroy_workqueue(mp->m_sync_workqueue);
623 	destroy_workqueue(mp->m_blockgc_wq);
624 	destroy_workqueue(mp->m_inodegc_wq);
625 	destroy_workqueue(mp->m_reclaim_workqueue);
626 	destroy_workqueue(mp->m_unwritten_workqueue);
627 	destroy_workqueue(mp->m_buf_workqueue);
628 }
629 
630 static void
631 xfs_flush_inodes_worker(
632 	struct work_struct	*work)
633 {
634 	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
635 						   m_flush_inodes_work);
636 	struct super_block	*sb = mp->m_super;
637 
638 	if (down_read_trylock(&sb->s_umount)) {
639 		sync_inodes_sb(sb);
640 		up_read(&sb->s_umount);
641 	}
642 }
643 
644 /*
645  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
646  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
647  * for IO to complete so that we effectively throttle multiple callers to the
648  * rate at which IO is completing.
649  */
650 void
651 xfs_flush_inodes(
652 	struct xfs_mount	*mp)
653 {
654 	/*
655 	 * If flush_work() returns true then that means we waited for a flush
656 	 * which was already in progress.  Don't bother running another scan.
657 	 */
658 	if (flush_work(&mp->m_flush_inodes_work))
659 		return;
660 
661 	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
662 	flush_work(&mp->m_flush_inodes_work);
663 }
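
/*
 * Editor's note: callers (e.g. the ENOSPC retry paths in inode and
 * symlink creation) rely on the flush_work() pairing above: a caller
 * that arrives while a flush is already running simply waits for it
 * rather than queueing a redundant scan.
 */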
664 
665 /* Catch misguided souls that try to use this interface on XFS */
666 STATIC struct inode *
667 xfs_fs_alloc_inode(
668 	struct super_block	*sb)
669 {
670 	BUG();
671 	return NULL;
672 }
673 
674 /*
675  * Now that the generic code is guaranteed not to be accessing
676  * the linux inode, we can inactivate and reclaim the inode.
677  */
678 STATIC void
679 xfs_fs_destroy_inode(
680 	struct inode		*inode)
681 {
682 	struct xfs_inode	*ip = XFS_I(inode);
683 
684 	trace_xfs_destroy_inode(ip);
685 
686 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
687 	XFS_STATS_INC(ip->i_mount, vn_rele);
688 	XFS_STATS_INC(ip->i_mount, vn_remove);
689 	xfs_inode_mark_reclaimable(ip);
690 }
691 
692 static void
693 xfs_fs_dirty_inode(
694 	struct inode			*inode,
695 	int				flags)
696 {
697 	struct xfs_inode		*ip = XFS_I(inode);
698 	struct xfs_mount		*mp = ip->i_mount;
699 	struct xfs_trans		*tp;
700 
701 	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
702 		return;
703 
704 	/*
705 	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
706 	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
707 	 * in flags possibly together with I_DIRTY_SYNC.
708 	 */
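	/*
	 * Editor's note: concretely, the only combination that gets past
	 * the check below is I_DIRTY_TIME | I_DIRTY_SYNC.  A bare
	 * I_DIRTY_TIME or I_DIRTY_SYNC, or anything carrying I_DIRTY_PAGES
	 * or I_DIRTY_DATASYNC, returns without logging the timestamps.
	 */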
709 	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
710 		return;
711 
712 	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
713 		return;
714 	xfs_ilock(ip, XFS_ILOCK_EXCL);
715 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
716 	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
717 	xfs_trans_commit(tp);
718 }
719 
720 /*
721  * Slab object creation initialisation for the XFS inode.
722  * This covers only the idempotent fields in the XFS inode;
723  * all other fields need to be initialised on allocation
724  * from the slab. This avoids the need to repeatedly initialise
725  * fields in the xfs inode that are left in the initialised
726  * state when freeing the inode.
727  */
728 STATIC void
729 xfs_fs_inode_init_once(
730 	void			*inode)
731 {
732 	struct xfs_inode	*ip = inode;
733 
734 	memset(ip, 0, sizeof(struct xfs_inode));
735 
736 	/* vfs inode */
737 	inode_init_once(VFS_I(ip));
738 
739 	/* xfs inode */
740 	atomic_set(&ip->i_pincount, 0);
741 	spin_lock_init(&ip->i_flags_lock);
742 
743 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
744 		     "xfsino", ip->i_ino);
745 }
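
/*
 * Editor's sketch: this constructor is handed to the slab allocator when
 * the inode cache is created (that happens elsewhere in XFS; the flags
 * shown here are illustrative, not a quote of the real call):
 *
 *	xfs_inode_cache = kmem_cache_create("xfs_inode",
 *			sizeof(struct xfs_inode), 0,
 *			SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT,
 *			xfs_fs_inode_init_once);
 *
 * The fields initialised above then persist across every allocation and
 * free of an object in that cache.
 */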
746 
747 /*
748  * We do an unlocked check for XFS_IDONTCACHE here because we are already
749  * serialised against cache hits here via the inode->i_lock and igrab() in
750  * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
751  * racing with us, and it avoids needing to grab a spinlock here for every inode
752  * we drop the final reference on.
753  */
754 STATIC int
755 xfs_fs_drop_inode(
756 	struct inode		*inode)
757 {
758 	struct xfs_inode	*ip = XFS_I(inode);
759 
760 	/*
761 	 * If this unlinked inode is in the middle of recovery, don't
762 	 * drop the inode just yet; log recovery will take care of
763 	 * that.  See the comment for this inode flag.
764 	 */
765 	if (ip->i_flags & XFS_IRECOVERY) {
766 		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
767 		return 0;
768 	}
769 
770 	return generic_drop_inode(inode);
771 }
772 
773 static void
774 xfs_mount_free(
775 	struct xfs_mount	*mp)
776 {
777 	/*
778 	 * Free the buftargs here because blkdev_put needs to be called outside
779 	 * of sb->s_umount, which is held around the call to ->put_super.
780 	 */
781 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
782 		xfs_free_buftarg(mp->m_logdev_targp);
783 	if (mp->m_rtdev_targp)
784 		xfs_free_buftarg(mp->m_rtdev_targp);
785 	if (mp->m_ddev_targp)
786 		xfs_free_buftarg(mp->m_ddev_targp);
787 
788 	debugfs_remove(mp->m_debugfs);
789 	kfree(mp->m_rtname);
790 	kfree(mp->m_logname);
791 	kmem_free(mp);
792 }
793 
794 STATIC int
795 xfs_fs_sync_fs(
796 	struct super_block	*sb,
797 	int			wait)
798 {
799 	struct xfs_mount	*mp = XFS_M(sb);
800 	int			error;
801 
802 	trace_xfs_fs_sync_fs(mp, __return_address);
803 
804 	/*
805 	 * Doing anything during the async pass would be counterproductive.
806 	 */
807 	if (!wait)
808 		return 0;
809 
810 	error = xfs_log_force(mp, XFS_LOG_SYNC);
811 	if (error)
812 		return error;
813 
814 	if (laptop_mode) {
815 		/*
816 		 * The disk must be active because we're syncing.
817 		 * We schedule log work now (now that the disk is
818 		 * active) instead of later (when it might not be).
819 		 */
820 		flush_delayed_work(&mp->m_log->l_work);
821 	}
822 
823 	/*
824 	 * If we are called with page faults frozen out, it means we are about
825 	 * to freeze the transaction subsystem. Take the opportunity to shut
826 	 * down inodegc because once SB_FREEZE_FS is set it's too late to
827 	 * prevent inactivation races with freeze. The fs doesn't get called
828 	 * again by the freezing process until after SB_FREEZE_FS has been set,
829 	 * so it's now or never.  Same logic applies to speculative allocation
830 	 * garbage collection.
831 	 *
832 	 * We don't care if this is a normal syncfs call that does this or
833 	 * freeze that does this - we can run this multiple times without issue
834 	 * and we won't race with a restart because a restart can only occur
835 	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
836 	 */
837 	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
838 		xfs_inodegc_stop(mp);
839 		xfs_blockgc_stop(mp);
840 	}
841 
842 	return 0;
843 }
844 
845 STATIC int
846 xfs_fs_statfs(
847 	struct dentry		*dentry,
848 	struct kstatfs		*statp)
849 {
850 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
851 	xfs_sb_t		*sbp = &mp->m_sb;
852 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
853 	uint64_t		fakeinos, id;
854 	uint64_t		icount;
855 	uint64_t		ifree;
856 	uint64_t		fdblocks;
857 	xfs_extlen_t		lsize;
858 	int64_t			ffree;
859 
860 	/*
861 	 * Expedite background inodegc but don't wait. We do not want to block
862 	 * here waiting hours for a billion extent file to be truncated.
863 	 */
864 	xfs_inodegc_push(mp);
865 
866 	statp->f_type = XFS_SUPER_MAGIC;
867 	statp->f_namelen = MAXNAMELEN - 1;
868 
869 	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
870 	statp->f_fsid = u64_to_fsid(id);
871 
872 	icount = percpu_counter_sum(&mp->m_icount);
873 	ifree = percpu_counter_sum(&mp->m_ifree);
874 	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
875 
876 	spin_lock(&mp->m_sb_lock);
877 	statp->f_bsize = sbp->sb_blocksize;
878 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
879 	statp->f_blocks = sbp->sb_dblocks - lsize;
880 	spin_unlock(&mp->m_sb_lock);
881 
882 	/* make sure statp->f_bfree does not underflow */
883 	statp->f_bfree = max_t(int64_t, 0,
884 				fdblocks - xfs_fdblocks_unavailable(mp));
885 	statp->f_bavail = statp->f_bfree;
886 
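	/*
	 * Editor's note: any free block could later be allocated to an
	 * inode chunk, so advertise free blocks as potential inodes too;
	 * with 4k blocks and 512-byte inodes, each free block adds 8.
	 */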
887 	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
888 	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
889 	if (M_IGEO(mp)->maxicount)
890 		statp->f_files = min_t(typeof(statp->f_files),
891 					statp->f_files,
892 					M_IGEO(mp)->maxicount);
893 
894 	/* If sb_icount overshot maxicount, report actual allocation */
895 	statp->f_files = max_t(typeof(statp->f_files),
896 					statp->f_files,
897 					sbp->sb_icount);
898 
899 	/* make sure statp->f_ffree does not underflow */
900 	ffree = statp->f_files - (icount - ifree);
901 	statp->f_ffree = max_t(int64_t, ffree, 0);
902 
903 
904 	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
905 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
906 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
907 		xfs_qm_statvfs(ip, statp);
908 
909 	if (XFS_IS_REALTIME_MOUNT(mp) &&
910 	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
911 		s64	freertx;
912 
913 		statp->f_blocks = sbp->sb_rblocks;
914 		freertx = percpu_counter_sum_positive(&mp->m_frextents);
915 		statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
916 	}
917 
918 	return 0;
919 }
920 
921 STATIC void
922 xfs_save_resvblks(struct xfs_mount *mp)
923 {
924 	uint64_t resblks = 0;
925 
926 	mp->m_resblks_save = mp->m_resblks;
927 	xfs_reserve_blocks(mp, &resblks, NULL);
928 }
929 
930 STATIC void
931 xfs_restore_resvblks(struct xfs_mount *mp)
932 {
933 	uint64_t resblks;
934 
935 	if (mp->m_resblks_save) {
936 		resblks = mp->m_resblks_save;
937 		mp->m_resblks_save = 0;
938 	} else
939 		resblks = xfs_default_resblks(mp);
940 
941 	xfs_reserve_blocks(mp, &resblks, NULL);
942 }
943 
944 /*
945  * Second stage of a freeze. The data is already frozen so we only
946  * need to take care of the metadata. Once that's done sync the superblock
947  * to the log to dirty it in case of a crash while frozen. This ensures that we
948  * will recover the unlinked inode lists on the next mount.
949  */
950 STATIC int
951 xfs_fs_freeze(
952 	struct super_block	*sb)
953 {
954 	struct xfs_mount	*mp = XFS_M(sb);
955 	unsigned int		flags;
956 	int			ret;
957 
958 	/*
959 	 * The filesystem is now frozen far enough that memory reclaim
960 	 * cannot safely operate on the filesystem. Hence we need to
961 	 * set a GFP_NOFS context here to avoid recursion deadlocks.
962 	 */
963 	flags = memalloc_nofs_save();
964 	xfs_save_resvblks(mp);
965 	ret = xfs_log_quiesce(mp);
966 	memalloc_nofs_restore(flags);
967 
968 	/*
969 	 * For read-write filesystems, we need to restart the inodegc on error
970 	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
971 	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
972 	 * here, so we can restart safely without racing with a stop in
973 	 * xfs_fs_sync_fs().
974 	 */
975 	if (ret && !xfs_is_readonly(mp)) {
976 		xfs_blockgc_start(mp);
977 		xfs_inodegc_start(mp);
978 	}
979 
980 	return ret;
981 }
982 
983 STATIC int
984 xfs_fs_unfreeze(
985 	struct super_block	*sb)
986 {
987 	struct xfs_mount	*mp = XFS_M(sb);
988 
989 	xfs_restore_resvblks(mp);
990 	xfs_log_work_queue(mp);
991 
992 	/*
993 	 * Don't reactivate the inodegc worker on a readonly filesystem because
994 	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
995 	 * worker because there are no speculative preallocations on a readonly
996 	 * filesystem.
997 	 */
998 	if (!xfs_is_readonly(mp)) {
999 		xfs_blockgc_start(mp);
1000 		xfs_inodegc_start(mp);
1001 	}
1002 
1003 	return 0;
1004 }
1005 
1006 /*
1007  * This function fills in xfs_mount_t fields based on mount args.
1008  * Note: the superblock _has_ now been read in.
1009  */
1010 STATIC int
1011 xfs_finish_flags(
1012 	struct xfs_mount	*mp)
1013 {
1014 	/* Fail a mount where the logbuf is smaller than the log stripe */
1015 	if (xfs_has_logv2(mp)) {
1016 		if (mp->m_logbsize <= 0 &&
1017 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1018 			mp->m_logbsize = mp->m_sb.sb_logsunit;
1019 		} else if (mp->m_logbsize > 0 &&
1020 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
1021 			xfs_warn(mp,
1022 		"logbuf size must be greater than or equal to log stripe size");
1023 			return -EINVAL;
1024 		}
1025 	} else {
1026 		/* Fail a mount if the logbuf is larger than 32K */
1027 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1028 			xfs_warn(mp,
1029 		"logbuf size for version 1 logs must be 16K or 32K");
1030 			return -EINVAL;
1031 		}
1032 	}
1033 
1034 	/*
1035 	 * V5 filesystems always use attr2 format for attributes.
1036 	 */
1037 	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
1038 		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1039 			     "attr2 is always enabled for V5 filesystems.");
1040 		return -EINVAL;
1041 	}
1042 
1043 	/*
1044 	 * prohibit r/w mounts of read-only filesystems
1045 	 */
1046 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
1047 		xfs_warn(mp,
1048 			"cannot mount a read-only filesystem as read-write");
1049 		return -EROFS;
1050 	}
1051 
1052 	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
1053 	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
1054 	    !xfs_has_pquotino(mp)) {
1055 		xfs_warn(mp,
1056 		  "Super block does not support project and group quota together");
1057 		return -EINVAL;
1058 	}
1059 
1060 	return 0;
1061 }
1062 
1063 static int
1064 xfs_init_percpu_counters(
1065 	struct xfs_mount	*mp)
1066 {
1067 	int		error;
1068 
1069 	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1070 	if (error)
1071 		return -ENOMEM;
1072 
1073 	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1074 	if (error)
1075 		goto free_icount;
1076 
1077 	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1078 	if (error)
1079 		goto free_ifree;
1080 
1081 	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1082 	if (error)
1083 		goto free_fdblocks;
1084 
1085 	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
1086 	if (error)
1087 		goto free_delalloc;
1088 
1089 	return 0;
1090 
1091 free_delalloc:
1092 	percpu_counter_destroy(&mp->m_delalloc_blks);
1093 free_fdblocks:
1094 	percpu_counter_destroy(&mp->m_fdblocks);
1095 free_ifree:
1096 	percpu_counter_destroy(&mp->m_ifree);
1097 free_icount:
1098 	percpu_counter_destroy(&mp->m_icount);
1099 	return -ENOMEM;
1100 }
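
/*
 * Editor's sketch: these counters trade exactness for scalability.  Hot
 * paths typically read the cheap, possibly-stale approximation, while
 * statfs above pays for a precise sum:
 *
 *	s64 approx = percpu_counter_read_positive(&mp->m_fdblocks);
 *	s64 exact  = percpu_counter_sum(&mp->m_fdblocks);
 */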
1101 
1102 void
1103 xfs_reinit_percpu_counters(
1104 	struct xfs_mount	*mp)
1105 {
1106 	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1107 	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1108 	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1109 	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
1110 }
1111 
1112 static void
1113 xfs_destroy_percpu_counters(
1114 	struct xfs_mount	*mp)
1115 {
1116 	percpu_counter_destroy(&mp->m_icount);
1117 	percpu_counter_destroy(&mp->m_ifree);
1118 	percpu_counter_destroy(&mp->m_fdblocks);
1119 	ASSERT(xfs_is_shutdown(mp) ||
1120 	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1121 	percpu_counter_destroy(&mp->m_delalloc_blks);
1122 	percpu_counter_destroy(&mp->m_frextents);
1123 }
1124 
1125 static int
1126 xfs_inodegc_init_percpu(
1127 	struct xfs_mount	*mp)
1128 {
1129 	struct xfs_inodegc	*gc;
1130 	int			cpu;
1131 
1132 	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1133 	if (!mp->m_inodegc)
1134 		return -ENOMEM;
1135 
1136 	for_each_possible_cpu(cpu) {
1137 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1138 #if defined(DEBUG) || defined(XFS_WARN)
1139 		gc->cpu = cpu;
1140 #endif
1141 		init_llist_head(&gc->list);
1142 		gc->items = 0;
1143 		gc->error = 0;
1144 		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1145 	}
1146 	return 0;
1147 }
1148 
1149 static void
1150 xfs_inodegc_free_percpu(
1151 	struct xfs_mount	*mp)
1152 {
1153 	if (!mp->m_inodegc)
1154 		return;
1155 	free_percpu(mp->m_inodegc);
1156 }
1157 
1158 static void
1159 xfs_fs_put_super(
1160 	struct super_block	*sb)
1161 {
1162 	struct xfs_mount	*mp = XFS_M(sb);
1163 
1164 	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
1165 	xfs_filestream_unmount(mp);
1166 	xfs_unmountfs(mp);
1167 
1168 	xfs_freesb(mp);
1169 	xchk_mount_stats_free(mp);
1170 	free_percpu(mp->m_stats.xs_stats);
1171 	xfs_mount_list_del(mp);
1172 	xfs_inodegc_free_percpu(mp);
1173 	xfs_destroy_percpu_counters(mp);
1174 	xfs_destroy_mount_workqueues(mp);
1175 	xfs_shutdown_devices(mp);
1176 }
1177 
1178 static long
1179 xfs_fs_nr_cached_objects(
1180 	struct super_block	*sb,
1181 	struct shrink_control	*sc)
1182 {
1183 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1184 	if (WARN_ON_ONCE(!sb->s_fs_info))
1185 		return 0;
1186 	return xfs_reclaim_inodes_count(XFS_M(sb));
1187 }
1188 
1189 static long
1190 xfs_fs_free_cached_objects(
1191 	struct super_block	*sb,
1192 	struct shrink_control	*sc)
1193 {
1194 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1195 }
1196 
1197 static void
1198 xfs_fs_shutdown(
1199 	struct super_block	*sb)
1200 {
1201 	xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
1202 }
1203 
1204 static const struct super_operations xfs_super_operations = {
1205 	.alloc_inode		= xfs_fs_alloc_inode,
1206 	.destroy_inode		= xfs_fs_destroy_inode,
1207 	.dirty_inode		= xfs_fs_dirty_inode,
1208 	.drop_inode		= xfs_fs_drop_inode,
1209 	.put_super		= xfs_fs_put_super,
1210 	.sync_fs		= xfs_fs_sync_fs,
1211 	.freeze_fs		= xfs_fs_freeze,
1212 	.unfreeze_fs		= xfs_fs_unfreeze,
1213 	.statfs			= xfs_fs_statfs,
1214 	.show_options		= xfs_fs_show_options,
1215 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1216 	.free_cached_objects	= xfs_fs_free_cached_objects,
1217 	.shutdown		= xfs_fs_shutdown,
1218 };
1219 
1220 static int
1221 suffix_kstrtoint(
1222 	const char	*s,
1223 	unsigned int	base,
1224 	int		*res)
1225 {
1226 	int		last, shift_left_factor = 0, _res = 0;
1227 	char		*value;
1228 	int		ret = 0;
1229 
1230 	value = kstrdup(s, GFP_KERNEL);
1231 	if (!value)
1232 		return -ENOMEM;
1233 
1234 	last = strlen(value) - 1;
1235 	if (value[last] == 'K' || value[last] == 'k') {
1236 		shift_left_factor = 10;
1237 		value[last] = '\0';
1238 	}
1239 	if (value[last] == 'M' || value[last] == 'm') {
1240 		shift_left_factor = 20;
1241 		value[last] = '\0';
1242 	}
1243 	if (value[last] == 'G' || value[last] == 'g') {
1244 		shift_left_factor = 30;
1245 		value[last] = '\0';
1246 	}
1247 
1248 	if (kstrtoint(value, base, &_res))
1249 		ret = -EINVAL;
1250 	kfree(value);
1251 	*res = _res << shift_left_factor;
1252 	return ret;
1253 }
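
/*
 * Editor's note: a worked example of suffix_kstrtoint().  For
 * "logbsize=32k" the parser hands us "32k": the trailing 'k' selects
 * shift_left_factor = 10 and is overwritten with a NUL, kstrtoint()
 * parses 32, and *res becomes 32 << 10 = 32768.  Because the suffix is
 * NULed out, at most one of the K/M/G checks can fire.
 */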
1254 
1255 static inline void
1256 xfs_fs_warn_deprecated(
1257 	struct fs_context	*fc,
1258 	struct fs_parameter	*param,
1259 	uint64_t		flag,
1260 	bool			value)
1261 {
1262 	/* Don't print the warning if reconfiguring and the current mount
1263 	 * point already had the flag set.
1264 	 */
1265 	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1266 	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1267 		return;
1268 	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1269 }
1270 
1271 /*
1272  * Set mount state from a mount option.
1273  *
1274  * NOTE: mp->m_super is NULL here!
1275  */
1276 static int
1277 xfs_fs_parse_param(
1278 	struct fs_context	*fc,
1279 	struct fs_parameter	*param)
1280 {
1281 	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1282 	struct fs_parse_result	result;
1283 	int			size = 0;
1284 	int			opt;
1285 
1286 	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1287 	if (opt < 0)
1288 		return opt;
1289 
1290 	switch (opt) {
1291 	case Opt_logbufs:
1292 		parsing_mp->m_logbufs = result.uint_32;
1293 		return 0;
1294 	case Opt_logbsize:
1295 		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1296 			return -EINVAL;
1297 		return 0;
1298 	case Opt_logdev:
1299 		kfree(parsing_mp->m_logname);
1300 		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1301 		if (!parsing_mp->m_logname)
1302 			return -ENOMEM;
1303 		return 0;
1304 	case Opt_rtdev:
1305 		kfree(parsing_mp->m_rtname);
1306 		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1307 		if (!parsing_mp->m_rtname)
1308 			return -ENOMEM;
1309 		return 0;
1310 	case Opt_allocsize:
1311 		if (suffix_kstrtoint(param->string, 10, &size))
1312 			return -EINVAL;
1313 		parsing_mp->m_allocsize_log = ffs(size) - 1;
1314 		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1315 		return 0;
1316 	case Opt_grpid:
1317 	case Opt_bsdgroups:
1318 		parsing_mp->m_features |= XFS_FEAT_GRPID;
1319 		return 0;
1320 	case Opt_nogrpid:
1321 	case Opt_sysvgroups:
1322 		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1323 		return 0;
1324 	case Opt_wsync:
1325 		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1326 		return 0;
1327 	case Opt_norecovery:
1328 		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1329 		return 0;
1330 	case Opt_noalign:
1331 		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1332 		return 0;
1333 	case Opt_swalloc:
1334 		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1335 		return 0;
1336 	case Opt_sunit:
1337 		parsing_mp->m_dalign = result.uint_32;
1338 		return 0;
1339 	case Opt_swidth:
1340 		parsing_mp->m_swidth = result.uint_32;
1341 		return 0;
1342 	case Opt_inode32:
1343 		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1344 		return 0;
1345 	case Opt_inode64:
1346 		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1347 		return 0;
1348 	case Opt_nouuid:
1349 		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1350 		return 0;
1351 	case Opt_largeio:
1352 		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1353 		return 0;
1354 	case Opt_nolargeio:
1355 		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1356 		return 0;
1357 	case Opt_filestreams:
1358 		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1359 		return 0;
1360 	case Opt_noquota:
1361 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1362 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1363 		return 0;
1364 	case Opt_quota:
1365 	case Opt_uquota:
1366 	case Opt_usrquota:
1367 		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1368 		return 0;
1369 	case Opt_qnoenforce:
1370 	case Opt_uqnoenforce:
1371 		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1372 		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1373 		return 0;
1374 	case Opt_pquota:
1375 	case Opt_prjquota:
1376 		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1377 		return 0;
1378 	case Opt_pqnoenforce:
1379 		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1380 		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1381 		return 0;
1382 	case Opt_gquota:
1383 	case Opt_grpquota:
1384 		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1385 		return 0;
1386 	case Opt_gqnoenforce:
1387 		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1388 		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1389 		return 0;
1390 	case Opt_discard:
1391 		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1392 		return 0;
1393 	case Opt_nodiscard:
1394 		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1395 		return 0;
1396 #ifdef CONFIG_FS_DAX
1397 	case Opt_dax:
1398 		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1399 		return 0;
1400 	case Opt_dax_enum:
1401 		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1402 		return 0;
1403 #endif
1404 	/* The following mount options will be removed in September 2025 */
1405 	case Opt_ikeep:
1406 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1407 		parsing_mp->m_features |= XFS_FEAT_IKEEP;
1408 		return 0;
1409 	case Opt_noikeep:
1410 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1411 		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1412 		return 0;
1413 	case Opt_attr2:
1414 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1415 		parsing_mp->m_features |= XFS_FEAT_ATTR2;
1416 		return 0;
1417 	case Opt_noattr2:
1418 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1419 		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1420 		return 0;
1421 	default:
1422 		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1423 		return -EINVAL;
1424 	}
1425 
1426 	return 0;
1427 }
1428 
1429 static int
1430 xfs_fs_validate_params(
1431 	struct xfs_mount	*mp)
1432 {
1433 	/* The norecovery option requires a read-only mount */
1434 	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1435 		xfs_warn(mp, "no-recovery mounts must be read-only.");
1436 		return -EINVAL;
1437 	}
1438 
1439 	/*
1440 	 * We have not read the superblock at this point, so only the attr2
1441 	 * mount option can set the attr2 feature by this stage.
1442 	 */
1443 	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1444 		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1445 		return -EINVAL;
1446 	}
1447 
1448 
1449 	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1450 		xfs_warn(mp,
1451 	"sunit and swidth options incompatible with the noalign option");
1452 		return -EINVAL;
1453 	}
1454 
1455 	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1456 		xfs_warn(mp, "quota support not available in this kernel.");
1457 		return -EINVAL;
1458 	}
1459 
1460 	if ((mp->m_dalign && !mp->m_swidth) ||
1461 	    (!mp->m_dalign && mp->m_swidth)) {
1462 		xfs_warn(mp, "sunit and swidth must be specified together");
1463 		return -EINVAL;
1464 	}
1465 
1466 	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1467 		xfs_warn(mp,
1468 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1469 			mp->m_swidth, mp->m_dalign);
1470 		return -EINVAL;
1471 	}
1472 
1473 	if (mp->m_logbufs != -1 &&
1474 	    mp->m_logbufs != 0 &&
1475 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1476 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1477 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1478 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1479 		return -EINVAL;
1480 	}
1481 
1482 	if (mp->m_logbsize != -1 &&
1483 	    mp->m_logbsize !=  0 &&
1484 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1485 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1486 	     !is_power_of_2(mp->m_logbsize))) {
1487 		xfs_warn(mp,
1488 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1489 			mp->m_logbsize);
1490 		return -EINVAL;
1491 	}
1492 
1493 	if (xfs_has_allocsize(mp) &&
1494 	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1495 	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1496 		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1497 			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1498 		return -EINVAL;
1499 	}
1500 
1501 	return 0;
1502 }
1503 
1504 struct dentry *
1505 xfs_debugfs_mkdir(
1506 	const char	*name,
1507 	struct dentry	*parent)
1508 {
1509 	struct dentry	*child;
1510 
1511 	/* Apparently we're expected to ignore error returns?? */
1512 	child = debugfs_create_dir(name, parent);
1513 	if (IS_ERR(child))
1514 		return NULL;
1515 
1516 	return child;
1517 }
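
/*
 * Editor's note: debugfs_create_dir() reports failure with an
 * ERR_PTR-encoded pointer rather than NULL, and debugfs consumers are
 * expected to carry on regardless; the wrapper above normalises errors
 * to NULL, which later consumers (debugfs_remove() et al.) tolerate.
 */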
1518 
1519 static int
1520 xfs_fs_fill_super(
1521 	struct super_block	*sb,
1522 	struct fs_context	*fc)
1523 {
1524 	struct xfs_mount	*mp = sb->s_fs_info;
1525 	struct inode		*root;
1526 	int			flags = 0, error;
1527 
1528 	mp->m_super = sb;
1529 
1530 	error = xfs_fs_validate_params(mp);
1531 	if (error)
1532 		return error;
1533 
1534 	sb_min_blocksize(sb, BBSIZE);
1535 	sb->s_xattr = xfs_xattr_handlers;
1536 	sb->s_export_op = &xfs_export_operations;
1537 #ifdef CONFIG_XFS_QUOTA
1538 	sb->s_qcop = &xfs_quotactl_operations;
1539 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1540 #endif
1541 	sb->s_op = &xfs_super_operations;
1542 
1543 	/*
1544 	 * Delay mount work if the debug hook is set. This is debug
1545 	 * instrumentation to coordinate simulation of xfs mount failures with
1546 	 * VFS superblock operations.
1547 	 */
1548 	if (xfs_globals.mount_delay) {
1549 		xfs_notice(mp, "Delaying mount for %d seconds.",
1550 			xfs_globals.mount_delay);
1551 		msleep(xfs_globals.mount_delay * 1000);
1552 	}
1553 
1554 	if (fc->sb_flags & SB_SILENT)
1555 		flags |= XFS_MFSI_QUIET;
1556 
1557 	error = xfs_open_devices(mp);
1558 	if (error)
1559 		return error;
1560 
1561 	if (xfs_debugfs) {
1562 		mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
1563 						  xfs_debugfs);
1564 	} else {
1565 		mp->m_debugfs = NULL;
1566 	}
1567 
1568 	error = xfs_init_mount_workqueues(mp);
1569 	if (error)
1570 		goto out_shutdown_devices;
1571 
1572 	error = xfs_init_percpu_counters(mp);
1573 	if (error)
1574 		goto out_destroy_workqueues;
1575 
1576 	error = xfs_inodegc_init_percpu(mp);
1577 	if (error)
1578 		goto out_destroy_counters;
1579 
1580 	/*
1581 	 * All percpu data structures requiring cleanup when a cpu goes offline
1582 	 * must be allocated before adding this @mp to the cpu-dead handler's
1583 	 * mount list.
1584 	 */
1585 	xfs_mount_list_add(mp);
1586 
1587 	/* Allocate stats memory before we do operations that might use it */
1588 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1589 	if (!mp->m_stats.xs_stats) {
1590 		error = -ENOMEM;
1591 		goto out_destroy_inodegc;
1592 	}
1593 
1594 	error = xchk_mount_stats_alloc(mp);
1595 	if (error)
1596 		goto out_free_stats;
1597 
1598 	error = xfs_readsb(mp, flags);
1599 	if (error)
1600 		goto out_free_scrub_stats;
1601 
1602 	error = xfs_finish_flags(mp);
1603 	if (error)
1604 		goto out_free_sb;
1605 
1606 	error = xfs_setup_devices(mp);
1607 	if (error)
1608 		goto out_free_sb;
1609 
1610 	/* V4 support is undergoing deprecation. */
1611 	if (!xfs_has_crc(mp)) {
1612 #ifdef CONFIG_XFS_SUPPORT_V4
1613 		xfs_warn_once(mp,
1614 	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1615 #else
1616 		xfs_warn(mp,
1617 	"Deprecated V4 format (crc=0) not supported by kernel.");
1618 		error = -EINVAL;
1619 		goto out_free_sb;
1620 #endif
1621 	}
1622 
1623 	/* ASCII case insensitivity is undergoing deprecation. */
1624 	if (xfs_has_asciici(mp)) {
1625 #ifdef CONFIG_XFS_SUPPORT_ASCII_CI
1626 		xfs_warn_once(mp,
1627 	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
1628 #else
1629 		xfs_warn(mp,
1630 	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
1631 		error = -EINVAL;
1632 		goto out_free_sb;
1633 #endif
1634 	}
1635 
1636 	/* Filesystem claims it needs repair, so refuse the mount. */
1637 	if (xfs_has_needsrepair(mp)) {
1638 		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1639 		error = -EFSCORRUPTED;
1640 		goto out_free_sb;
1641 	}
1642 
1643 	/*
1644 	 * Don't touch the filesystem if a user tool thinks it owns the primary
1645 	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1646 	 * we don't check them at all.
1647 	 */
1648 	if (mp->m_sb.sb_inprogress) {
1649 		xfs_warn(mp, "Offline file system operation in progress!");
1650 		error = -EFSCORRUPTED;
1651 		goto out_free_sb;
1652 	}
1653 
1654 	/*
1655 	 * Until this is fixed only page-sized or smaller data blocks work.
1656 	 */
1657 	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1658 		xfs_warn(mp,
1659 		"File system with blocksize %d bytes. "
1660 		"Only pagesize (%ld) or less will currently work.",
1661 				mp->m_sb.sb_blocksize, PAGE_SIZE);
1662 		error = -ENOSYS;
1663 		goto out_free_sb;
1664 	}
1665 
1666 	/* Ensure this filesystem fits in the page cache limits */
1667 	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1668 	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1669 		xfs_warn(mp,
1670 		"file system too large to be mounted on this system.");
1671 		error = -EFBIG;
1672 		goto out_free_sb;
1673 	}
1674 
1675 	/*
1676 	 * XFS block mappings use 54 bits to store the logical block offset.
1677 	 * This should suffice to handle the maximum file size that the VFS
1678 	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1679 	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1680 	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1681 	 * to check this assertion.
1682 	 *
1683 	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1684 	 * maximum pagecache offset in units of fs blocks.
1685 	 */
1686 	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1687 		xfs_warn(mp,
1688 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1689 			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1690 			 XFS_MAX_FILEOFF);
1691 		error = -EINVAL;
1692 		goto out_free_sb;
1693 	}
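
	/*
	 * Editor's note: with 4096-byte blocks, MAX_LFS_FILESIZE (2^63 - 1
	 * bytes on 64-bit) converts to roughly a 2^51 block offset, well
	 * inside the 54-bit bmbt limit checked above; the warning should
	 * only fire if one of those constants regresses.
	 */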
1694 
1695 	error = xfs_filestream_mount(mp);
1696 	if (error)
1697 		goto out_free_sb;
1698 
1699 	/*
1700 	 * We must configure the block size in the superblock before we run the
1701 	 * full mount process, as the mount process can look up and cache inodes.
1702 	 */
1703 	sb->s_magic = XFS_SUPER_MAGIC;
1704 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1705 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1706 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1707 	sb->s_max_links = XFS_MAXLINK;
1708 	sb->s_time_gran = 1;
1709 	if (xfs_has_bigtime(mp)) {
1710 		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1711 		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1712 	} else {
1713 		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1714 		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1715 	}
1716 	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1717 	sb->s_iflags |= SB_I_CGROUPWB;
1718 
1719 	set_posix_acl_flag(sb);
1720 
1721 	/* version 5 superblocks support inode version counters. */
1722 	if (xfs_has_crc(mp))
1723 		sb->s_flags |= SB_I_VERSION;
1724 
1725 	if (xfs_has_dax_always(mp)) {
1726 		error = xfs_setup_dax_always(mp);
1727 		if (error)
1728 			goto out_filestream_unmount;
1729 	}
1730 
1731 	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
1732 		xfs_warn(mp,
1733 	"mounting with \"discard\" option, but the device does not support discard");
1734 		mp->m_features &= ~XFS_FEAT_DISCARD;
1735 	}
1736 
1737 	if (xfs_has_reflink(mp)) {
1738 		if (mp->m_sb.sb_rblocks) {
1739 			xfs_alert(mp,
1740 	"reflink not compatible with realtime device!");
1741 			error = -EINVAL;
1742 			goto out_filestream_unmount;
1743 		}
1744 
1745 		if (xfs_globals.always_cow) {
1746 			xfs_info(mp, "using DEBUG-only always_cow mode.");
1747 			mp->m_always_cow = true;
1748 		}
1749 	}
1750 
1751 	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1752 		xfs_alert(mp,
1753 	"reverse mapping btree not compatible with realtime device!");
1754 		error = -EINVAL;
1755 		goto out_filestream_unmount;
1756 	}
1757 
1758 	error = xfs_mountfs(mp);
1759 	if (error)
1760 		goto out_filestream_unmount;
1761 
1762 	root = igrab(VFS_I(mp->m_rootip));
1763 	if (!root) {
1764 		error = -ENOENT;
1765 		goto out_unmount;
1766 	}
1767 	sb->s_root = d_make_root(root);
1768 	if (!sb->s_root) {
1769 		error = -ENOMEM;
1770 		goto out_unmount;
1771 	}
1772 
1773 	return 0;
1774 
1775  out_filestream_unmount:
1776 	xfs_filestream_unmount(mp);
1777  out_free_sb:
1778 	xfs_freesb(mp);
1779  out_free_scrub_stats:
1780 	xchk_mount_stats_free(mp);
1781  out_free_stats:
1782 	free_percpu(mp->m_stats.xs_stats);
1783  out_destroy_inodegc:
1784 	xfs_mount_list_del(mp);
1785 	xfs_inodegc_free_percpu(mp);
1786  out_destroy_counters:
1787 	xfs_destroy_percpu_counters(mp);
1788  out_destroy_workqueues:
1789 	xfs_destroy_mount_workqueues(mp);
1790  out_shutdown_devices:
1791 	xfs_shutdown_devices(mp);
1792 	return error;
1793 
1794  out_unmount:
1795 	xfs_filestream_unmount(mp);
1796 	xfs_unmountfs(mp);
1797 	goto out_free_sb;
1798 }
1799 
1800 static int
1801 xfs_fs_get_tree(
1802 	struct fs_context	*fc)
1803 {
1804 	return get_tree_bdev(fc, xfs_fs_fill_super);
1805 }
1806 
1807 static int
1808 xfs_remount_rw(
1809 	struct xfs_mount	*mp)
1810 {
1811 	struct xfs_sb		*sbp = &mp->m_sb;
1812 	int error;
1813 
1814 	if (xfs_has_norecovery(mp)) {
1815 		xfs_warn(mp,
1816 			"ro->rw transition prohibited on norecovery mount");
1817 		return -EINVAL;
1818 	}
1819 
1820 	if (xfs_sb_is_v5(sbp) &&
1821 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1822 		xfs_warn(mp,
1823 	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1824 			(sbp->sb_features_ro_compat &
1825 				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1826 		return -EINVAL;
1827 	}
1828 
1829 	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1830 
1831 	/*
1832 	 * If this is the first remount to writeable state we might have some
1833 	 * superblock changes to update.
1834 	 */
1835 	if (mp->m_update_sb) {
1836 		error = xfs_sync_sb(mp, false);
1837 		if (error) {
1838 			xfs_warn(mp, "failed to write sb changes");
1839 			return error;
1840 		}
1841 		mp->m_update_sb = false;
1842 	}
1843 
1844 	/*
1845 	 * Fill out the reserve pool if it is empty. Use the stashed value if
1846 	 * it is non-zero, otherwise go with the default.
1847 	 */
1848 	xfs_restore_resvblks(mp);
1849 	xfs_log_work_queue(mp);
1850 	xfs_blockgc_start(mp);
1851 
1852 	/* Create the per-AG metadata reservation pool. */
1853 	error = xfs_fs_reserve_ag_blocks(mp);
1854 	if (error && error != -ENOSPC)
1855 		return error;
1856 
1857 	/* Re-enable the background inode inactivation worker. */
1858 	xfs_inodegc_start(mp);
1859 
1860 	return 0;
1861 }
1862 
1863 static int
1864 xfs_remount_ro(
1865 	struct xfs_mount	*mp)
1866 {
1867 	struct xfs_icwalk	icw = {
1868 		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
1869 	};
1870 	int			error;
1871 
1872 	/* Flush all the dirty data to disk. */
1873 	error = sync_filesystem(mp->m_super);
1874 	if (error)
1875 		return error;
1876 
1877 	/*
1878 	 * Cancel background eofb scanning so it cannot race with the final
1879 	 * log force+buftarg wait and deadlock the remount.
1880 	 */
1881 	xfs_blockgc_stop(mp);
1882 
1883 	/*
1884 	 * Clear out all remaining COW staging extents and speculative post-EOF
1885 	 * preallocations so that we don't leave inodes requiring inactivation
1886 	 * cleanups during reclaim on a read-only mount.  We must process every
1887 	 * cached inode, so this requires a synchronous cache scan.
1888 	 */
1889 	error = xfs_blockgc_free_space(mp, &icw);
1890 	if (error) {
1891 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1892 		return error;
1893 	}
1894 
1895 	/*
1896 	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
1897 	 * flushed all pending inodegc work when it sync'd the filesystem.
1898 	 * The VFS holds s_umount, so we know that inodes cannot enter
1899 	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
1900 	 * we send inodes straight to reclaim, so no inodes will be queued.
1901 	 */
1902 	xfs_inodegc_stop(mp);
1903 
1904 	/* Free the per-AG metadata reservation pool. */
1905 	error = xfs_fs_unreserve_ag_blocks(mp);
1906 	if (error) {
1907 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1908 		return error;
1909 	}
1910 
1911 	/*
1912 	 * Before we sync the metadata, we need to free up the reserve block
1913 	 * pool so that the used block count in the superblock on disk is
1914 	 * correct at the end of the remount. Stash the current reserve pool
1915 	 * size so that if we get remounted rw, we can return it to the same
1916 	 * size.
1917 	 */
1918 	xfs_save_resvblks(mp);
1919 
1920 	xfs_log_clean(mp);
1921 	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1922 
1923 	return 0;
1924 }
1925 
1926 /*
1927  * Logically we would return an error here, to keep users from believing
1928  * that remount changed mount options which in fact cannot be changed.
1929  *
1930  * But unfortunately mount(8) in some cases adds all options from mtab and
1931  * fstab to the mount arguments, so we can't blindly reject options; we
1932  * would have to compare each specified option against the currently set
1933  * one and reject it only if it actually differs.
1934  *
1935  * Until that is implemented we return success for every remount request, and
1936  * silently ignore all options that we can't actually change.
1937  */
1938 static int
1939 xfs_fs_reconfigure(
1940 	struct fs_context *fc)
1941 {
1942 	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1943 	struct xfs_mount	*new_mp = fc->s_fs_info;
1944 	int			flags = fc->sb_flags;
1945 	int			error;
1946 
1947 	/* version 5 superblocks always support version counters. */
1948 	if (xfs_has_crc(mp))
1949 		fc->sb_flags |= SB_I_VERSION;
1950 
1951 	error = xfs_fs_validate_params(new_mp);
1952 	if (error)
1953 		return error;
1954 
1955 	/* inode32 -> inode64 */
1956 	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1957 		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1958 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1959 	}
1960 
1961 	/* inode64 -> inode32 */
1962 	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1963 		mp->m_features |= XFS_FEAT_SMALL_INUMS;
1964 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1965 	}
1966 
1967 	/* ro -> rw */
1968 	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1969 		error = xfs_remount_rw(mp);
1970 		if (error)
1971 			return error;
1972 	}
1973 
1974 	/* rw -> ro */
1975 	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1976 		error = xfs_remount_ro(mp);
1977 		if (error)
1978 			return error;
1979 	}
1980 
1981 	return 0;
1982 }
1983 
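/*
 * Example (not part of xfs_super.c): how the reconfigure path above is
 * reached from userspace.  A remount request through mount(2) ends up
 * in xfs_fs_reconfigure() via the VFS.  The mount point "/mnt" is an
 * assumption, and the caller needs CAP_SYS_ADMIN.
 */
#include <sys/mount.h>

static int example_remount_readonly(void)
{
	/* source and fstype arguments are ignored for MS_REMOUNT */
	return mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL);
}
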
1984 static void
1985 xfs_fs_free(
1986 	struct fs_context	*fc)
1987 {
1988 	struct xfs_mount	*mp = fc->s_fs_info;
1989 
1990 	/*
1991 	 * mp is stored in the fs_context when it is initialized.
1992 	 * mp is transferred to the superblock on a successful mount,
1993 	 * but if an error occurs before the transfer we have to free
1994 	 * it here.
1995 	 */
1996 	if (mp)
1997 		xfs_mount_free(mp);
1998 }
1999 
2000 static const struct fs_context_operations xfs_context_ops = {
2001 	.parse_param = xfs_fs_parse_param,
2002 	.get_tree    = xfs_fs_get_tree,
2003 	.reconfigure = xfs_fs_reconfigure,
2004 	.free        = xfs_fs_free,
2005 };
2006 
2007 static int xfs_init_fs_context(
2008 	struct fs_context	*fc)
2009 {
2010 	struct xfs_mount	*mp;
2011 
2012 	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
2013 	if (!mp)
2014 		return -ENOMEM;
2015 
2016 	spin_lock_init(&mp->m_sb_lock);
2017 	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
2018 	spin_lock_init(&mp->m_perag_lock);
2019 	mutex_init(&mp->m_growlock);
2020 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
2021 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
2022 	mp->m_kobj.kobject.kset = xfs_kset;
2023 	/*
2024 	 * We don't create the finobt per-ag space reservation until after log
2025 	 * recovery, so we must set this to true so that an ifree transaction
2026 	 * started during log recovery will not depend on space reservations
2027 	 * for finobt expansion.
2028 	 */
2029 	mp->m_finobt_nores = true;
2030 
2031 	/*
2032 	 * These can be overridden by the mount option parsing.
2033 	 */
2034 	mp->m_logbufs = -1;
2035 	mp->m_logbsize = -1;
2036 	mp->m_allocsize_log = 16; /* 64k */
2037 
2038 	/*
2039 	 * Copy binary VFS mount flags we are interested in.
2040 	 */
2041 	if (fc->sb_flags & SB_RDONLY)
2042 		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
2043 	if (fc->sb_flags & SB_DIRSYNC)
2044 		mp->m_features |= XFS_FEAT_DIRSYNC;
2045 	if (fc->sb_flags & SB_SYNCHRONOUS)
2046 		mp->m_features |= XFS_FEAT_WSYNC;
2047 
2048 	fc->s_fs_info = mp;
2049 	fc->ops = &xfs_context_ops;
2050 
2051 	return 0;
2052 }
2053 
2054 static void
2055 xfs_kill_sb(
2056 	struct super_block		*sb)
2057 {
2058 	kill_block_super(sb);
2059 	xfs_mount_free(XFS_M(sb));
2060 }
2061 
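/*
 * Note the two teardown paths for the xfs_mount allocated in
 * xfs_init_fs_context(): if the context never produced a superblock,
 * xfs_fs_free() releases it; once mounted, ownership has moved to the
 * superblock and xfs_kill_sb() frees it after kill_block_super() has
 * torn down the VFS state.
 */
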
2062 static struct file_system_type xfs_fs_type = {
2063 	.owner			= THIS_MODULE,
2064 	.name			= "xfs",
2065 	.init_fs_context	= xfs_init_fs_context,
2066 	.parameters		= xfs_fs_parameters,
2067 	.kill_sb		= xfs_kill_sb,
2068 	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
2069 };
2070 MODULE_ALIAS_FS("xfs");
2071 
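/*
 * Example (not part of xfs_super.c): a self-contained sketch of how
 * init_fs_context, the fs_context_operations table and the
 * file_system_type fit together, for a hypothetical in-memory
 * "examplefs".  Everything here is an assumption for illustration;
 * a real filesystem would build a root dentry in its fill_super hook
 * and free its s_fs_info in kill_sb.
 */
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/module.h>
#include <linux/slab.h>

struct examplefs_opts {
	unsigned int	mode;	/* pre-mount option state */
};

static int examplefs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	return 0;		/* called once per mount option */
}

static int examplefs_fill_super(struct super_block *sb,
				struct fs_context *fc)
{
	return -ENOSYS;		/* a real fill_super creates sb->s_root */
}

static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, examplefs_fill_super);
}

static void examplefs_free_fc(struct fs_context *fc)
{
	kfree(fc->s_fs_info);	/* state not yet handed to a superblock */
}

static const struct fs_context_operations examplefs_context_ops = {
	.parse_param	= examplefs_parse_param,
	.get_tree	= examplefs_get_tree,
	.free		= examplefs_free_fc,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	struct examplefs_opts *opts;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;
	fc->s_fs_info = opts;
	fc->ops = &examplefs_context_ops;
	return 0;
}

static struct file_system_type examplefs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "examplefs",
	.init_fs_context	= examplefs_init_fs_context,
	.kill_sb		= kill_anon_super,
};

static int __init examplefs_init(void)
{
	return register_filesystem(&examplefs_fs_type);
}

static void __exit examplefs_exit(void)
{
	unregister_filesystem(&examplefs_fs_type);
}

module_init(examplefs_init);
module_exit(examplefs_exit);
MODULE_LICENSE("GPL");
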
2072 STATIC int __init
2073 xfs_init_caches(void)
2074 {
2075 	int		error;
2076 
2077 	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2078 					 SLAB_HWCACHE_ALIGN |
2079 					 SLAB_RECLAIM_ACCOUNT |
2080 					 SLAB_MEM_SPREAD,
2081 					 NULL);
2082 	if (!xfs_buf_cache)
2083 		goto out;
2084 
2085 	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
2086 						sizeof(struct xlog_ticket),
2087 						0, 0, NULL);
2088 	if (!xfs_log_ticket_cache)
2089 		goto out_destroy_buf_cache;
2090 
2091 	error = xfs_btree_init_cur_caches();
2092 	if (error)
2093 		goto out_destroy_log_ticket_cache;
2094 
2095 	error = xfs_defer_init_item_caches();
2096 	if (error)
2097 		goto out_destroy_btree_cur_cache;
2098 
2099 	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2100 					      sizeof(struct xfs_da_state),
2101 					      0, 0, NULL);
2102 	if (!xfs_da_state_cache)
2103 		goto out_destroy_defer_item_cache;
2104 
2105 	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2106 					   sizeof(struct xfs_ifork),
2107 					   0, 0, NULL);
2108 	if (!xfs_ifork_cache)
2109 		goto out_destroy_da_state_cache;
2110 
2111 	xfs_trans_cache = kmem_cache_create("xfs_trans",
2112 					   sizeof(struct xfs_trans),
2113 					   0, 0, NULL);
2114 	if (!xfs_trans_cache)
2115 		goto out_destroy_ifork_cache;
2116 
2117 
2118 	/*
2119 	 * The size of the cache-allocated buf log item is the maximum
2120 	 * size possible under XFS.  This wastes a little bit of memory,
2121 	 * but it is much faster.
2122 	 */
2123 	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2124 					      sizeof(struct xfs_buf_log_item),
2125 					      0, 0, NULL);
2126 	if (!xfs_buf_item_cache)
2127 		goto out_destroy_trans_cache;
2128 
2129 	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2130 			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2131 			0, 0, NULL);
2132 	if (!xfs_efd_cache)
2133 		goto out_destroy_buf_item_cache;
2134 
2135 	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2136 			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2137 			0, 0, NULL);
2138 	if (!xfs_efi_cache)
2139 		goto out_destroy_efd_cache;
2140 
2141 	xfs_inode_cache = kmem_cache_create("xfs_inode",
2142 					   sizeof(struct xfs_inode), 0,
2143 					   (SLAB_HWCACHE_ALIGN |
2144 					    SLAB_RECLAIM_ACCOUNT |
2145 					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
2146 					   xfs_fs_inode_init_once);
2147 	if (!xfs_inode_cache)
2148 		goto out_destroy_efi_cache;
2149 
2150 	xfs_ili_cache = kmem_cache_create("xfs_ili",
2151 					 sizeof(struct xfs_inode_log_item), 0,
2152 					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2153 					 NULL);
2154 	if (!xfs_ili_cache)
2155 		goto out_destroy_inode_cache;
2156 
2157 	xfs_icreate_cache = kmem_cache_create("xfs_icr",
2158 					     sizeof(struct xfs_icreate_item),
2159 					     0, 0, NULL);
2160 	if (!xfs_icreate_cache)
2161 		goto out_destroy_ili_cache;
2162 
2163 	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2164 					 sizeof(struct xfs_rud_log_item),
2165 					 0, 0, NULL);
2166 	if (!xfs_rud_cache)
2167 		goto out_destroy_icreate_cache;
2168 
2169 	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2170 			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2171 			0, 0, NULL);
2172 	if (!xfs_rui_cache)
2173 		goto out_destroy_rud_cache;
2174 
2175 	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2176 					 sizeof(struct xfs_cud_log_item),
2177 					 0, 0, NULL);
2178 	if (!xfs_cud_cache)
2179 		goto out_destroy_rui_cache;
2180 
2181 	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2182 			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2183 			0, 0, NULL);
2184 	if (!xfs_cui_cache)
2185 		goto out_destroy_cud_cache;
2186 
2187 	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2188 					 sizeof(struct xfs_bud_log_item),
2189 					 0, 0, NULL);
2190 	if (!xfs_bud_cache)
2191 		goto out_destroy_cui_cache;
2192 
2193 	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2194 			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2195 			0, 0, NULL);
2196 	if (!xfs_bui_cache)
2197 		goto out_destroy_bud_cache;
2198 
2199 	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2200 					    sizeof(struct xfs_attrd_log_item),
2201 					    0, 0, NULL);
2202 	if (!xfs_attrd_cache)
2203 		goto out_destroy_bui_cache;
2204 
2205 	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2206 					    sizeof(struct xfs_attri_log_item),
2207 					    0, 0, NULL);
2208 	if (!xfs_attri_cache)
2209 		goto out_destroy_attrd_cache;
2210 
2211 	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2212 					     sizeof(struct xfs_iunlink_item),
2213 					     0, 0, NULL);
2214 	if (!xfs_iunlink_cache)
2215 		goto out_destroy_attri_cache;
2216 
2217 	return 0;
2218 
2219  out_destroy_attri_cache:
2220 	kmem_cache_destroy(xfs_attri_cache);
2221  out_destroy_attrd_cache:
2222 	kmem_cache_destroy(xfs_attrd_cache);
2223  out_destroy_bui_cache:
2224 	kmem_cache_destroy(xfs_bui_cache);
2225  out_destroy_bud_cache:
2226 	kmem_cache_destroy(xfs_bud_cache);
2227  out_destroy_cui_cache:
2228 	kmem_cache_destroy(xfs_cui_cache);
2229  out_destroy_cud_cache:
2230 	kmem_cache_destroy(xfs_cud_cache);
2231  out_destroy_rui_cache:
2232 	kmem_cache_destroy(xfs_rui_cache);
2233  out_destroy_rud_cache:
2234 	kmem_cache_destroy(xfs_rud_cache);
2235  out_destroy_icreate_cache:
2236 	kmem_cache_destroy(xfs_icreate_cache);
2237  out_destroy_ili_cache:
2238 	kmem_cache_destroy(xfs_ili_cache);
2239  out_destroy_inode_cache:
2240 	kmem_cache_destroy(xfs_inode_cache);
2241  out_destroy_efi_cache:
2242 	kmem_cache_destroy(xfs_efi_cache);
2243  out_destroy_efd_cache:
2244 	kmem_cache_destroy(xfs_efd_cache);
2245  out_destroy_buf_item_cache:
2246 	kmem_cache_destroy(xfs_buf_item_cache);
2247  out_destroy_trans_cache:
2248 	kmem_cache_destroy(xfs_trans_cache);
2249  out_destroy_ifork_cache:
2250 	kmem_cache_destroy(xfs_ifork_cache);
2251  out_destroy_da_state_cache:
2252 	kmem_cache_destroy(xfs_da_state_cache);
2253  out_destroy_defer_item_cache:
2254 	xfs_defer_destroy_item_caches();
2255  out_destroy_btree_cur_cache:
2256 	xfs_btree_destroy_cur_caches();
2257  out_destroy_log_ticket_cache:
2258 	kmem_cache_destroy(xfs_log_ticket_cache);
2259  out_destroy_buf_cache:
2260 	kmem_cache_destroy(xfs_buf_cache);
2261  out:
2262 	return -ENOMEM;
2263 }
2264 
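/*
 * Example (not part of xfs_super.c): the kmem_cache_create() pattern
 * used above, including a one-time constructor in the style of
 * xfs_fs_inode_init_once.  The constructor runs when a slab page is
 * populated, not on every allocation, so it may only initialise fields
 * that survive free/reallocate cycles.  The "widget" names are
 * hypothetical.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct widget {
	spinlock_t	lock;
	int		state;
};

static struct kmem_cache *widget_cache;

static void widget_init_once(void *p)
{
	struct widget *w = p;

	spin_lock_init(&w->lock);
}

static int widget_cache_init(void)
{
	widget_cache = kmem_cache_create("widget", sizeof(struct widget), 0,
					 SLAB_HWCACHE_ALIGN |
					 SLAB_RECLAIM_ACCOUNT,
					 widget_init_once);
	return widget_cache ? 0 : -ENOMEM;
}
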
2265 STATIC void
2266 xfs_destroy_caches(void)
2267 {
2268 	/*
2269 	 * Make sure all delayed rcu free are flushed before we
2270 	 * destroy caches.
2271 	 */
2272 	rcu_barrier();
2273 	kmem_cache_destroy(xfs_iunlink_cache);
2274 	kmem_cache_destroy(xfs_attri_cache);
2275 	kmem_cache_destroy(xfs_attrd_cache);
2276 	kmem_cache_destroy(xfs_bui_cache);
2277 	kmem_cache_destroy(xfs_bud_cache);
2278 	kmem_cache_destroy(xfs_cui_cache);
2279 	kmem_cache_destroy(xfs_cud_cache);
2280 	kmem_cache_destroy(xfs_rui_cache);
2281 	kmem_cache_destroy(xfs_rud_cache);
2282 	kmem_cache_destroy(xfs_icreate_cache);
2283 	kmem_cache_destroy(xfs_ili_cache);
2284 	kmem_cache_destroy(xfs_inode_cache);
2285 	kmem_cache_destroy(xfs_efi_cache);
2286 	kmem_cache_destroy(xfs_efd_cache);
2287 	kmem_cache_destroy(xfs_buf_item_cache);
2288 	kmem_cache_destroy(xfs_trans_cache);
2289 	kmem_cache_destroy(xfs_ifork_cache);
2290 	kmem_cache_destroy(xfs_da_state_cache);
2291 	xfs_defer_destroy_item_caches();
2292 	xfs_btree_destroy_cur_caches();
2293 	kmem_cache_destroy(xfs_log_ticket_cache);
2294 	kmem_cache_destroy(xfs_buf_cache);
2295 }
2296 
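/*
 * Example (not part of xfs_super.c): why rcu_barrier() must precede
 * the kmem_cache_destroy() calls above.  Objects freed back to a cache
 * through call_rcu() are only returned once the grace period ends, so
 * the caches must outlive every queued callback.  The "gizmo" names
 * are hypothetical.
 */
#include <linux/container_of.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct gizmo {
	struct rcu_head	rcu;
	int		value;
};

static struct kmem_cache *gizmo_cache;

static void gizmo_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(gizmo_cache,
			container_of(head, struct gizmo, rcu));
}

static void gizmo_free(struct gizmo *g)
{
	call_rcu(&g->rcu, gizmo_free_rcu);	/* deferred free */
}

static void gizmo_cache_exit(void)
{
	rcu_barrier();	/* wait for all gizmo_free_rcu() invocations */
	kmem_cache_destroy(gizmo_cache);
}
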
2297 STATIC int __init
2298 xfs_init_workqueues(void)
2299 {
2300 	/*
2301 	 * The allocation workqueue can be used in memory reclaim situations
2302 	 * (writepage path), and parallelism is only limited by the number of
2303 	 * AGs in all the filesystems mounted. Hence use the default large
2304 	 * max_active value for this workqueue.
2305 	 */
2306 	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2307 			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2308 	if (!xfs_alloc_wq)
2309 		return -ENOMEM;
2310 
2311 	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2312 			0);
2313 	if (!xfs_discard_wq)
2314 		goto out_free_alloc_wq;
2315 
2316 	return 0;
2317 out_free_alloc_wq:
2318 	destroy_workqueue(xfs_alloc_wq);
2319 	return -ENOMEM;
2320 }
2321 
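/*
 * Example (not part of xfs_super.c): the alloc_workqueue() pattern used
 * above.  WQ_MEM_RECLAIM guarantees a rescuer thread so the queue makes
 * forward progress when used on the memory-reclaim path; a max_active
 * of 0 selects the default concurrency limit.  Names are hypothetical.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int demo_wq_init(void)
{
	demo_wq = alloc_workqueue("demo", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
	return demo_wq ? 0 : -ENOMEM;
}

static void demo_wq_exit(void)
{
	destroy_workqueue(demo_wq);
}
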
2322 STATIC void
2323 xfs_destroy_workqueues(void)
2324 {
2325 	destroy_workqueue(xfs_discard_wq);
2326 	destroy_workqueue(xfs_alloc_wq);
2327 }
2328 
2329 #ifdef CONFIG_HOTPLUG_CPU
2330 static int
2331 xfs_cpu_dead(
2332 	unsigned int		cpu)
2333 {
2334 	struct xfs_mount	*mp, *n;
2335 
2336 	spin_lock(&xfs_mount_list_lock);
2337 	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
2338 		spin_unlock(&xfs_mount_list_lock);
2339 		xfs_inodegc_cpu_dead(mp, cpu);
2340 		xlog_cil_pcp_dead(mp->m_log, cpu);
2341 		spin_lock(&xfs_mount_list_lock);
2342 	}
2343 	spin_unlock(&xfs_mount_list_lock);
2344 	return 0;
2345 }
2346 
2347 static int __init
2348 xfs_cpu_hotplug_init(void)
2349 {
2350 	int	error;
2351 
2352 	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
2353 			xfs_cpu_dead);
2354 	if (error < 0)
2355 		xfs_alert(NULL,
2356 "Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
2357 			error);
2358 	return error;
2359 }
2360 
2361 static void
2362 xfs_cpu_hotplug_destroy(void)
2363 {
2364 	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
2365 }
2366 
2367 #else /* !CONFIG_HOTPLUG_CPU */
2368 static inline int xfs_cpu_hotplug_init(void) { return 0; }
2369 static inline void xfs_cpu_hotplug_destroy(void) {}
2370 #endif
2371 
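/*
 * Example (not part of xfs_super.c): the CPU-hotplug "dead" callback
 * pattern used above, for a hypothetical subsystem.  Instead of a
 * dedicated enum entry like CPUHP_XFS_DEAD, a dynamically allocated
 * state is requested; the state number returned on success must be
 * kept for later removal.  Names are hypothetical.
 */
#include <linux/cpuhotplug.h>

static int demo_hp_state;

static int demo_cpu_dead(unsigned int cpu)
{
	/* drain per-cpu state that the departed CPU left behind */
	return 0;
}

static int demo_hotplug_init(void)
{
	int error;

	error = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, "demo:dead",
					  NULL, demo_cpu_dead);
	if (error < 0)
		return error;
	demo_hp_state = error;	/* dynamic states return the state number */
	return 0;
}

static void demo_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(demo_hp_state);
}
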
2372 STATIC int __init
2373 init_xfs_fs(void)
2374 {
2375 	int			error;
2376 
2377 	xfs_check_ondisk_structs();
2378 
2379 	error = xfs_dahash_test();
2380 	if (error)
2381 		return error;
2382 
2383 	printk(KERN_INFO XFS_VERSION_STRING " with "
2384 			 XFS_BUILD_OPTIONS " enabled\n");
2385 
2386 	xfs_dir_startup();
2387 
2388 	error = xfs_cpu_hotplug_init();
2389 	if (error)
2390 		goto out;
2391 
2392 	error = xfs_init_caches();
2393 	if (error)
2394 		goto out_destroy_hp;
2395 
2396 	error = xfs_init_workqueues();
2397 	if (error)
2398 		goto out_destroy_caches;
2399 
2400 	error = xfs_mru_cache_init();
2401 	if (error)
2402 		goto out_destroy_wq;
2403 
2404 	error = xfs_init_procfs();
2405 	if (error)
2406 		goto out_mru_cache_uninit;
2407 
2408 	error = xfs_sysctl_register();
2409 	if (error)
2410 		goto out_cleanup_procfs;
2411 
2412 	xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);
2413 
2414 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2415 	if (!xfs_kset) {
2416 		error = -ENOMEM;
2417 		goto out_debugfs_unregister;
2418 	}
2419 
2420 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2421 
2422 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2423 	if (!xfsstats.xs_stats) {
2424 		error = -ENOMEM;
2425 		goto out_kset_unregister;
2426 	}
2427 
2428 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2429 			       "stats");
2430 	if (error)
2431 		goto out_free_stats;
2432 
2433 	error = xchk_global_stats_setup(xfs_debugfs);
2434 	if (error)
2435 		goto out_remove_stats_kobj;
2436 
2437 #ifdef DEBUG
2438 	xfs_dbg_kobj.kobject.kset = xfs_kset;
2439 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2440 	if (error)
2441 		goto out_remove_scrub_stats;
2442 #endif
2443 
2444 	error = xfs_qm_init();
2445 	if (error)
2446 		goto out_remove_dbg_kobj;
2447 
2448 	error = register_filesystem(&xfs_fs_type);
2449 	if (error)
2450 		goto out_qm_exit;
2451 	return 0;
2452 
2453  out_qm_exit:
2454 	xfs_qm_exit();
2455  out_remove_dbg_kobj:
2456 #ifdef DEBUG
2457 	xfs_sysfs_del(&xfs_dbg_kobj);
2458  out_remove_scrub_stats:
2459 #endif
2460 	xchk_global_stats_teardown();
2461  out_remove_stats_kobj:
2462 	xfs_sysfs_del(&xfsstats.xs_kobj);
2463  out_free_stats:
2464 	free_percpu(xfsstats.xs_stats);
2465  out_kset_unregister:
2466 	kset_unregister(xfs_kset);
2467  out_debugfs_unregister:
2468 	debugfs_remove(xfs_debugfs);
2469 	xfs_sysctl_unregister();
2470  out_cleanup_procfs:
2471 	xfs_cleanup_procfs();
2472  out_mru_cache_uninit:
2473 	xfs_mru_cache_uninit();
2474  out_destroy_wq:
2475 	xfs_destroy_workqueues();
2476  out_destroy_caches:
2477 	xfs_destroy_caches();
2478  out_destroy_hp:
2479 	xfs_cpu_hotplug_destroy();
2480  out:
2481 	return error;
2482 }
2483 
2484 STATIC void __exit
2485 exit_xfs_fs(void)
2486 {
2487 	xfs_qm_exit();
2488 	unregister_filesystem(&xfs_fs_type);
2489 #ifdef DEBUG
2490 	xfs_sysfs_del(&xfs_dbg_kobj);
2491 #endif
2492 	xchk_global_stats_teardown();
2493 	xfs_sysfs_del(&xfsstats.xs_kobj);
2494 	free_percpu(xfsstats.xs_stats);
2495 	kset_unregister(xfs_kset);
2496 	debugfs_remove(xfs_debugfs);
2497 	xfs_sysctl_unregister();
2498 	xfs_cleanup_procfs();
2499 	xfs_mru_cache_uninit();
2500 	xfs_destroy_workqueues();
2501 	xfs_destroy_caches();
2502 	xfs_uuid_table_free();
2503 	xfs_cpu_hotplug_destroy();
2504 }
2505 
2506 module_init(init_xfs_fs);
2507 module_exit(exit_xfs_fs);
2508 
2509 MODULE_AUTHOR("Silicon Graphics, Inc.");
2510 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2511 MODULE_LICENSE("GPL");
2512