xref: /openbmc/linux/fs/xfs/xfs_super.c (revision 1b36955c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 #include "xfs_pwork.h"
39 #include "xfs_ag.h"
40 #include "xfs_defer.h"
41 #include "xfs_attr_item.h"
42 #include "xfs_xattr.h"
43 #include "xfs_iunlink_item.h"
44 #include "xfs_dahash_test.h"
45 
46 #include <linux/magic.h>
47 #include <linux/fs_context.h>
48 #include <linux/fs_parser.h>
49 
50 static const struct super_operations xfs_super_operations;
51 
52 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
53 #ifdef DEBUG
54 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
55 #endif
56 
57 #ifdef CONFIG_HOTPLUG_CPU
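/*
 * Global list of live mounts.  The CPU hotplug "dead" handler walks this list
 * so that per-cpu state owned by each mount (such as queued inodegc work) can
 * be cleaned up when a CPU goes offline; see the ordering comment in
 * xfs_fs_fill_super() before xfs_mount_list_add() is called.
 */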
58 static LIST_HEAD(xfs_mount_list);
59 static DEFINE_SPINLOCK(xfs_mount_list_lock);
60 
61 static inline void xfs_mount_list_add(struct xfs_mount *mp)
62 {
63 	spin_lock(&xfs_mount_list_lock);
64 	list_add(&mp->m_mount_list, &xfs_mount_list);
65 	spin_unlock(&xfs_mount_list_lock);
66 }
67 
68 static inline void xfs_mount_list_del(struct xfs_mount *mp)
69 {
70 	spin_lock(&xfs_mount_list_lock);
71 	list_del(&mp->m_mount_list);
72 	spin_unlock(&xfs_mount_list_lock);
73 }
74 #else /* !CONFIG_HOTPLUG_CPU */
75 static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
76 static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
77 #endif
78 
79 enum xfs_dax_mode {
80 	XFS_DAX_INODE = 0,
81 	XFS_DAX_ALWAYS = 1,
82 	XFS_DAX_NEVER = 2,
83 };
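/*
 * "inode" clears both DAX feature bits so the per-inode hints decide whether
 * DAX is used; "always" and "never" force a mount-wide policy, each clearing
 * the opposite bit so the two can never be set simultaneously.
 */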
84 
85 static void
86 xfs_mount_set_dax_mode(
87 	struct xfs_mount	*mp,
88 	enum xfs_dax_mode	mode)
89 {
90 	switch (mode) {
91 	case XFS_DAX_INODE:
92 		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
93 		break;
94 	case XFS_DAX_ALWAYS:
95 		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
96 		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
97 		break;
98 	case XFS_DAX_NEVER:
99 		mp->m_features |= XFS_FEAT_DAX_NEVER;
100 		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
101 		break;
102 	}
103 }
104 
105 static const struct constant_table dax_param_enums[] = {
106 	{"inode",	XFS_DAX_INODE },
107 	{"always",	XFS_DAX_ALWAYS },
108 	{"never",	XFS_DAX_NEVER },
109 	{}
110 };
111 
112 /*
113  * Table driven mount option parser.
114  */
115 enum {
116 	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
117 	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
118 	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
119 	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
120 	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
121 	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
122 	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
123 	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
124 	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
125 };
126 
127 static const struct fs_parameter_spec xfs_fs_parameters[] = {
128 	fsparam_u32("logbufs",		Opt_logbufs),
129 	fsparam_string("logbsize",	Opt_logbsize),
130 	fsparam_string("logdev",	Opt_logdev),
131 	fsparam_string("rtdev",		Opt_rtdev),
132 	fsparam_flag("wsync",		Opt_wsync),
133 	fsparam_flag("noalign",		Opt_noalign),
134 	fsparam_flag("swalloc",		Opt_swalloc),
135 	fsparam_u32("sunit",		Opt_sunit),
136 	fsparam_u32("swidth",		Opt_swidth),
137 	fsparam_flag("nouuid",		Opt_nouuid),
138 	fsparam_flag("grpid",		Opt_grpid),
139 	fsparam_flag("nogrpid",		Opt_nogrpid),
140 	fsparam_flag("bsdgroups",	Opt_bsdgroups),
141 	fsparam_flag("sysvgroups",	Opt_sysvgroups),
142 	fsparam_string("allocsize",	Opt_allocsize),
143 	fsparam_flag("norecovery",	Opt_norecovery),
144 	fsparam_flag("inode64",		Opt_inode64),
145 	fsparam_flag("inode32",		Opt_inode32),
146 	fsparam_flag("ikeep",		Opt_ikeep),
147 	fsparam_flag("noikeep",		Opt_noikeep),
148 	fsparam_flag("largeio",		Opt_largeio),
149 	fsparam_flag("nolargeio",	Opt_nolargeio),
150 	fsparam_flag("attr2",		Opt_attr2),
151 	fsparam_flag("noattr2",		Opt_noattr2),
152 	fsparam_flag("filestreams",	Opt_filestreams),
153 	fsparam_flag("quota",		Opt_quota),
154 	fsparam_flag("noquota",		Opt_noquota),
155 	fsparam_flag("usrquota",	Opt_usrquota),
156 	fsparam_flag("grpquota",	Opt_grpquota),
157 	fsparam_flag("prjquota",	Opt_prjquota),
158 	fsparam_flag("uquota",		Opt_uquota),
159 	fsparam_flag("gquota",		Opt_gquota),
160 	fsparam_flag("pquota",		Opt_pquota),
161 	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
162 	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
163 	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
164 	fsparam_flag("qnoenforce",	Opt_qnoenforce),
165 	fsparam_flag("discard",		Opt_discard),
166 	fsparam_flag("nodiscard",	Opt_nodiscard),
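	/*
	 * "dax" is specified twice below: once as a bare flag (treated as
	 * dax=always) and once as an enum, so both "-o dax" and
	 * "-o dax=inode|always|never" are accepted.
	 */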
167 	fsparam_flag("dax",		Opt_dax),
168 	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
169 	{}
170 };
171 
172 struct proc_xfs_info {
173 	uint64_t	flag;
174 	char		*str;
175 };
176 
177 static int
178 xfs_fs_show_options(
179 	struct seq_file		*m,
180 	struct dentry		*root)
181 {
182 	static struct proc_xfs_info xfs_info_set[] = {
183 		/* the few simple ones we can get from the mount struct */
184 		{ XFS_FEAT_IKEEP,		",ikeep" },
185 		{ XFS_FEAT_WSYNC,		",wsync" },
186 		{ XFS_FEAT_NOALIGN,		",noalign" },
187 		{ XFS_FEAT_SWALLOC,		",swalloc" },
188 		{ XFS_FEAT_NOUUID,		",nouuid" },
189 		{ XFS_FEAT_NORECOVERY,		",norecovery" },
190 		{ XFS_FEAT_ATTR2,		",attr2" },
191 		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
192 		{ XFS_FEAT_GRPID,		",grpid" },
193 		{ XFS_FEAT_DISCARD,		",discard" },
194 		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
195 		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
196 		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
197 		{ 0, NULL }
198 	};
199 	struct xfs_mount	*mp = XFS_M(root->d_sb);
200 	struct proc_xfs_info	*xfs_infop;
201 
202 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
203 		if (mp->m_features & xfs_infop->flag)
204 			seq_puts(m, xfs_infop->str);
205 	}
206 
207 	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
208 
209 	if (xfs_has_allocsize(mp))
210 		seq_printf(m, ",allocsize=%dk",
211 			   (1 << mp->m_allocsize_log) >> 10);
212 
213 	if (mp->m_logbufs > 0)
214 		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
215 	if (mp->m_logbsize > 0)
216 		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
217 
218 	if (mp->m_logname)
219 		seq_show_option(m, "logdev", mp->m_logname);
220 	if (mp->m_rtname)
221 		seq_show_option(m, "rtdev", mp->m_rtname);
222 
223 	if (mp->m_dalign > 0)
224 		seq_printf(m, ",sunit=%d",
225 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
226 	if (mp->m_swidth > 0)
227 		seq_printf(m, ",swidth=%d",
228 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
229 
230 	if (mp->m_qflags & XFS_UQUOTA_ENFD)
231 		seq_puts(m, ",usrquota");
232 	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
233 		seq_puts(m, ",uqnoenforce");
234 
235 	if (mp->m_qflags & XFS_PQUOTA_ENFD)
236 		seq_puts(m, ",prjquota");
237 	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
238 		seq_puts(m, ",pqnoenforce");
239 
240 	if (mp->m_qflags & XFS_GQUOTA_ENFD)
241 		seq_puts(m, ",grpquota");
242 	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
243 		seq_puts(m, ",gqnoenforce");
244 
245 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
246 		seq_puts(m, ",noquota");
247 
248 	return 0;
249 }
250 
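/*
 * Apply the inode32 allocation policy to one AG: record whether the AG may
 * hold inodes (its inode numbers must fit in 32 bits) and whether it should
 * be preferred for metadata.  Returns true if the AG remains usable for inode
 * allocation under the 32-bit restriction so the caller can count usable AGs.
 */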
251 static bool
252 xfs_set_inode_alloc_perag(
253 	struct xfs_perag	*pag,
254 	xfs_ino_t		ino,
255 	xfs_agnumber_t		max_metadata)
256 {
257 	if (!xfs_is_inode32(pag->pag_mount)) {
258 		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
259 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
260 		return false;
261 	}
262 
263 	if (ino > XFS_MAXINUMBER_32) {
264 		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
265 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
266 		return false;
267 	}
268 
269 	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
270 	if (pag->pag_agno < max_metadata)
271 		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
272 	else
273 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
274 	return true;
275 }
276 
277 /*
278  * Set parameters for inode allocation heuristics, taking into account
279  * filesystem size and inode32/inode64 mount options; i.e. specifically
280  * whether or not XFS_FEAT_SMALL_INUMS is set.
281  *
282  * Inode allocation patterns are altered only if inode32 is requested
283  * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
284  * If altered, XFS_OPSTATE_INODE32 is set as well.
285  *
286  * An agcount independent of that in the mount structure is provided
287  * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
288  * to the potentially higher ag count.
289  *
290  * Returns the maximum AG index which may contain inodes.
291  */
292 xfs_agnumber_t
293 xfs_set_inode_alloc(
294 	struct xfs_mount *mp,
295 	xfs_agnumber_t	agcount)
296 {
297 	xfs_agnumber_t	index;
298 	xfs_agnumber_t	maxagi = 0;
299 	xfs_sb_t	*sbp = &mp->m_sb;
300 	xfs_agnumber_t	max_metadata;
301 	xfs_agino_t	agino;
302 	xfs_ino_t	ino;
303 
304 	/*
305 	 * Calculate how much should be reserved for inodes to meet
306 	 * the max inode percentage.  Used only for inode32.
307 	 */
308 	if (M_IGEO(mp)->maxicount) {
309 		uint64_t	icount;
310 
311 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
312 		do_div(icount, 100);
313 		icount += sbp->sb_agblocks - 1;
314 		do_div(icount, sbp->sb_agblocks);
315 		max_metadata = icount;
316 	} else {
317 		max_metadata = agcount;
318 	}
319 
320 	/* Get the last possible inode in the filesystem */
321 	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
322 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
323 
324 	/*
325 	 * If user asked for no more than 32-bit inodes, and the fs is
326 	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
327 	 * the allocator to accommodate the request.
328 	 */
329 	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
330 		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
331 	else
332 		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
333 
334 	for (index = 0; index < agcount; index++) {
335 		struct xfs_perag	*pag;
336 
337 		ino = XFS_AGINO_TO_INO(mp, index, agino);
338 
339 		pag = xfs_perag_get(mp, index);
340 		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
341 			maxagi++;
342 		xfs_perag_put(pag);
343 	}
344 
345 	return xfs_is_inode32(mp) ? maxagi : agcount;
346 }
347 
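/*
 * Validate a dax=always request.  If neither the data nor the realtime device
 * is DAX capable, or the block size does not match the page size, DAX is
 * switched off with a warning and the mount continues; combining reflink with
 * DAX when the data device is a partition is a hard error.
 */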
348 static int
349 xfs_setup_dax_always(
350 	struct xfs_mount	*mp)
351 {
352 	if (!mp->m_ddev_targp->bt_daxdev &&
353 	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
354 		xfs_alert(mp,
355 			"DAX unsupported by block device. Turning off DAX.");
356 		goto disable_dax;
357 	}
358 
359 	if (mp->m_super->s_blocksize != PAGE_SIZE) {
360 		xfs_alert(mp,
361 			"DAX not supported for blocksize. Turning off DAX.");
362 		goto disable_dax;
363 	}
364 
365 	if (xfs_has_reflink(mp) &&
366 	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
367 		xfs_alert(mp,
368 			"DAX and reflink cannot work with multi-partitions!");
369 		return -EINVAL;
370 	}
371 
372 	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
373 	return 0;
374 
375 disable_dax:
376 	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
377 	return 0;
378 }
379 
380 STATIC int
381 xfs_blkdev_get(
382 	xfs_mount_t		*mp,
383 	const char		*name,
384 	struct block_device	**bdevp)
385 {
386 	int			error = 0;
387 
388 	*bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE,
389 				    mp->m_super, &fs_holder_ops);
390 	if (IS_ERR(*bdevp)) {
391 		error = PTR_ERR(*bdevp);
392 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
393 	}
394 
395 	return error;
396 }
397 
398 STATIC void
399 xfs_shutdown_devices(
400 	struct xfs_mount	*mp)
401 {
402 	/*
403 	 * Udev is triggered whenever anyone closes a block device or unmounts
404 	 * a file system on a block device.
405 	 * The default udev rules invoke blkid to read the fs super and create
406 	 * symlinks to the bdev under /dev/disk.  For this, it uses buffered
407 	 * reads through the page cache.
408 	 *
409 	 * xfs_db also uses buffered reads to examine metadata.  There is no
410 	 * coordination between xfs_db and udev, which means that they can run
411 	 * concurrently.  Note there is no coordination between the kernel and
412 	 * blkid either.
413 	 *
414 	 * On a system with 64k pages, the page cache can cache the superblock
415 	 * and the root inode (and hence the root directory) with the same 64k
416 	 * page.  If udev spawns blkid after the mkfs and the system is busy
417 	 * enough that it is still running when xfs_db starts up, they'll both
418 	 * read from the same page in the pagecache.
419 	 *
420 	 * The unmount writes updated inode metadata to disk directly.  The XFS
421 	 * buffer cache does not use the bdev pagecache, so it needs to
422 	 * invalidate that pagecache on unmount.  If the above scenario occurs,
423 	 * the pagecache no longer reflects what's on disk, xfs_db reads the
424 	 * stale metadata, and fails to find /a.  Most of the time this succeeds
425 	 * because closing a bdev invalidates the page cache, but when processes
426 	 * race, everyone loses.
427 	 */
428 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
429 		blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
430 		invalidate_bdev(mp->m_logdev_targp->bt_bdev);
431 	}
432 	if (mp->m_rtdev_targp) {
433 		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
434 		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
435 	}
436 	blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
437 	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
438 }
439 
440 /*
441  * The file system configurations are:
442  *	(1) device (partition) with data and internal log
443  *	(2) logical volume with data and log subvolumes.
444  *	(3) logical volume with data, log, and realtime subvolumes.
445  *
446  * We only have to handle opening the log and realtime volumes here if
447  * they are present.  The data subvolume has already been opened by
448  * get_sb_bdev() and is stored in sb->s_bdev.
449  */
450 STATIC int
451 xfs_open_devices(
452 	struct xfs_mount	*mp)
453 {
454 	struct super_block	*sb = mp->m_super;
455 	struct block_device	*ddev = sb->s_bdev;
456 	struct block_device	*logdev = NULL, *rtdev = NULL;
457 	int			error;
458 
459 	/*
460 	 * blkdev_put() can't be called under s_umount, see the comment
461 	 * in get_tree_bdev() for more details
462 	 */
463 	up_write(&sb->s_umount);
464 
465 	/*
466 	 * Open real time and log devices - order is important.
467 	 */
468 	if (mp->m_logname) {
469 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
470 		if (error)
471 			goto out_relock;
472 	}
473 
474 	if (mp->m_rtname) {
475 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
476 		if (error)
477 			goto out_close_logdev;
478 
479 		if (rtdev == ddev || rtdev == logdev) {
480 			xfs_warn(mp,
481 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
482 			error = -EINVAL;
483 			goto out_close_rtdev;
484 		}
485 	}
486 
487 	/*
488 	 * Setup xfs_mount buffer target pointers
489 	 */
490 	error = -ENOMEM;
491 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
492 	if (!mp->m_ddev_targp)
493 		goto out_close_rtdev;
494 
495 	if (rtdev) {
496 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
497 		if (!mp->m_rtdev_targp)
498 			goto out_free_ddev_targ;
499 	}
500 
501 	if (logdev && logdev != ddev) {
502 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
503 		if (!mp->m_logdev_targp)
504 			goto out_free_rtdev_targ;
505 	} else {
506 		mp->m_logdev_targp = mp->m_ddev_targp;
507 	}
508 
509 	error = 0;
510 out_relock:
511 	down_write(&sb->s_umount);
512 	return error;
513 
514  out_free_rtdev_targ:
515 	if (mp->m_rtdev_targp)
516 		xfs_free_buftarg(mp->m_rtdev_targp);
517  out_free_ddev_targ:
518 	xfs_free_buftarg(mp->m_ddev_targp);
519  out_close_rtdev:
520 	if (rtdev)
521 		blkdev_put(rtdev, sb);
522  out_close_logdev:
523 	if (logdev && logdev != ddev)
524 		blkdev_put(logdev, sb);
525 	goto out_relock;
526 }
527 
528 /*
529  * Setup xfs_mount buffer target pointers based on superblock
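 * The data and realtime buftargs use the filesystem sector size; an external
 * log buftarg uses the log sector size when the sector size feature is set,
 * and the 512 byte basic block size otherwise.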
530  */
531 STATIC int
532 xfs_setup_devices(
533 	struct xfs_mount	*mp)
534 {
535 	int			error;
536 
537 	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
538 	if (error)
539 		return error;
540 
541 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
542 		unsigned int	log_sector_size = BBSIZE;
543 
544 		if (xfs_has_sector(mp))
545 			log_sector_size = mp->m_sb.sb_logsectsize;
546 		error = xfs_setsize_buftarg(mp->m_logdev_targp,
547 					    log_sector_size);
548 		if (error)
549 			return error;
550 	}
551 	if (mp->m_rtdev_targp) {
552 		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
553 					    mp->m_sb.sb_sectsize);
554 		if (error)
555 			return error;
556 	}
557 
558 	return 0;
559 }
560 
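/*
 * Per-mount workqueues.  All of them are freezable so queued work is quiesced
 * across a filesystem freeze; all but the sync workqueue are WQ_MEM_RECLAIM
 * so they keep a rescuer thread and can make forward progress while the
 * system is reclaiming memory.
 */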
561 STATIC int
562 xfs_init_mount_workqueues(
563 	struct xfs_mount	*mp)
564 {
565 	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
566 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
567 			1, mp->m_super->s_id);
568 	if (!mp->m_buf_workqueue)
569 		goto out;
570 
571 	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
572 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
573 			0, mp->m_super->s_id);
574 	if (!mp->m_unwritten_workqueue)
575 		goto out_destroy_buf;
576 
577 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
578 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
579 			0, mp->m_super->s_id);
580 	if (!mp->m_reclaim_workqueue)
581 		goto out_destroy_unwritten;
582 
583 	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
584 			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
585 			0, mp->m_super->s_id);
586 	if (!mp->m_blockgc_wq)
587 		goto out_destroy_reclaim;
588 
589 	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
590 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
591 			1, mp->m_super->s_id);
592 	if (!mp->m_inodegc_wq)
593 		goto out_destroy_blockgc;
594 
595 	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
596 			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
597 	if (!mp->m_sync_workqueue)
598 		goto out_destroy_inodegc;
599 
600 	return 0;
601 
602 out_destroy_inodegc:
603 	destroy_workqueue(mp->m_inodegc_wq);
604 out_destroy_blockgc:
605 	destroy_workqueue(mp->m_blockgc_wq);
606 out_destroy_reclaim:
607 	destroy_workqueue(mp->m_reclaim_workqueue);
608 out_destroy_unwritten:
609 	destroy_workqueue(mp->m_unwritten_workqueue);
610 out_destroy_buf:
611 	destroy_workqueue(mp->m_buf_workqueue);
612 out:
613 	return -ENOMEM;
614 }
615 
616 STATIC void
617 xfs_destroy_mount_workqueues(
618 	struct xfs_mount	*mp)
619 {
620 	destroy_workqueue(mp->m_sync_workqueue);
621 	destroy_workqueue(mp->m_blockgc_wq);
622 	destroy_workqueue(mp->m_inodegc_wq);
623 	destroy_workqueue(mp->m_reclaim_workqueue);
624 	destroy_workqueue(mp->m_unwritten_workqueue);
625 	destroy_workqueue(mp->m_buf_workqueue);
626 }
627 
628 static void
629 xfs_flush_inodes_worker(
630 	struct work_struct	*work)
631 {
632 	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
633 						   m_flush_inodes_work);
634 	struct super_block	*sb = mp->m_super;
635 
636 	if (down_read_trylock(&sb->s_umount)) {
637 		sync_inodes_sb(sb);
638 		up_read(&sb->s_umount);
639 	}
640 }
641 
642 /*
643  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
644  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
645  * for IO to complete so that we effectively throttle multiple callers to the
646  * rate at which IO is completing.
647  */
648 void
649 xfs_flush_inodes(
650 	struct xfs_mount	*mp)
651 {
652 	/*
653 	 * If flush_work() returns true then that means we waited for a flush
654 	 * which was already in progress.  Don't bother running another scan.
655 	 */
656 	if (flush_work(&mp->m_flush_inodes_work))
657 		return;
658 
659 	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
660 	flush_work(&mp->m_flush_inodes_work);
661 }
662 
663 /* Catch misguided souls that try to use this interface on XFS */
664 STATIC struct inode *
665 xfs_fs_alloc_inode(
666 	struct super_block	*sb)
667 {
668 	BUG();
669 	return NULL;
670 }
671 
672 /*
673  * Now that the generic code is guaranteed not to be accessing
674  * the linux inode, we can inactivate and reclaim the inode.
675  */
676 STATIC void
677 xfs_fs_destroy_inode(
678 	struct inode		*inode)
679 {
680 	struct xfs_inode	*ip = XFS_I(inode);
681 
682 	trace_xfs_destroy_inode(ip);
683 
684 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
685 	XFS_STATS_INC(ip->i_mount, vn_rele);
686 	XFS_STATS_INC(ip->i_mount, vn_remove);
687 	xfs_inode_mark_reclaimable(ip);
688 }
689 
690 static void
691 xfs_fs_dirty_inode(
692 	struct inode			*inode,
693 	int				flags)
694 {
695 	struct xfs_inode		*ip = XFS_I(inode);
696 	struct xfs_mount		*mp = ip->i_mount;
697 	struct xfs_trans		*tp;
698 
699 	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
700 		return;
701 
702 	/*
703 	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
704 	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
705 	 * in flags possibly together with I_DIRTY_SYNC.
706 	 */
707 	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
708 		return;
709 
710 	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
711 		return;
712 	xfs_ilock(ip, XFS_ILOCK_EXCL);
713 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
714 	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
715 	xfs_trans_commit(tp);
716 }
717 
718 /*
719  * Slab object creation initialisation for the XFS inode.
720  * This covers only the idempotent fields in the XFS inode;
721  * all other fields need to be initialised on allocation
722  * from the slab. This avoids the need to repeatedly initialise
723  * fields in the xfs inode that are left in the initialised state
724  * when freeing the inode.
725  */
726 STATIC void
727 xfs_fs_inode_init_once(
728 	void			*inode)
729 {
730 	struct xfs_inode	*ip = inode;
731 
732 	memset(ip, 0, sizeof(struct xfs_inode));
733 
734 	/* vfs inode */
735 	inode_init_once(VFS_I(ip));
736 
737 	/* xfs inode */
738 	atomic_set(&ip->i_pincount, 0);
739 	spin_lock_init(&ip->i_flags_lock);
740 
741 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
742 		     "xfsino", ip->i_ino);
743 }
744 
745 /*
746  * We do an unlocked check for XFS_IDONTCACHE here because we are already
747  * serialised against cache hits via the inode->i_lock and igrab() in
748  * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
749  * racing with us, and it avoids needing to grab a spinlock here for every inode
750  * we drop the final reference on.
751  */
752 STATIC int
753 xfs_fs_drop_inode(
754 	struct inode		*inode)
755 {
756 	struct xfs_inode	*ip = XFS_I(inode);
757 
758 	/*
759 	 * If this unlinked inode is in the middle of recovery, don't
760 	 * drop the inode just yet; log recovery will take care of
761 	 * that.  See the comment for this inode flag.
762 	 */
763 	if (ip->i_flags & XFS_IRECOVERY) {
764 		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
765 		return 0;
766 	}
767 
768 	return generic_drop_inode(inode);
769 }
770 
771 static void
772 xfs_mount_free(
773 	struct xfs_mount	*mp)
774 {
775 	/*
776 	 * Free the buftargs here because blkdev_put needs to be called outside
777 	 * of sb->s_umount, which is held around the call to ->put_super.
778 	 */
779 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
780 		xfs_free_buftarg(mp->m_logdev_targp);
781 	if (mp->m_rtdev_targp)
782 		xfs_free_buftarg(mp->m_rtdev_targp);
783 	if (mp->m_ddev_targp)
784 		xfs_free_buftarg(mp->m_ddev_targp);
785 
786 	kfree(mp->m_rtname);
787 	kfree(mp->m_logname);
788 	kmem_free(mp);
789 }
790 
791 STATIC int
792 xfs_fs_sync_fs(
793 	struct super_block	*sb,
794 	int			wait)
795 {
796 	struct xfs_mount	*mp = XFS_M(sb);
797 	int			error;
798 
799 	trace_xfs_fs_sync_fs(mp, __return_address);
800 
801 	/*
802 	 * Doing anything during the async pass would be counterproductive.
803 	 */
804 	if (!wait)
805 		return 0;
806 
807 	error = xfs_log_force(mp, XFS_LOG_SYNC);
808 	if (error)
809 		return error;
810 
811 	if (laptop_mode) {
812 		/*
813 		 * The disk must be active because we're syncing.
814 		 * We schedule log work now (now that the disk is
815 		 * active) instead of later (when it might not be).
816 		 */
817 		flush_delayed_work(&mp->m_log->l_work);
818 	}
819 
820 	/*
821 	 * If we are called with page faults frozen out, it means we are about
822 	 * to freeze the transaction subsystem. Take the opportunity to shut
823 	 * down inodegc because once SB_FREEZE_FS is set it's too late to
824 	 * prevent inactivation races with freeze. The fs doesn't get called
825 	 * again by the freezing process until after SB_FREEZE_FS has been set,
826 	 * so it's now or never.  Same logic applies to speculative allocation
827 	 * garbage collection.
828 	 *
829 	 * We don't care if this is a normal syncfs call that does this or
830 	 * freeze that does this - we can run this multiple times without issue
831 	 * and we won't race with a restart because a restart can only occur
832 	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
833 	 */
834 	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
835 		xfs_inodegc_stop(mp);
836 		xfs_blockgc_stop(mp);
837 	}
838 
839 	return 0;
840 }
841 
842 STATIC int
843 xfs_fs_statfs(
844 	struct dentry		*dentry,
845 	struct kstatfs		*statp)
846 {
847 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
848 	xfs_sb_t		*sbp = &mp->m_sb;
849 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
850 	uint64_t		fakeinos, id;
851 	uint64_t		icount;
852 	uint64_t		ifree;
853 	uint64_t		fdblocks;
854 	xfs_extlen_t		lsize;
855 	int64_t			ffree;
856 
857 	/*
858 	 * Expedite background inodegc but don't wait. We do not want to block
859 	 * here waiting hours for a billion extent file to be truncated.
860 	 */
861 	xfs_inodegc_push(mp);
862 
863 	statp->f_type = XFS_SUPER_MAGIC;
864 	statp->f_namelen = MAXNAMELEN - 1;
865 
866 	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
867 	statp->f_fsid = u64_to_fsid(id);
868 
869 	icount = percpu_counter_sum(&mp->m_icount);
870 	ifree = percpu_counter_sum(&mp->m_ifree);
871 	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
872 
873 	spin_lock(&mp->m_sb_lock);
874 	statp->f_bsize = sbp->sb_blocksize;
875 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
876 	statp->f_blocks = sbp->sb_dblocks - lsize;
877 	spin_unlock(&mp->m_sb_lock);
878 
879 	/* make sure statp->f_bfree does not underflow */
880 	statp->f_bfree = max_t(int64_t, 0,
881 				fdblocks - xfs_fdblocks_unavailable(mp));
882 	statp->f_bavail = statp->f_bfree;
883 
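	/*
	 * f_files is an estimate: inodes already allocated plus the number
	 * that could still be created from free space, clamped by the
	 * per-filesystem limits below.
	 */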
884 	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
885 	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
886 	if (M_IGEO(mp)->maxicount)
887 		statp->f_files = min_t(typeof(statp->f_files),
888 					statp->f_files,
889 					M_IGEO(mp)->maxicount);
890 
891 	/* If sb_icount overshot maxicount, report actual allocation */
892 	statp->f_files = max_t(typeof(statp->f_files),
893 					statp->f_files,
894 					sbp->sb_icount);
895 
896 	/* make sure statp->f_ffree does not underflow */
897 	ffree = statp->f_files - (icount - ifree);
898 	statp->f_ffree = max_t(int64_t, ffree, 0);
899 
900 
901 	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
902 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
903 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
904 		xfs_qm_statvfs(ip, statp);
905 
906 	if (XFS_IS_REALTIME_MOUNT(mp) &&
907 	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
908 		s64	freertx;
909 
910 		statp->f_blocks = sbp->sb_rblocks;
911 		freertx = percpu_counter_sum_positive(&mp->m_frextents);
912 		statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
913 	}
914 
915 	return 0;
916 }
917 
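/*
 * Empty the reserve block pool for a freeze or read-only remount, remembering
 * its size so that a later thaw or read-write remount can refill it to the
 * same level (or to the default if nothing was stashed).
 */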
918 STATIC void
919 xfs_save_resvblks(struct xfs_mount *mp)
920 {
921 	uint64_t resblks = 0;
922 
923 	mp->m_resblks_save = mp->m_resblks;
924 	xfs_reserve_blocks(mp, &resblks, NULL);
925 }
926 
927 STATIC void
928 xfs_restore_resvblks(struct xfs_mount *mp)
929 {
930 	uint64_t resblks;
931 
932 	if (mp->m_resblks_save) {
933 		resblks = mp->m_resblks_save;
934 		mp->m_resblks_save = 0;
935 	} else
936 		resblks = xfs_default_resblks(mp);
937 
938 	xfs_reserve_blocks(mp, &resblks, NULL);
939 }
940 
941 /*
942  * Second stage of a freeze. The data is already frozen so we only
943  * need to take care of the metadata. Once that's done sync the superblock
944  * to the log to dirty it in case of a crash while frozen. This ensures that we
945  * will recover the unlinked inode lists on the next mount.
946  */
947 STATIC int
948 xfs_fs_freeze(
949 	struct super_block	*sb)
950 {
951 	struct xfs_mount	*mp = XFS_M(sb);
952 	unsigned int		flags;
953 	int			ret;
954 
955 	/*
956 	 * The filesystem is now frozen far enough that memory reclaim
957 	 * cannot safely operate on the filesystem. Hence we need to
958 	 * set a GFP_NOFS context here to avoid recursion deadlocks.
959 	 */
960 	flags = memalloc_nofs_save();
961 	xfs_save_resvblks(mp);
962 	ret = xfs_log_quiesce(mp);
963 	memalloc_nofs_restore(flags);
964 
965 	/*
966 	 * For read-write filesystems, we need to restart the inodegc on error
967 	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
968 	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
969 	 * here, so we can restart safely without racing with a stop in
970 	 * xfs_fs_sync_fs().
971 	 */
972 	if (ret && !xfs_is_readonly(mp)) {
973 		xfs_blockgc_start(mp);
974 		xfs_inodegc_start(mp);
975 	}
976 
977 	return ret;
978 }
979 
980 STATIC int
981 xfs_fs_unfreeze(
982 	struct super_block	*sb)
983 {
984 	struct xfs_mount	*mp = XFS_M(sb);
985 
986 	xfs_restore_resvblks(mp);
987 	xfs_log_work_queue(mp);
988 
989 	/*
990 	 * Don't reactivate the inodegc worker on a readonly filesystem because
991 	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
992 	 * worker because there are no speculative preallocations on a readonly
993 	 * filesystem.
994 	 */
995 	if (!xfs_is_readonly(mp)) {
996 		xfs_blockgc_start(mp);
997 		xfs_inodegc_start(mp);
998 	}
999 
1000 	return 0;
1001 }
1002 
1003 /*
1004  * This function fills in xfs_mount_t fields based on mount args.
1005  * Note: the superblock _has_ now been read in.
1006  */
1007 STATIC int
1008 xfs_finish_flags(
1009 	struct xfs_mount	*mp)
1010 {
1011 	/* Fail a mount where the logbuf is smaller than the log stripe */
1012 	if (xfs_has_logv2(mp)) {
1013 		if (mp->m_logbsize <= 0 &&
1014 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1015 			mp->m_logbsize = mp->m_sb.sb_logsunit;
1016 		} else if (mp->m_logbsize > 0 &&
1017 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
1018 			xfs_warn(mp,
1019 		"logbuf size must be greater than or equal to log stripe size");
1020 			return -EINVAL;
1021 		}
1022 	} else {
1023 		/* Fail a mount if the logbuf is larger than 32K */
1024 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1025 			xfs_warn(mp,
1026 		"logbuf size for version 1 logs must be 16K or 32K");
1027 			return -EINVAL;
1028 		}
1029 	}
1030 
1031 	/*
1032 	 * V5 filesystems always use attr2 format for attributes.
1033 	 */
1034 	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
1035 		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1036 			     "attr2 is always enabled for V5 filesystems.");
1037 		return -EINVAL;
1038 	}
1039 
1040 	/*
1041 	 * prohibit r/w mounts of read-only filesystems
1042 	 */
1043 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
1044 		xfs_warn(mp,
1045 			"cannot mount a read-only filesystem as read-write");
1046 		return -EROFS;
1047 	}
1048 
1049 	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
1050 	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
1051 	    !xfs_has_pquotino(mp)) {
1052 		xfs_warn(mp,
1053 		  "Super block does not support project and group quota together");
1054 		return -EINVAL;
1055 	}
1056 
1057 	return 0;
1058 }
1059 
1060 static int
1061 xfs_init_percpu_counters(
1062 	struct xfs_mount	*mp)
1063 {
1064 	int		error;
1065 
1066 	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1067 	if (error)
1068 		return -ENOMEM;
1069 
1070 	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1071 	if (error)
1072 		goto free_icount;
1073 
1074 	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1075 	if (error)
1076 		goto free_ifree;
1077 
1078 	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1079 	if (error)
1080 		goto free_fdblocks;
1081 
1082 	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
1083 	if (error)
1084 		goto free_delalloc;
1085 
1086 	return 0;
1087 
1088 free_delalloc:
1089 	percpu_counter_destroy(&mp->m_delalloc_blks);
1090 free_fdblocks:
1091 	percpu_counter_destroy(&mp->m_fdblocks);
1092 free_ifree:
1093 	percpu_counter_destroy(&mp->m_ifree);
1094 free_icount:
1095 	percpu_counter_destroy(&mp->m_icount);
1096 	return -ENOMEM;
1097 }
1098 
1099 void
1100 xfs_reinit_percpu_counters(
1101 	struct xfs_mount	*mp)
1102 {
1103 	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1104 	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1105 	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1106 	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
1107 }
1108 
1109 static void
1110 xfs_destroy_percpu_counters(
1111 	struct xfs_mount	*mp)
1112 {
1113 	percpu_counter_destroy(&mp->m_icount);
1114 	percpu_counter_destroy(&mp->m_ifree);
1115 	percpu_counter_destroy(&mp->m_fdblocks);
1116 	ASSERT(xfs_is_shutdown(mp) ||
1117 	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1118 	percpu_counter_destroy(&mp->m_delalloc_blks);
1119 	percpu_counter_destroy(&mp->m_frextents);
1120 }
1121 
1122 static int
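/*
 * Set up the per-cpu inode inactivation state: each CPU gets a lockless list
 * of inodes awaiting inactivation and a delayed work item to process it.  The
 * owning CPU number is recorded only in DEBUG/XFS_WARN builds.
 */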
1123 xfs_inodegc_init_percpu(
1124 	struct xfs_mount	*mp)
1125 {
1126 	struct xfs_inodegc	*gc;
1127 	int			cpu;
1128 
1129 	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1130 	if (!mp->m_inodegc)
1131 		return -ENOMEM;
1132 
1133 	for_each_possible_cpu(cpu) {
1134 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1135 #if defined(DEBUG) || defined(XFS_WARN)
1136 		gc->cpu = cpu;
1137 #endif
1138 		init_llist_head(&gc->list);
1139 		gc->items = 0;
1140 		gc->error = 0;
1141 		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1142 	}
1143 	return 0;
1144 }
1145 
1146 static void
1147 xfs_inodegc_free_percpu(
1148 	struct xfs_mount	*mp)
1149 {
1150 	if (!mp->m_inodegc)
1151 		return;
1152 	free_percpu(mp->m_inodegc);
1153 }
1154 
1155 static void
1156 xfs_fs_put_super(
1157 	struct super_block	*sb)
1158 {
1159 	struct xfs_mount	*mp = XFS_M(sb);
1160 
1161 	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
1162 	xfs_filestream_unmount(mp);
1163 	xfs_unmountfs(mp);
1164 
1165 	xfs_freesb(mp);
1166 	free_percpu(mp->m_stats.xs_stats);
1167 	xfs_mount_list_del(mp);
1168 	xfs_inodegc_free_percpu(mp);
1169 	xfs_destroy_percpu_counters(mp);
1170 	xfs_destroy_mount_workqueues(mp);
1171 	xfs_shutdown_devices(mp);
1172 }
1173 
1174 static long
1175 xfs_fs_nr_cached_objects(
1176 	struct super_block	*sb,
1177 	struct shrink_control	*sc)
1178 {
1179 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1180 	if (WARN_ON_ONCE(!sb->s_fs_info))
1181 		return 0;
1182 	return xfs_reclaim_inodes_count(XFS_M(sb));
1183 }
1184 
1185 static long
1186 xfs_fs_free_cached_objects(
1187 	struct super_block	*sb,
1188 	struct shrink_control	*sc)
1189 {
1190 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1191 }
1192 
1193 static void
1194 xfs_fs_shutdown(
1195 	struct super_block	*sb)
1196 {
1197 	xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
1198 }
1199 
1200 static const struct super_operations xfs_super_operations = {
1201 	.alloc_inode		= xfs_fs_alloc_inode,
1202 	.destroy_inode		= xfs_fs_destroy_inode,
1203 	.dirty_inode		= xfs_fs_dirty_inode,
1204 	.drop_inode		= xfs_fs_drop_inode,
1205 	.put_super		= xfs_fs_put_super,
1206 	.sync_fs		= xfs_fs_sync_fs,
1207 	.freeze_fs		= xfs_fs_freeze,
1208 	.unfreeze_fs		= xfs_fs_unfreeze,
1209 	.statfs			= xfs_fs_statfs,
1210 	.show_options		= xfs_fs_show_options,
1211 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1212 	.free_cached_objects	= xfs_fs_free_cached_objects,
1213 	.shutdown		= xfs_fs_shutdown,
1214 };
1215 
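/*
 * Parse an integer mount option value with an optional k/m/g binary suffix,
 * e.g. "logbsize=32k" yields 32 << 10 = 32768.  Only a single trailing suffix
 * character is recognised.
 */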
1216 static int
1217 suffix_kstrtoint(
1218 	const char	*s,
1219 	unsigned int	base,
1220 	int		*res)
1221 {
1222 	int		last, shift_left_factor = 0, _res;
1223 	char		*value;
1224 	int		ret = 0;
1225 
1226 	value = kstrdup(s, GFP_KERNEL);
1227 	if (!value)
1228 		return -ENOMEM;
1229 
1230 	last = strlen(value) - 1;
1231 	if (value[last] == 'K' || value[last] == 'k') {
1232 		shift_left_factor = 10;
1233 		value[last] = '\0';
1234 	}
1235 	if (value[last] == 'M' || value[last] == 'm') {
1236 		shift_left_factor = 20;
1237 		value[last] = '\0';
1238 	}
1239 	if (value[last] == 'G' || value[last] == 'g') {
1240 		shift_left_factor = 30;
1241 		value[last] = '\0';
1242 	}
1243 
1244 	if (kstrtoint(value, base, &_res))
1245 		ret = -EINVAL;
1246 	kfree(value);
1247 	*res = _res << shift_left_factor;
1248 	return ret;
1249 }
1250 
1251 static inline void
1252 xfs_fs_warn_deprecated(
1253 	struct fs_context	*fc,
1254 	struct fs_parameter	*param,
1255 	uint64_t		flag,
1256 	bool			value)
1257 {
1258 	/* Don't print the warning if reconfiguring and current mount point
1259 	 * already had the flag set
1260 	 */
1261 	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1262             !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1263 		return;
1264 	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1265 }
1266 
1267 /*
1268  * Set mount state from a mount option.
1269  *
1270  * NOTE: mp->m_super is NULL here!
1271  */
1272 static int
1273 xfs_fs_parse_param(
1274 	struct fs_context	*fc,
1275 	struct fs_parameter	*param)
1276 {
1277 	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1278 	struct fs_parse_result	result;
1279 	int			size = 0;
1280 	int			opt;
1281 
1282 	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1283 	if (opt < 0)
1284 		return opt;
1285 
1286 	switch (opt) {
1287 	case Opt_logbufs:
1288 		parsing_mp->m_logbufs = result.uint_32;
1289 		return 0;
1290 	case Opt_logbsize:
1291 		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1292 			return -EINVAL;
1293 		return 0;
1294 	case Opt_logdev:
1295 		kfree(parsing_mp->m_logname);
1296 		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1297 		if (!parsing_mp->m_logname)
1298 			return -ENOMEM;
1299 		return 0;
1300 	case Opt_rtdev:
1301 		kfree(parsing_mp->m_rtname);
1302 		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1303 		if (!parsing_mp->m_rtname)
1304 			return -ENOMEM;
1305 		return 0;
1306 	case Opt_allocsize:
1307 		if (suffix_kstrtoint(param->string, 10, &size))
1308 			return -EINVAL;
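		/*
		 * Record the allocation size hint as a log2 value (taken from
		 * the lowest set bit); the exponent is range checked later in
		 * xfs_fs_validate_params().
		 */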
1309 		parsing_mp->m_allocsize_log = ffs(size) - 1;
1310 		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1311 		return 0;
1312 	case Opt_grpid:
1313 	case Opt_bsdgroups:
1314 		parsing_mp->m_features |= XFS_FEAT_GRPID;
1315 		return 0;
1316 	case Opt_nogrpid:
1317 	case Opt_sysvgroups:
1318 		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1319 		return 0;
1320 	case Opt_wsync:
1321 		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1322 		return 0;
1323 	case Opt_norecovery:
1324 		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1325 		return 0;
1326 	case Opt_noalign:
1327 		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1328 		return 0;
1329 	case Opt_swalloc:
1330 		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1331 		return 0;
1332 	case Opt_sunit:
1333 		parsing_mp->m_dalign = result.uint_32;
1334 		return 0;
1335 	case Opt_swidth:
1336 		parsing_mp->m_swidth = result.uint_32;
1337 		return 0;
1338 	case Opt_inode32:
1339 		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1340 		return 0;
1341 	case Opt_inode64:
1342 		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1343 		return 0;
1344 	case Opt_nouuid:
1345 		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1346 		return 0;
1347 	case Opt_largeio:
1348 		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1349 		return 0;
1350 	case Opt_nolargeio:
1351 		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1352 		return 0;
1353 	case Opt_filestreams:
1354 		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1355 		return 0;
1356 	case Opt_noquota:
1357 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1358 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1359 		return 0;
1360 	case Opt_quota:
1361 	case Opt_uquota:
1362 	case Opt_usrquota:
1363 		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1364 		return 0;
1365 	case Opt_qnoenforce:
1366 	case Opt_uqnoenforce:
1367 		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1368 		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1369 		return 0;
1370 	case Opt_pquota:
1371 	case Opt_prjquota:
1372 		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1373 		return 0;
1374 	case Opt_pqnoenforce:
1375 		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1376 		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1377 		return 0;
1378 	case Opt_gquota:
1379 	case Opt_grpquota:
1380 		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1381 		return 0;
1382 	case Opt_gqnoenforce:
1383 		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1384 		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1385 		return 0;
1386 	case Opt_discard:
1387 		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1388 		return 0;
1389 	case Opt_nodiscard:
1390 		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1391 		return 0;
1392 #ifdef CONFIG_FS_DAX
1393 	case Opt_dax:
1394 		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1395 		return 0;
1396 	case Opt_dax_enum:
1397 		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1398 		return 0;
1399 #endif
1400 	/* Following mount options will be removed in September 2025 */
1401 	case Opt_ikeep:
1402 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1403 		parsing_mp->m_features |= XFS_FEAT_IKEEP;
1404 		return 0;
1405 	case Opt_noikeep:
1406 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1407 		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1408 		return 0;
1409 	case Opt_attr2:
1410 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1411 		parsing_mp->m_features |= XFS_FEAT_ATTR2;
1412 		return 0;
1413 	case Opt_noattr2:
1414 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1415 		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1416 		return 0;
1417 	default:
1418 		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1419 		return -EINVAL;
1420 	}
1421 
1422 	return 0;
1423 }
1424 
1425 static int
1426 xfs_fs_validate_params(
1427 	struct xfs_mount	*mp)
1428 {
1429 	/* No recovery flag requires a read-only mount */
1430 	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1431 		xfs_warn(mp, "no-recovery mounts must be read-only.");
1432 		return -EINVAL;
1433 	}
1434 
1435 	/*
1436 	 * We have not read the superblock at this point, so only the attr2
1437 	 * mount option can set the attr2 feature by this stage.
1438 	 */
1439 	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1440 		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1441 		return -EINVAL;
1442 	}
1443 
1444 
1445 	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1446 		xfs_warn(mp,
1447 	"sunit and swidth options incompatible with the noalign option");
1448 		return -EINVAL;
1449 	}
1450 
1451 	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1452 		xfs_warn(mp, "quota support not available in this kernel.");
1453 		return -EINVAL;
1454 	}
1455 
1456 	if ((mp->m_dalign && !mp->m_swidth) ||
1457 	    (!mp->m_dalign && mp->m_swidth)) {
1458 		xfs_warn(mp, "sunit and swidth must be specified together");
1459 		return -EINVAL;
1460 	}
1461 
1462 	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1463 		xfs_warn(mp,
1464 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1465 			mp->m_swidth, mp->m_dalign);
1466 		return -EINVAL;
1467 	}
1468 
1469 	if (mp->m_logbufs != -1 &&
1470 	    mp->m_logbufs != 0 &&
1471 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1472 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1473 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1474 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1475 		return -EINVAL;
1476 	}
1477 
1478 	if (mp->m_logbsize != -1 &&
1479 	    mp->m_logbsize !=  0 &&
1480 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1481 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1482 	     !is_power_of_2(mp->m_logbsize))) {
1483 		xfs_warn(mp,
1484 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1485 			mp->m_logbsize);
1486 		return -EINVAL;
1487 	}
1488 
1489 	if (xfs_has_allocsize(mp) &&
1490 	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1491 	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1492 		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1493 			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1494 		return -EINVAL;
1495 	}
1496 
1497 	return 0;
1498 }
1499 
1500 static int
1501 xfs_fs_fill_super(
1502 	struct super_block	*sb,
1503 	struct fs_context	*fc)
1504 {
1505 	struct xfs_mount	*mp = sb->s_fs_info;
1506 	struct inode		*root;
1507 	int			flags = 0, error;
1508 
1509 	mp->m_super = sb;
1510 
1511 	error = xfs_fs_validate_params(mp);
1512 	if (error)
1513 		return error;
1514 
1515 	sb_min_blocksize(sb, BBSIZE);
1516 	sb->s_xattr = xfs_xattr_handlers;
1517 	sb->s_export_op = &xfs_export_operations;
1518 #ifdef CONFIG_XFS_QUOTA
1519 	sb->s_qcop = &xfs_quotactl_operations;
1520 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1521 #endif
1522 	sb->s_op = &xfs_super_operations;
1523 
1524 	/*
1525 	 * Delay mount work if the debug hook is set. This is debug
1526 	 * instrumentation to coordinate simulation of xfs mount failures with
1527 	 * VFS superblock operations.
1528 	 */
1529 	if (xfs_globals.mount_delay) {
1530 		xfs_notice(mp, "Delaying mount for %d seconds.",
1531 			xfs_globals.mount_delay);
1532 		msleep(xfs_globals.mount_delay * 1000);
1533 	}
1534 
1535 	if (fc->sb_flags & SB_SILENT)
1536 		flags |= XFS_MFSI_QUIET;
1537 
1538 	error = xfs_open_devices(mp);
1539 	if (error)
1540 		return error;
1541 
1542 	error = xfs_init_mount_workqueues(mp);
1543 	if (error)
1544 		goto out_shutdown_devices;
1545 
1546 	error = xfs_init_percpu_counters(mp);
1547 	if (error)
1548 		goto out_destroy_workqueues;
1549 
1550 	error = xfs_inodegc_init_percpu(mp);
1551 	if (error)
1552 		goto out_destroy_counters;
1553 
1554 	/*
1555 	 * All percpu data structures requiring cleanup when a cpu goes offline
1556 	 * must be allocated before adding this @mp to the cpu-dead handler's
1557 	 * mount list.
1558 	 */
1559 	xfs_mount_list_add(mp);
1560 
1561 	/* Allocate stats memory before we do operations that might use it */
1562 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1563 	if (!mp->m_stats.xs_stats) {
1564 		error = -ENOMEM;
1565 		goto out_destroy_inodegc;
1566 	}
1567 
1568 	error = xfs_readsb(mp, flags);
1569 	if (error)
1570 		goto out_free_stats;
1571 
1572 	error = xfs_finish_flags(mp);
1573 	if (error)
1574 		goto out_free_sb;
1575 
1576 	error = xfs_setup_devices(mp);
1577 	if (error)
1578 		goto out_free_sb;
1579 
1580 	/* V4 support is undergoing deprecation. */
1581 	if (!xfs_has_crc(mp)) {
1582 #ifdef CONFIG_XFS_SUPPORT_V4
1583 		xfs_warn_once(mp,
1584 	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1585 #else
1586 		xfs_warn(mp,
1587 	"Deprecated V4 format (crc=0) not supported by kernel.");
1588 		error = -EINVAL;
1589 		goto out_free_sb;
1590 #endif
1591 	}
1592 
1593 	/* ASCII case insensitivity is undergoing deprecation. */
1594 	if (xfs_has_asciici(mp)) {
1595 #ifdef CONFIG_XFS_SUPPORT_ASCII_CI
1596 		xfs_warn_once(mp,
1597 	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
1598 #else
1599 		xfs_warn(mp,
1600 	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
1601 		error = -EINVAL;
1602 		goto out_free_sb;
1603 #endif
1604 	}
1605 
1606 	/* Filesystem claims it needs repair, so refuse the mount. */
1607 	if (xfs_has_needsrepair(mp)) {
1608 		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1609 		error = -EFSCORRUPTED;
1610 		goto out_free_sb;
1611 	}
1612 
1613 	/*
1614 	 * Don't touch the filesystem if a user tool thinks it owns the primary
1615 	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1616 	 * we don't check them at all.
1617 	 */
1618 	if (mp->m_sb.sb_inprogress) {
1619 		xfs_warn(mp, "Offline file system operation in progress!");
1620 		error = -EFSCORRUPTED;
1621 		goto out_free_sb;
1622 	}
1623 
1624 	/*
1625 	 * Until this is fixed only page-sized or smaller data blocks work.
1626 	 */
1627 	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1628 		xfs_warn(mp,
1629 		"File system with blocksize %d bytes. "
1630 		"Only pagesize (%ld) or less will currently work.",
1631 				mp->m_sb.sb_blocksize, PAGE_SIZE);
1632 		error = -ENOSYS;
1633 		goto out_free_sb;
1634 	}
1635 
1636 	/* Ensure this filesystem fits in the page cache limits */
1637 	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1638 	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1639 		xfs_warn(mp,
1640 		"file system too large to be mounted on this system.");
1641 		error = -EFBIG;
1642 		goto out_free_sb;
1643 	}
1644 
1645 	/*
1646 	 * XFS block mappings use 54 bits to store the logical block offset.
1647 	 * This should suffice to handle the maximum file size that the VFS
1648 	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1649 	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1650 	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1651 	 * to check this assertion.
1652 	 *
1653 	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1654 	 * maximum pagecache offset in units of fs blocks.
1655 	 */
1656 	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1657 		xfs_warn(mp,
1658 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1659 			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1660 			 XFS_MAX_FILEOFF);
1661 		error = -EINVAL;
1662 		goto out_free_sb;
1663 	}
1664 
1665 	error = xfs_filestream_mount(mp);
1666 	if (error)
1667 		goto out_free_sb;
1668 
1669 	/*
1670 	 * we must configure the block size in the superblock before we run the
1671 	 * full mount process as the mount process can lookup and cache inodes.
1672 	 */
1673 	sb->s_magic = XFS_SUPER_MAGIC;
1674 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1675 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1676 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1677 	sb->s_max_links = XFS_MAXLINK;
1678 	sb->s_time_gran = 1;
1679 	if (xfs_has_bigtime(mp)) {
1680 		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1681 		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1682 	} else {
1683 		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1684 		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1685 	}
1686 	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1687 	sb->s_iflags |= SB_I_CGROUPWB;
1688 
1689 	set_posix_acl_flag(sb);
1690 
1691 	/* version 5 superblocks support inode version counters. */
1692 	if (xfs_has_crc(mp))
1693 		sb->s_flags |= SB_I_VERSION;
1694 
1695 	if (xfs_has_dax_always(mp)) {
1696 		error = xfs_setup_dax_always(mp);
1697 		if (error)
1698 			goto out_filestream_unmount;
1699 	}
1700 
1701 	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
1702 		xfs_warn(mp,
1703 	"mounting with \"discard\" option, but the device does not support discard");
1704 		mp->m_features &= ~XFS_FEAT_DISCARD;
1705 	}
1706 
1707 	if (xfs_has_reflink(mp)) {
1708 		if (mp->m_sb.sb_rblocks) {
1709 			xfs_alert(mp,
1710 	"reflink not compatible with realtime device!");
1711 			error = -EINVAL;
1712 			goto out_filestream_unmount;
1713 		}
1714 
1715 		if (xfs_globals.always_cow) {
1716 			xfs_info(mp, "using DEBUG-only always_cow mode.");
1717 			mp->m_always_cow = true;
1718 		}
1719 	}
1720 
1721 	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1722 		xfs_alert(mp,
1723 	"reverse mapping btree not compatible with realtime device!");
1724 		error = -EINVAL;
1725 		goto out_filestream_unmount;
1726 	}
1727 
1728 	error = xfs_mountfs(mp);
1729 	if (error)
1730 		goto out_filestream_unmount;
1731 
1732 	root = igrab(VFS_I(mp->m_rootip));
1733 	if (!root) {
1734 		error = -ENOENT;
1735 		goto out_unmount;
1736 	}
1737 	sb->s_root = d_make_root(root);
1738 	if (!sb->s_root) {
1739 		error = -ENOMEM;
1740 		goto out_unmount;
1741 	}
1742 
1743 	return 0;
1744 
1745  out_filestream_unmount:
1746 	xfs_filestream_unmount(mp);
1747  out_free_sb:
1748 	xfs_freesb(mp);
1749  out_free_stats:
1750 	free_percpu(mp->m_stats.xs_stats);
1751  out_destroy_inodegc:
1752 	xfs_mount_list_del(mp);
1753 	xfs_inodegc_free_percpu(mp);
1754  out_destroy_counters:
1755 	xfs_destroy_percpu_counters(mp);
1756  out_destroy_workqueues:
1757 	xfs_destroy_mount_workqueues(mp);
1758  out_shutdown_devices:
1759 	xfs_shutdown_devices(mp);
1760 	return error;
1761 
1762  out_unmount:
1763 	xfs_filestream_unmount(mp);
1764 	xfs_unmountfs(mp);
1765 	goto out_free_sb;
1766 }
1767 
1768 static int
1769 xfs_fs_get_tree(
1770 	struct fs_context	*fc)
1771 {
1772 	return get_tree_bdev(fc, xfs_fs_fill_super);
1773 }
1774 
1775 static int
1776 xfs_remount_rw(
1777 	struct xfs_mount	*mp)
1778 {
1779 	struct xfs_sb		*sbp = &mp->m_sb;
1780 	int error;
1781 
1782 	if (xfs_has_norecovery(mp)) {
1783 		xfs_warn(mp,
1784 			"ro->rw transition prohibited on norecovery mount");
1785 		return -EINVAL;
1786 	}
1787 
1788 	if (xfs_sb_is_v5(sbp) &&
1789 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1790 		xfs_warn(mp,
1791 	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1792 			(sbp->sb_features_ro_compat &
1793 				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1794 		return -EINVAL;
1795 	}
1796 
1797 	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1798 
1799 	/*
1800 	 * If this is the first remount to writeable state we might have some
1801 	 * superblock changes to update.
1802 	 */
1803 	if (mp->m_update_sb) {
1804 		error = xfs_sync_sb(mp, false);
1805 		if (error) {
1806 			xfs_warn(mp, "failed to write sb changes");
1807 			return error;
1808 		}
1809 		mp->m_update_sb = false;
1810 	}
1811 
1812 	/*
1813 	 * Fill out the reserve pool if it is empty. Use the stashed value if
1814 	 * it is non-zero, otherwise go with the default.
1815 	 */
1816 	xfs_restore_resvblks(mp);
1817 	xfs_log_work_queue(mp);
1818 	xfs_blockgc_start(mp);
1819 
1820 	/* Create the per-AG metadata reservation pool .*/
1821 	error = xfs_fs_reserve_ag_blocks(mp);
1822 	if (error && error != -ENOSPC)
1823 		return error;
1824 
1825 	/* Re-enable the background inode inactivation worker. */
1826 	xfs_inodegc_start(mp);
1827 
1828 	return 0;
1829 }
1830 
1831 static int
1832 xfs_remount_ro(
1833 	struct xfs_mount	*mp)
1834 {
1835 	struct xfs_icwalk	icw = {
1836 		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
1837 	};
1838 	int			error;
1839 
1840 	/* Flush all the dirty data to disk. */
1841 	error = sync_filesystem(mp->m_super);
1842 	if (error)
1843 		return error;
1844 
1845 	/*
1846 	 * Cancel background eofb scanning so it cannot race with the final
1847 	 * log force+buftarg wait and deadlock the remount.
1848 	 */
1849 	xfs_blockgc_stop(mp);
1850 
1851 	/*
1852 	 * Clear out all remaining COW staging extents and speculative post-EOF
1853 	 * preallocations so that we don't leave inodes requiring inactivation
1854 	 * cleanups during reclaim on a read-only mount.  We must process every
1855 	 * cached inode, so this requires a synchronous cache scan.
1856 	 */
1857 	error = xfs_blockgc_free_space(mp, &icw);
1858 	if (error) {
1859 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1860 		return error;
1861 	}
1862 
1863 	/*
1864 	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
1865 	 * flushed all pending inodegc work when it sync'd the filesystem.
1866 	 * The VFS holds s_umount, so we know that inodes cannot enter
1867 	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
1868 	 * we send inodes straight to reclaim, so no inodes will be queued.
1869 	 */
1870 	xfs_inodegc_stop(mp);
1871 
1872 	/* Free the per-AG metadata reservation pool. */
1873 	error = xfs_fs_unreserve_ag_blocks(mp);
1874 	if (error) {
1875 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1876 		return error;
1877 	}
1878 
1879 	/*
1880 	 * Before we sync the metadata, we need to free up the reserve block
1881 	 * pool so that the used block count in the superblock on disk is
1882 	 * correct at the end of the remount. Stash the current reserve pool
1883 	 * size so that if we get remounted rw, we can return it to the same
1884 	 * size.
1885 	 */
1886 	xfs_save_resvblks(mp);
1887 
1888 	xfs_log_clean(mp);
1889 	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1890 
1891 	return 0;
1892 }
1893 
1894 /*
1895  * Logically we would return an error here to prevent users from believing
1896  * they have changed, via remount, mount options that cannot be changed.
1897  *
1898  * Unfortunately mount(8) in some cases adds all options from mtab and fstab
1899  * to the mount arguments, so we cannot blindly reject options; we have to
1900  * check whether each specified option actually differs from the currently
1901  * set value and reject it only if it does.
1902  *
1903  * Until that is implemented we return success for every remount request and
1904  * silently ignore all options that we can't actually change.
1905  */
1906 static int
1907 xfs_fs_reconfigure(
1908 	struct fs_context *fc)
1909 {
1910 	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1911 	struct xfs_mount	*new_mp = fc->s_fs_info;
1912 	int			flags = fc->sb_flags;
1913 	int			error;
1914 
1915 	/* version 5 superblocks always support version counters. */
1916 	if (xfs_has_crc(mp))
1917 		fc->sb_flags |= SB_I_VERSION;
1918 
1919 	error = xfs_fs_validate_params(new_mp);
1920 	if (error)
1921 		return error;
1922 
1923 	/* inode32 -> inode64 */
1924 	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1925 		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1926 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1927 	}
1928 
1929 	/* inode64 -> inode32 */
1930 	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1931 		mp->m_features |= XFS_FEAT_SMALL_INUMS;
1932 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1933 	}
1934 
1935 	/* ro -> rw */
1936 	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1937 		error = xfs_remount_rw(mp);
1938 		if (error)
1939 			return error;
1940 	}
1941 
1942 	/* rw -> ro */
1943 	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1944 		error = xfs_remount_ro(mp);
1945 		if (error)
1946 			return error;
1947 	}
1948 
1949 	return 0;
1950 }
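
/*
 * Added sketch (hypothetical userspace code, not part of XFS): both
 * remount directions handled by xfs_fs_reconfigure() above can be driven
 * through the classic mount(2) interface.  When MS_REMOUNT is set, the
 * kernel ignores the source and filesystem-type arguments.
 */
#include <sys/mount.h>
#include <stdio.h>

static int demo_remount(const char *target, int readonly)
{
	unsigned long flags = MS_REMOUNT | (readonly ? MS_RDONLY : 0);

	if (mount(NULL, target, NULL, flags, NULL) == -1) {
		perror("mount(MS_REMOUNT)");
		return -1;
	}
	return 0;	/* demo_remount("/mnt/xfs", 0) requests ro->rw */
}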
1951 
1952 static void
1953 xfs_fs_free(
1954 	struct fs_context	*fc)
1955 {
1956 	struct xfs_mount	*mp = fc->s_fs_info;
1957 
1958 	/*
1959 	 * mp is stored in the fs_context when it is initialized.
1960 	 * mp is transferred to the superblock on a successful mount,
1961 	 * but if an error occurs before the transfer we have to free
1962 	 * it here.
1963 	 */
1964 	if (mp)
1965 		xfs_mount_free(mp);
1966 }
1967 
1968 static const struct fs_context_operations xfs_context_ops = {
1969 	.parse_param = xfs_fs_parse_param,
1970 	.get_tree    = xfs_fs_get_tree,
1971 	.reconfigure = xfs_fs_reconfigure,
1972 	.free        = xfs_fs_free,
1973 };
1974 
1975 static int xfs_init_fs_context(
1976 	struct fs_context	*fc)
1977 {
1978 	struct xfs_mount	*mp;
1979 
1980 	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1981 	if (!mp)
1982 		return -ENOMEM;
1983 
1984 	spin_lock_init(&mp->m_sb_lock);
1985 	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1986 	spin_lock_init(&mp->m_perag_lock);
1987 	mutex_init(&mp->m_growlock);
1988 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1989 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1990 	mp->m_kobj.kobject.kset = xfs_kset;
1991 	/*
1992 	 * We don't create the finobt per-ag space reservation until after log
1993 	 * recovery, so we must set this to true so that an ifree transaction
1994 	 * started during log recovery will not depend on space reservations
1995 	 * for finobt expansion.
1996 	 */
1997 	mp->m_finobt_nores = true;
1998 
1999 	/*
2000 	 * These can be overridden by the mount option parsing.
2001 	 */
2002 	mp->m_logbufs = -1;
2003 	mp->m_logbsize = -1;
2004 	mp->m_allocsize_log = 16; /* 64k */
2005 
2006 	/*
2007 	 * Copy binary VFS mount flags we are interested in.
2008 	 */
2009 	if (fc->sb_flags & SB_RDONLY)
2010 		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
2011 	if (fc->sb_flags & SB_DIRSYNC)
2012 		mp->m_features |= XFS_FEAT_DIRSYNC;
2013 	if (fc->sb_flags & SB_SYNCHRONOUS)
2014 		mp->m_features |= XFS_FEAT_WSYNC;
2015 
2016 	fc->s_fs_info = mp;
2017 	fc->ops = &xfs_context_ops;
2018 
2019 	return 0;
2020 }
2021 
2022 static void
2023 xfs_kill_sb(
2024 	struct super_block		*sb)
2025 {
2026 	kill_block_super(sb);
2027 	xfs_mount_free(XFS_M(sb));
2028 }
2029 
2030 static struct file_system_type xfs_fs_type = {
2031 	.owner			= THIS_MODULE,
2032 	.name			= "xfs",
2033 	.init_fs_context	= xfs_init_fs_context,
2034 	.parameters		= xfs_fs_parameters,
2035 	.kill_sb		= xfs_kill_sb,
2036 	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
2037 };
2038 MODULE_ALIAS_FS("xfs");
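
/*
 * Added sketch (hypothetical userspace code, not part of XFS): exercising
 * the fs_context plumbing registered above through the new mount API.
 * Assumes glibc 2.36+, which provides fsopen(2)/fsconfig(2)/fsmount(2)/
 * move_mount(2) wrappers in <sys/mount.h>; older systems must go through
 * syscall(2).  The device path and mount point are placeholders.
 */
#include <sys/mount.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fsfd, mntfd;

	fsfd = fsopen("xfs", FSOPEN_CLOEXEC);	/* -> xfs_init_fs_context() */
	if (fsfd < 0) {
		perror("fsopen");
		return 1;
	}

	/* Each option is routed through xfs_fs_parse_param(). */
	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdb1", 0);
	fsconfig(fsfd, FSCONFIG_SET_FLAG, "wsync", NULL, 0);

	/* CMD_CREATE ends up in xfs_fs_get_tree() -> get_tree_bdev(). */
	if (fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
		perror("fsconfig");
		return 1;
	}

	mntfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
	if (mntfd < 0) {
		perror("fsmount");
		return 1;
	}
	move_mount(mntfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
	close(mntfd);
	close(fsfd);
	return 0;
}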
2039 
2040 STATIC int __init
2041 xfs_init_caches(void)
2042 {
2043 	int		error;
2044 
2045 	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2046 					 SLAB_HWCACHE_ALIGN |
2047 					 SLAB_RECLAIM_ACCOUNT |
2048 					 SLAB_MEM_SPREAD,
2049 					 NULL);
2050 	if (!xfs_buf_cache)
2051 		goto out;
2052 
2053 	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
2054 						sizeof(struct xlog_ticket),
2055 						0, 0, NULL);
2056 	if (!xfs_log_ticket_cache)
2057 		goto out_destroy_buf_cache;
2058 
2059 	error = xfs_btree_init_cur_caches();
2060 	if (error)
2061 		goto out_destroy_log_ticket_cache;
2062 
2063 	error = xfs_defer_init_item_caches();
2064 	if (error)
2065 		goto out_destroy_btree_cur_cache;
2066 
2067 	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2068 					      sizeof(struct xfs_da_state),
2069 					      0, 0, NULL);
2070 	if (!xfs_da_state_cache)
2071 		goto out_destroy_defer_item_cache;
2072 
2073 	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2074 					   sizeof(struct xfs_ifork),
2075 					   0, 0, NULL);
2076 	if (!xfs_ifork_cache)
2077 		goto out_destroy_da_state_cache;
2078 
2079 	xfs_trans_cache = kmem_cache_create("xfs_trans",
2080 					   sizeof(struct xfs_trans),
2081 					   0, 0, NULL);
2082 	if (!xfs_trans_cache)
2083 		goto out_destroy_ifork_cache;
2084 
2085 
2086 	/*
2087 	 * The size of the cache-allocated buf log item is the maximum
2088 	 * size possible under XFS.  This wastes a little bit of memory,
2089 	 * but it is much faster.
2090 	 */
2091 	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2092 					      sizeof(struct xfs_buf_log_item),
2093 					      0, 0, NULL);
2094 	if (!xfs_buf_item_cache)
2095 		goto out_destroy_trans_cache;
2096 
2097 	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2098 			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2099 			0, 0, NULL);
2100 	if (!xfs_efd_cache)
2101 		goto out_destroy_buf_item_cache;
2102 
2103 	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2104 			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2105 			0, 0, NULL);
2106 	if (!xfs_efi_cache)
2107 		goto out_destroy_efd_cache;
2108 
2109 	xfs_inode_cache = kmem_cache_create("xfs_inode",
2110 					   sizeof(struct xfs_inode), 0,
2111 					   (SLAB_HWCACHE_ALIGN |
2112 					    SLAB_RECLAIM_ACCOUNT |
2113 					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
2114 					   xfs_fs_inode_init_once);
2115 	if (!xfs_inode_cache)
2116 		goto out_destroy_efi_cache;
2117 
2118 	xfs_ili_cache = kmem_cache_create("xfs_ili",
2119 					 sizeof(struct xfs_inode_log_item), 0,
2120 					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2121 					 NULL);
2122 	if (!xfs_ili_cache)
2123 		goto out_destroy_inode_cache;
2124 
2125 	xfs_icreate_cache = kmem_cache_create("xfs_icr",
2126 					     sizeof(struct xfs_icreate_item),
2127 					     0, 0, NULL);
2128 	if (!xfs_icreate_cache)
2129 		goto out_destroy_ili_cache;
2130 
2131 	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2132 					 sizeof(struct xfs_rud_log_item),
2133 					 0, 0, NULL);
2134 	if (!xfs_rud_cache)
2135 		goto out_destroy_icreate_cache;
2136 
2137 	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2138 			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2139 			0, 0, NULL);
2140 	if (!xfs_rui_cache)
2141 		goto out_destroy_rud_cache;
2142 
2143 	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2144 					 sizeof(struct xfs_cud_log_item),
2145 					 0, 0, NULL);
2146 	if (!xfs_cud_cache)
2147 		goto out_destroy_rui_cache;
2148 
2149 	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2150 			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2151 			0, 0, NULL);
2152 	if (!xfs_cui_cache)
2153 		goto out_destroy_cud_cache;
2154 
2155 	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2156 					 sizeof(struct xfs_bud_log_item),
2157 					 0, 0, NULL);
2158 	if (!xfs_bud_cache)
2159 		goto out_destroy_cui_cache;
2160 
2161 	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2162 			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2163 			0, 0, NULL);
2164 	if (!xfs_bui_cache)
2165 		goto out_destroy_bud_cache;
2166 
2167 	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2168 					    sizeof(struct xfs_attrd_log_item),
2169 					    0, 0, NULL);
2170 	if (!xfs_attrd_cache)
2171 		goto out_destroy_bui_cache;
2172 
2173 	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2174 					    sizeof(struct xfs_attri_log_item),
2175 					    0, 0, NULL);
2176 	if (!xfs_attri_cache)
2177 		goto out_destroy_attrd_cache;
2178 
2179 	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2180 					     sizeof(struct xfs_iunlink_item),
2181 					     0, 0, NULL);
2182 	if (!xfs_iunlink_cache)
2183 		goto out_destroy_attri_cache;
2184 
2185 	return 0;
2186 
2187  out_destroy_attri_cache:
2188 	kmem_cache_destroy(xfs_attri_cache);
2189  out_destroy_attrd_cache:
2190 	kmem_cache_destroy(xfs_attrd_cache);
2191  out_destroy_bui_cache:
2192 	kmem_cache_destroy(xfs_bui_cache);
2193  out_destroy_bud_cache:
2194 	kmem_cache_destroy(xfs_bud_cache);
2195  out_destroy_cui_cache:
2196 	kmem_cache_destroy(xfs_cui_cache);
2197  out_destroy_cud_cache:
2198 	kmem_cache_destroy(xfs_cud_cache);
2199  out_destroy_rui_cache:
2200 	kmem_cache_destroy(xfs_rui_cache);
2201  out_destroy_rud_cache:
2202 	kmem_cache_destroy(xfs_rud_cache);
2203  out_destroy_icreate_cache:
2204 	kmem_cache_destroy(xfs_icreate_cache);
2205  out_destroy_ili_cache:
2206 	kmem_cache_destroy(xfs_ili_cache);
2207  out_destroy_inode_cache:
2208 	kmem_cache_destroy(xfs_inode_cache);
2209  out_destroy_efi_cache:
2210 	kmem_cache_destroy(xfs_efi_cache);
2211  out_destroy_efd_cache:
2212 	kmem_cache_destroy(xfs_efd_cache);
2213  out_destroy_buf_item_cache:
2214 	kmem_cache_destroy(xfs_buf_item_cache);
2215  out_destroy_trans_cache:
2216 	kmem_cache_destroy(xfs_trans_cache);
2217  out_destroy_ifork_cache:
2218 	kmem_cache_destroy(xfs_ifork_cache);
2219  out_destroy_da_state_cache:
2220 	kmem_cache_destroy(xfs_da_state_cache);
2221  out_destroy_defer_item_cache:
2222 	xfs_defer_destroy_item_caches();
2223  out_destroy_btree_cur_cache:
2224 	xfs_btree_destroy_cur_caches();
2225  out_destroy_log_ticket_cache:
2226 	kmem_cache_destroy(xfs_log_ticket_cache);
2227  out_destroy_buf_cache:
2228 	kmem_cache_destroy(xfs_buf_cache);
2229  out:
2230 	return -ENOMEM;
2231 }
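
/*
 * Added sketch (hypothetical names, not part of XFS): the stacked-goto
 * unwind idiom used by xfs_init_caches() above, in miniature.  A failure
 * at step N jumps to a label that tears down steps N-1..1 in reverse
 * order, so every allocation adds exactly one label.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *demo_a_cache;
static struct kmem_cache *demo_b_cache;

static int __init demo_init_caches(void)
{
	demo_a_cache = kmem_cache_create("demo_a", 64, 0, 0, NULL);
	if (!demo_a_cache)
		goto out;

	demo_b_cache = kmem_cache_create("demo_b", 128, 0, 0, NULL);
	if (!demo_b_cache)
		goto out_destroy_a_cache;

	return 0;

 out_destroy_a_cache:
	kmem_cache_destroy(demo_a_cache);
 out:
	return -ENOMEM;
}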
2232 
2233 STATIC void
2234 xfs_destroy_caches(void)
2235 {
2236 	/*
2237 	 * Make sure all delayed RCU free callbacks have run before we
2238 	 * destroy the caches.
2239 	 */
2240 	rcu_barrier();
2241 	kmem_cache_destroy(xfs_iunlink_cache);
2242 	kmem_cache_destroy(xfs_attri_cache);
2243 	kmem_cache_destroy(xfs_attrd_cache);
2244 	kmem_cache_destroy(xfs_bui_cache);
2245 	kmem_cache_destroy(xfs_bud_cache);
2246 	kmem_cache_destroy(xfs_cui_cache);
2247 	kmem_cache_destroy(xfs_cud_cache);
2248 	kmem_cache_destroy(xfs_rui_cache);
2249 	kmem_cache_destroy(xfs_rud_cache);
2250 	kmem_cache_destroy(xfs_icreate_cache);
2251 	kmem_cache_destroy(xfs_ili_cache);
2252 	kmem_cache_destroy(xfs_inode_cache);
2253 	kmem_cache_destroy(xfs_efi_cache);
2254 	kmem_cache_destroy(xfs_efd_cache);
2255 	kmem_cache_destroy(xfs_buf_item_cache);
2256 	kmem_cache_destroy(xfs_trans_cache);
2257 	kmem_cache_destroy(xfs_ifork_cache);
2258 	kmem_cache_destroy(xfs_da_state_cache);
2259 	xfs_defer_destroy_item_caches();
2260 	xfs_btree_destroy_cur_caches();
2261 	kmem_cache_destroy(xfs_log_ticket_cache);
2262 	kmem_cache_destroy(xfs_buf_cache);
2263 }
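
/*
 * Added sketch (hypothetical names, not part of XFS): the pattern that the
 * rcu_barrier() above protects.  Objects freed with call_rcu() may still
 * have callbacks pending at teardown time, so the backing cache must not
 * be destroyed until every queued callback has run.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	struct rcu_head	rcu;
};

static struct kmem_cache *demo_obj_cache;

static void demo_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(demo_obj_cache,
			container_of(head, struct demo_obj, rcu));
}

static void demo_release(struct demo_obj *obj)
{
	call_rcu(&obj->rcu, demo_free_rcu);	/* deferred free */
}

static void demo_exit(void)
{
	rcu_barrier();		/* wait for all pending demo_free_rcu() calls */
	kmem_cache_destroy(demo_obj_cache);
}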
2264 
2265 STATIC int __init
2266 xfs_init_workqueues(void)
2267 {
2268 	/*
2269 	 * The allocation workqueue can be used in memory reclaim situations
2270 	 * (writepage path), and parallelism is only limited by the number of
2271 	 * AGs in all the filesystems mounted. Hence use the default large
2272 	 * max_active value for this workqueue.
2273 	 */
2274 	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2275 			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2276 	if (!xfs_alloc_wq)
2277 		return -ENOMEM;
2278 
2279 	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2280 			0);
2281 	if (!xfs_discard_wq)
2282 		goto out_free_alloc_wq;
2283 
2284 	return 0;
2285 out_free_alloc_wq:
2286 	destroy_workqueue(xfs_alloc_wq);
2287 	return -ENOMEM;
2288 }
2289 
2290 STATIC void
2291 xfs_destroy_workqueues(void)
2292 {
2293 	destroy_workqueue(xfs_discard_wq);
2294 	destroy_workqueue(xfs_alloc_wq);
2295 }
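
/*
 * Added sketch (hypothetical, not part of XFS): submitting work to a
 * WQ_MEM_RECLAIM workqueue such as xfs_alloc_wq above.  The flag
 * guarantees a rescuer thread, so queued work can still make forward
 * progress when the system is too short on memory to spawn new workers,
 * which matters for work issued from the writeback path.
 */
#include <linux/workqueue.h>

static void demo_worker(struct work_struct *work)
{
	/* runs in process context; may block and allocate memory */
}

static DECLARE_WORK(demo_work, demo_worker);

/*
 * Usage: queue_work(xfs_alloc_wq, &demo_work).  Passing 0 as max_active
 * to alloc_workqueue() selects the default concurrency limit.
 */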
2296 
2297 #ifdef CONFIG_HOTPLUG_CPU
2298 static int
2299 xfs_cpu_dead(
2300 	unsigned int		cpu)
2301 {
2302 	struct xfs_mount	*mp, *n;
2303 
2304 	spin_lock(&xfs_mount_list_lock);
2305 	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
2306 		spin_unlock(&xfs_mount_list_lock);
2307 		xfs_inodegc_cpu_dead(mp, cpu);
2308 		xlog_cil_pcp_dead(mp->m_log, cpu);
2309 		spin_lock(&xfs_mount_list_lock);
2310 	}
2311 	spin_unlock(&xfs_mount_list_lock);
2312 	return 0;
2313 }
2314 
2315 static int __init
2316 xfs_cpu_hotplug_init(void)
2317 {
2318 	int	error;
2319 
2320 	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
2321 			xfs_cpu_dead);
2322 	if (error < 0)
2323 		xfs_alert(NULL,
2324 "Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
2325 			error);
2326 	return error;
2327 }
2328 
2329 static void
2330 xfs_cpu_hotplug_destroy(void)
2331 {
2332 	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
2333 }
2334 
2335 #else /* !CONFIG_HOTPLUG_CPU */
2336 static inline int xfs_cpu_hotplug_init(void) { return 0; }
2337 static inline void xfs_cpu_hotplug_destroy(void) {}
2338 #endif
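
/*
 * Added sketch (hypothetical driver code, not part of XFS): the same cpuhp
 * pattern with a dynamically allocated state instead of the fixed
 * CPUHP_XFS_DEAD slot.  With a NULL startup callback only the teardown
 * ("dead") callback runs, once for each CPU that goes offline.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static enum cpuhp_state demo_hp_state;

static int demo_cpu_dead(unsigned int cpu)
{
	/* drain @cpu's per-cpu data into global structures here */
	return 0;
}

static int __init demo_hotplug_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, "demo:dead",
					NULL, demo_cpu_dead);
	if (ret < 0)
		return ret;
	demo_hp_state = ret;	/* saved for cpuhp_remove_state_nocalls() */
	return 0;
}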
2339 
2340 STATIC int __init
2341 init_xfs_fs(void)
2342 {
2343 	int			error;
2344 
2345 	xfs_check_ondisk_structs();
2346 
2347 	error = xfs_dahash_test();
2348 	if (error)
2349 		return error;
2350 
2351 	printk(KERN_INFO XFS_VERSION_STRING " with "
2352 			 XFS_BUILD_OPTIONS " enabled\n");
2353 
2354 	xfs_dir_startup();
2355 
2356 	error = xfs_cpu_hotplug_init();
2357 	if (error)
2358 		goto out;
2359 
2360 	error = xfs_init_caches();
2361 	if (error)
2362 		goto out_destroy_hp;
2363 
2364 	error = xfs_init_workqueues();
2365 	if (error)
2366 		goto out_destroy_caches;
2367 
2368 	error = xfs_mru_cache_init();
2369 	if (error)
2370 		goto out_destroy_wq;
2371 
2372 	error = xfs_init_procfs();
2373 	if (error)
2374 		goto out_mru_cache_uninit;
2375 
2376 	error = xfs_sysctl_register();
2377 	if (error)
2378 		goto out_cleanup_procfs;
2379 
2380 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2381 	if (!xfs_kset) {
2382 		error = -ENOMEM;
2383 		goto out_sysctl_unregister;
2384 	}
2385 
2386 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2387 
2388 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2389 	if (!xfsstats.xs_stats) {
2390 		error = -ENOMEM;
2391 		goto out_kset_unregister;
2392 	}
2393 
2394 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2395 			       "stats");
2396 	if (error)
2397 		goto out_free_stats;
2398 
2399 #ifdef DEBUG
2400 	xfs_dbg_kobj.kobject.kset = xfs_kset;
2401 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2402 	if (error)
2403 		goto out_remove_stats_kobj;
2404 #endif
2405 
2406 	error = xfs_qm_init();
2407 	if (error)
2408 		goto out_remove_dbg_kobj;
2409 
2410 	error = register_filesystem(&xfs_fs_type);
2411 	if (error)
2412 		goto out_qm_exit;
2413 	return 0;
2414 
2415  out_qm_exit:
2416 	xfs_qm_exit();
2417  out_remove_dbg_kobj:
2418 #ifdef DEBUG
2419 	xfs_sysfs_del(&xfs_dbg_kobj);
2420  out_remove_stats_kobj:
2421 #endif
2422 	xfs_sysfs_del(&xfsstats.xs_kobj);
2423  out_free_stats:
2424 	free_percpu(xfsstats.xs_stats);
2425  out_kset_unregister:
2426 	kset_unregister(xfs_kset);
2427  out_sysctl_unregister:
2428 	xfs_sysctl_unregister();
2429  out_cleanup_procfs:
2430 	xfs_cleanup_procfs();
2431  out_mru_cache_uninit:
2432 	xfs_mru_cache_uninit();
2433  out_destroy_wq:
2434 	xfs_destroy_workqueues();
2435  out_destroy_caches:
2436 	xfs_destroy_caches();
2437  out_destroy_hp:
2438 	xfs_cpu_hotplug_destroy();
2439  out:
2440 	return error;
2441 }
2442 
2443 STATIC void __exit
2444 exit_xfs_fs(void)
2445 {
2446 	xfs_qm_exit();
2447 	unregister_filesystem(&xfs_fs_type);
2448 #ifdef DEBUG
2449 	xfs_sysfs_del(&xfs_dbg_kobj);
2450 #endif
2451 	xfs_sysfs_del(&xfsstats.xs_kobj);
2452 	free_percpu(xfsstats.xs_stats);
2453 	kset_unregister(xfs_kset);
2454 	xfs_sysctl_unregister();
2455 	xfs_cleanup_procfs();
2456 	xfs_mru_cache_uninit();
2457 	xfs_destroy_workqueues();
2458 	xfs_destroy_caches();
2459 	xfs_uuid_table_free();
2460 	xfs_cpu_hotplug_destroy();
2461 }
2462 
2463 module_init(init_xfs_fs);
2464 module_exit(exit_xfs_fs);
2465 
2466 MODULE_AUTHOR("Silicon Graphics, Inc.");
2467 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2468 MODULE_LICENSE("GPL");
2469