xref: /openbmc/linux/fs/xfs/xfs_super.c (revision d37cf9b63113f13d742713881ce691fc615d8b3b)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4   * All Rights Reserved.
5   */
6  
7  #include "xfs.h"
8  #include "xfs_shared.h"
9  #include "xfs_format.h"
10  #include "xfs_log_format.h"
11  #include "xfs_trans_resv.h"
12  #include "xfs_sb.h"
13  #include "xfs_mount.h"
14  #include "xfs_inode.h"
15  #include "xfs_btree.h"
16  #include "xfs_bmap.h"
17  #include "xfs_alloc.h"
18  #include "xfs_fsops.h"
19  #include "xfs_trans.h"
20  #include "xfs_buf_item.h"
21  #include "xfs_log.h"
22  #include "xfs_log_priv.h"
23  #include "xfs_dir2.h"
24  #include "xfs_extfree_item.h"
25  #include "xfs_mru_cache.h"
26  #include "xfs_inode_item.h"
27  #include "xfs_icache.h"
28  #include "xfs_trace.h"
29  #include "xfs_icreate_item.h"
30  #include "xfs_filestream.h"
31  #include "xfs_quota.h"
32  #include "xfs_sysfs.h"
33  #include "xfs_ondisk.h"
34  #include "xfs_rmap_item.h"
35  #include "xfs_refcount_item.h"
36  #include "xfs_bmap_item.h"
37  #include "xfs_reflink.h"
38  #include "xfs_pwork.h"
39  #include "xfs_ag.h"
40  #include "xfs_defer.h"
41  #include "xfs_attr_item.h"
42  #include "xfs_xattr.h"
43  #include "xfs_iunlink_item.h"
44  #include "xfs_dahash_test.h"
45  #include "scrub/stats.h"
46  
47  #include <linux/magic.h>
48  #include <linux/fs_context.h>
49  #include <linux/fs_parser.h>
50  
51  static const struct super_operations xfs_super_operations;
52  
53  static struct dentry *xfs_debugfs;	/* top-level xfs debugfs dir */
54  static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
55  #ifdef DEBUG
56  static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
57  #endif
58  
59  enum xfs_dax_mode {
60  	XFS_DAX_INODE = 0,
61  	XFS_DAX_ALWAYS = 1,
62  	XFS_DAX_NEVER = 2,
63  };
64  
65  static void
66  xfs_mount_set_dax_mode(
67  	struct xfs_mount	*mp,
68  	enum xfs_dax_mode	mode)
69  {
70  	switch (mode) {
71  	case XFS_DAX_INODE:
72  		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
73  		break;
74  	case XFS_DAX_ALWAYS:
75  		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
76  		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
77  		break;
78  	case XFS_DAX_NEVER:
79  		mp->m_features |= XFS_FEAT_DAX_NEVER;
80  		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
81  		break;
82  	}
83  }
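/*
 * A minimal standalone sketch of the mutually exclusive flag handling in
 * xfs_mount_set_dax_mode() above.  The DEMO_* values are illustrative
 * stand-ins, not the kernel's real XFS_FEAT_* definitions.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_FEAT_DAX_ALWAYS	(1ULL << 0)	/* hypothetical value */
#define DEMO_FEAT_DAX_NEVER	(1ULL << 1)	/* hypothetical value */

static void demo_set_dax_mode(uint64_t *features, int mode)
{
	switch (mode) {
	case 0:	/* "inode": defer to per-inode flags, clear both overrides */
		*features &= ~(DEMO_FEAT_DAX_ALWAYS | DEMO_FEAT_DAX_NEVER);
		break;
	case 1:	/* "always" */
		*features |= DEMO_FEAT_DAX_ALWAYS;
		*features &= ~DEMO_FEAT_DAX_NEVER;
		break;
	case 2:	/* "never" */
		*features |= DEMO_FEAT_DAX_NEVER;
		*features &= ~DEMO_FEAT_DAX_ALWAYS;
		break;
	}
}

int main(void)
{
	uint64_t f = 0;

	demo_set_dax_mode(&f, 1);		/* dax=always */
	demo_set_dax_mode(&f, 2);		/* a later dax=never wins */
	assert(f == DEMO_FEAT_DAX_NEVER);	/* never both bits at once */
	return 0;
}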
84  
85  static const struct constant_table dax_param_enums[] = {
86  	{"inode",	XFS_DAX_INODE },
87  	{"always",	XFS_DAX_ALWAYS },
88  	{"never",	XFS_DAX_NEVER },
89  	{}
90  };
91  
92  /*
93   * Table-driven mount option parser.
94   */
95  enum {
96  	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
97  	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
98  	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
99  	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
100  	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
101  	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
102  	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
103  	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
104  	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
105  };
106  
107  static const struct fs_parameter_spec xfs_fs_parameters[] = {
108  	fsparam_u32("logbufs",		Opt_logbufs),
109  	fsparam_string("logbsize",	Opt_logbsize),
110  	fsparam_string("logdev",	Opt_logdev),
111  	fsparam_string("rtdev",		Opt_rtdev),
112  	fsparam_flag("wsync",		Opt_wsync),
113  	fsparam_flag("noalign",		Opt_noalign),
114  	fsparam_flag("swalloc",		Opt_swalloc),
115  	fsparam_u32("sunit",		Opt_sunit),
116  	fsparam_u32("swidth",		Opt_swidth),
117  	fsparam_flag("nouuid",		Opt_nouuid),
118  	fsparam_flag("grpid",		Opt_grpid),
119  	fsparam_flag("nogrpid",		Opt_nogrpid),
120  	fsparam_flag("bsdgroups",	Opt_bsdgroups),
121  	fsparam_flag("sysvgroups",	Opt_sysvgroups),
122  	fsparam_string("allocsize",	Opt_allocsize),
123  	fsparam_flag("norecovery",	Opt_norecovery),
124  	fsparam_flag("inode64",		Opt_inode64),
125  	fsparam_flag("inode32",		Opt_inode32),
126  	fsparam_flag("ikeep",		Opt_ikeep),
127  	fsparam_flag("noikeep",		Opt_noikeep),
128  	fsparam_flag("largeio",		Opt_largeio),
129  	fsparam_flag("nolargeio",	Opt_nolargeio),
130  	fsparam_flag("attr2",		Opt_attr2),
131  	fsparam_flag("noattr2",		Opt_noattr2),
132  	fsparam_flag("filestreams",	Opt_filestreams),
133  	fsparam_flag("quota",		Opt_quota),
134  	fsparam_flag("noquota",		Opt_noquota),
135  	fsparam_flag("usrquota",	Opt_usrquota),
136  	fsparam_flag("grpquota",	Opt_grpquota),
137  	fsparam_flag("prjquota",	Opt_prjquota),
138  	fsparam_flag("uquota",		Opt_uquota),
139  	fsparam_flag("gquota",		Opt_gquota),
140  	fsparam_flag("pquota",		Opt_pquota),
141  	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
142  	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
143  	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
144  	fsparam_flag("qnoenforce",	Opt_qnoenforce),
145  	fsparam_flag("discard",		Opt_discard),
146  	fsparam_flag("nodiscard",	Opt_nodiscard),
147  	fsparam_flag("dax",		Opt_dax),
148  	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
149  	{}
150  };
151  
152  struct proc_xfs_info {
153  	uint64_t	flag;
154  	char		*str;
155  };
156  
157  static int
158  xfs_fs_show_options(
159  	struct seq_file		*m,
160  	struct dentry		*root)
161  {
162  	static struct proc_xfs_info xfs_info_set[] = {
163  		/* the few simple ones we can get from the mount struct */
164  		{ XFS_FEAT_IKEEP,		",ikeep" },
165  		{ XFS_FEAT_WSYNC,		",wsync" },
166  		{ XFS_FEAT_NOALIGN,		",noalign" },
167  		{ XFS_FEAT_SWALLOC,		",swalloc" },
168  		{ XFS_FEAT_NOUUID,		",nouuid" },
169  		{ XFS_FEAT_NORECOVERY,		",norecovery" },
170  		{ XFS_FEAT_ATTR2,		",attr2" },
171  		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
172  		{ XFS_FEAT_GRPID,		",grpid" },
173  		{ XFS_FEAT_DISCARD,		",discard" },
174  		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
175  		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
176  		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
177  		{ 0, NULL }
178  	};
179  	struct xfs_mount	*mp = XFS_M(root->d_sb);
180  	struct proc_xfs_info	*xfs_infop;
181  
182  	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
183  		if (mp->m_features & xfs_infop->flag)
184  			seq_puts(m, xfs_infop->str);
185  	}
186  
187  	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
188  
189  	if (xfs_has_allocsize(mp))
190  		seq_printf(m, ",allocsize=%dk",
191  			   (1 << mp->m_allocsize_log) >> 10);
192  
193  	if (mp->m_logbufs > 0)
194  		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
195  	if (mp->m_logbsize > 0)
196  		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
197  
198  	if (mp->m_logname)
199  		seq_show_option(m, "logdev", mp->m_logname);
200  	if (mp->m_rtname)
201  		seq_show_option(m, "rtdev", mp->m_rtname);
202  
203  	if (mp->m_dalign > 0)
204  		seq_printf(m, ",sunit=%d",
205  				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
206  	if (mp->m_swidth > 0)
207  		seq_printf(m, ",swidth=%d",
208  				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
209  
210  	if (mp->m_qflags & XFS_UQUOTA_ENFD)
211  		seq_puts(m, ",usrquota");
212  	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
213  		seq_puts(m, ",uqnoenforce");
214  
215  	if (mp->m_qflags & XFS_PQUOTA_ENFD)
216  		seq_puts(m, ",prjquota");
217  	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
218  		seq_puts(m, ",pqnoenforce");
219  
220  	if (mp->m_qflags & XFS_GQUOTA_ENFD)
221  		seq_puts(m, ",grpquota");
222  	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
223  		seq_puts(m, ",gqnoenforce");
224  
225  	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
226  		seq_puts(m, ",noquota");
227  
228  	return 0;
229  }
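/*
 * The table and seq_printf() calls above build the option string shown in
 * /proc/mounts, e.g. "rw,attr2,inode64,logbufs=8,logbsize=32k,noquota".
 * A small userspace sketch that prints that column for mounted xfs
 * filesystems; it only assumes the Linux /proc/self/mounts format.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char dev[256], mnt[256], type[64], opts[512];
	FILE *f = fopen("/proc/self/mounts", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%255s %255s %63s %511s %*d %*d",
		      dev, mnt, type, opts) == 4) {
		if (!strcmp(type, "xfs"))
			printf("%s: %s\n", mnt, opts);
	}
	fclose(f);
	return 0;
}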
230  
231  static bool
232  xfs_set_inode_alloc_perag(
233  	struct xfs_perag	*pag,
234  	xfs_ino_t		ino,
235  	xfs_agnumber_t		max_metadata)
236  {
237  	if (!xfs_is_inode32(pag->pag_mount)) {
238  		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
239  		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
240  		return false;
241  	}
242  
243  	if (ino > XFS_MAXINUMBER_32) {
244  		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
245  		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
246  		return false;
247  	}
248  
249  	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
250  	if (pag->pag_agno < max_metadata)
251  		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
252  	else
253  		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
254  	return true;
255  }
256  
257  /*
258   * Set parameters for inode allocation heuristics, taking into account
259   * filesystem size and inode32/inode64 mount options; i.e. specifically
260   * whether or not XFS_FEAT_SMALL_INUMS is set.
261   *
262   * Inode allocation patterns are altered only if inode32 is requested
263   * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
264   * If altered, XFS_OPSTATE_INODE32 is set as well.
265   *
266   * An agcount independent of that in the mount structure is provided
267   * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
268   * to the potentially higher ag count.
269   *
270   * Returns the maximum AG index which may contain inodes.
271   */
272  xfs_agnumber_t
273  xfs_set_inode_alloc(
274  	struct xfs_mount *mp,
275  	xfs_agnumber_t	agcount)
276  {
277  	xfs_agnumber_t	index;
278  	xfs_agnumber_t	maxagi = 0;
279  	xfs_sb_t	*sbp = &mp->m_sb;
280  	xfs_agnumber_t	max_metadata;
281  	xfs_agino_t	agino;
282  	xfs_ino_t	ino;
283  
284  	/*
285  	 * Calculate how much should be reserved for inodes to meet
286  	 * the max inode percentage.  Used only for inode32.
287  	 */
288  	if (M_IGEO(mp)->maxicount) {
289  		uint64_t	icount;
290  
291  		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
292  		do_div(icount, 100);
293  		icount += sbp->sb_agblocks - 1;
294  		do_div(icount, sbp->sb_agblocks);
295  		max_metadata = icount;
296  	} else {
297  		max_metadata = agcount;
298  	}
299  
300  	/* Get the last possible inode in the filesystem */
301  	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
302  	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
303  
304  	/*
305  	 * If user asked for no more than 32-bit inodes, and the fs is
306  	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
307  	 * the allocator to accommodate the request.
308  	 */
309  	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
310  		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
311  	else
312  		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
313  
314  	for (index = 0; index < agcount; index++) {
315  		struct xfs_perag	*pag;
316  
317  		ino = XFS_AGINO_TO_INO(mp, index, agino);
318  
319  		pag = xfs_perag_get(mp, index);
320  		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
321  			maxagi++;
322  		xfs_perag_put(pag);
323  	}
324  
325  	return xfs_is_inode32(mp) ? maxagi : agcount;
326  }
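/*
 * A worked example of when the inode32 heuristic above kicks in.  Inode
 * numbers encode (AG, block, offset), so ignoring AG header overhead and
 * rounding, the highest inode number grows with fsblocks multiplied by
 * inodes per block.  The block and inode sizes below are assumed mkfs
 * defaults (4 KiB blocks, 512 byte inodes), not values read from this
 * file; real thresholds land slightly lower than this estimate.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t blksize = 4096;		/* assumed sb_blocksize */
	uint64_t inosize = 512;			/* assumed sb_inodesize */
	uint64_t inopblock = blksize / inosize;	/* 8 inodes per block */
	uint64_t max_ino32 = 0xffffffffULL;	/* XFS_MAXINUMBER_32 */
	uint64_t max_blocks = max_ino32 / inopblock;

	/* ~2047 GiB: beyond this, inode32 must restrict allocation. */
	printf("32-bit inode numbers run out near %llu GiB\n",
	       (unsigned long long)(max_blocks * blksize >> 30));
	return 0;
}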
327  
328  static int
329  xfs_setup_dax_always(
330  	struct xfs_mount	*mp)
331  {
332  	if (!mp->m_ddev_targp->bt_daxdev &&
333  	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
334  		xfs_alert(mp,
335  			"DAX unsupported by block device. Turning off DAX.");
336  		goto disable_dax;
337  	}
338  
339  	if (mp->m_super->s_blocksize != PAGE_SIZE) {
340  		xfs_alert(mp,
341  			"DAX not supported for blocksize. Turning off DAX.");
342  		goto disable_dax;
343  	}
344  
345  	if (xfs_has_reflink(mp) &&
346  	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
347  		xfs_alert(mp,
348  			"DAX and reflink cannot work with multi-partitions!");
349  		return -EINVAL;
350  	}
351  
352  	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
353  	return 0;
354  
355  disable_dax:
356  	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
357  	return 0;
358  }
359  
360  STATIC int
361  xfs_blkdev_get(
362  	xfs_mount_t		*mp,
363  	const char		*name,
364  	struct block_device	**bdevp)
365  {
366  	int			error = 0;
367  
368  	*bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE,
369  				    mp->m_super, &fs_holder_ops);
370  	if (IS_ERR(*bdevp)) {
371  		error = PTR_ERR(*bdevp);
372  		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
373  	}
374  
375  	return error;
376  }
377  
378  STATIC void
379  xfs_shutdown_devices(
380  	struct xfs_mount	*mp)
381  {
382  	/*
383  	 * Udev is triggered whenever anyone closes a block device or unmounts
384  	 * a file system on a block device.
385  	 * The default udev rules invoke blkid to read the fs super and create
386  	 * symlinks to the bdev under /dev/disk.  For this, it uses buffered
387  	 * reads through the page cache.
388  	 *
389  	 * xfs_db also uses buffered reads to examine metadata.  There is no
390  	 * coordination between xfs_db and udev, which means that they can run
391  	 * concurrently.  Note there is no coordination between the kernel and
392  	 * blkid either.
393  	 *
394  	 * On a system with 64k pages, the page cache can cache the superblock
395  	 * and the root inode (and hence the root directory) with the same 64k
396  	 * page.  If udev spawns blkid after the mkfs and the system is busy
397  	 * enough that it is still running when xfs_db starts up, they'll both
398  	 * read from the same page in the pagecache.
399  	 *
400  	 * The unmount writes updated inode metadata to disk directly.  The XFS
401  	 * buffer cache does not use the bdev pagecache, so it needs to
402  	 * invalidate that pagecache on unmount.  If the above scenario occurs,
403  	 * the pagecache no longer reflects what's on disk, xfs_db reads the
404  	 * stale metadata, and fails to find /a.  Most of the time this succeeds
405  	 * because closing a bdev invalidates the page cache, but when processes
406  	 * race, everyone loses.
407  	 */
408  	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
409  		blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
410  		invalidate_bdev(mp->m_logdev_targp->bt_bdev);
411  	}
412  	if (mp->m_rtdev_targp) {
413  		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
414  		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
415  	}
416  	blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
417  	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
418  }
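/*
 * A loose userspace analogue of the invalidation above: dropping clean
 * cached pages of a block device so the next buffered read (by blkid,
 * xfs_db, etc.) hits the media.  This is only an illustration of the
 * idea; the kernel path uses blkdev_issue_flush()/invalidate_bdev(),
 * not fadvise.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s /dev/XXX\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	/* Ask the kernel to discard clean pagecache pages for this bdev. */
	posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
	close(fd);
	return 0;
}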
419  
420  /*
421   * The file system configurations are:
422   *	(1) device (partition) with data and internal log
423   *	(2) logical volume with data and log subvolumes.
424   *	(3) logical volume with data, log, and realtime subvolumes.
425   *
426   * We only have to handle opening the log and realtime volumes here if
427   * they are present.  The data subvolume has already been opened by
428   * get_sb_bdev() and is stored in sb->s_bdev.
429   */
430  STATIC int
431  xfs_open_devices(
432  	struct xfs_mount	*mp)
433  {
434  	struct super_block	*sb = mp->m_super;
435  	struct block_device	*ddev = sb->s_bdev;
436  	struct block_device	*logdev = NULL, *rtdev = NULL;
437  	int			error;
438  
439  	/*
440  	 * blkdev_put() can't be called under s_umount, see the comment
441  	 * in get_tree_bdev() for more details
442  	 */
443  	up_write(&sb->s_umount);
444  
445  	/*
446  	 * Open real time and log devices - order is important.
447  	 */
448  	if (mp->m_logname) {
449  		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
450  		if (error)
451  			goto out_relock;
452  	}
453  
454  	if (mp->m_rtname) {
455  		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
456  		if (error)
457  			goto out_close_logdev;
458  
459  		if (rtdev == ddev || rtdev == logdev) {
460  			xfs_warn(mp,
461  	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
462  			error = -EINVAL;
463  			goto out_close_rtdev;
464  		}
465  	}
466  
467  	/*
468  	 * Setup xfs_mount buffer target pointers
469  	 */
470  	error = -ENOMEM;
471  	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
472  	if (!mp->m_ddev_targp)
473  		goto out_close_rtdev;
474  
475  	if (rtdev) {
476  		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
477  		if (!mp->m_rtdev_targp)
478  			goto out_free_ddev_targ;
479  	}
480  
481  	if (logdev && logdev != ddev) {
482  		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
483  		if (!mp->m_logdev_targp)
484  			goto out_free_rtdev_targ;
485  	} else {
486  		mp->m_logdev_targp = mp->m_ddev_targp;
487  	}
488  
489  	error = 0;
490  out_relock:
491  	down_write(&sb->s_umount);
492  	return error;
493  
494   out_free_rtdev_targ:
495  	if (mp->m_rtdev_targp)
496  		xfs_free_buftarg(mp->m_rtdev_targp);
497   out_free_ddev_targ:
498  	xfs_free_buftarg(mp->m_ddev_targp);
499   out_close_rtdev:
500  	if (rtdev)
501  		blkdev_put(rtdev, sb);
502   out_close_logdev:
503  	if (logdev && logdev != ddev)
504  		blkdev_put(logdev, sb);
505  	goto out_relock;
506  }
507  
508  /*
509   * Setup xfs_mount buffer target pointers based on superblock
510   */
511  STATIC int
512  xfs_setup_devices(
513  	struct xfs_mount	*mp)
514  {
515  	int			error;
516  
517  	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
518  	if (error)
519  		return error;
520  
521  	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
522  		unsigned int	log_sector_size = BBSIZE;
523  
524  		if (xfs_has_sector(mp))
525  			log_sector_size = mp->m_sb.sb_logsectsize;
526  		error = xfs_setsize_buftarg(mp->m_logdev_targp,
527  					    log_sector_size);
528  		if (error)
529  			return error;
530  	}
531  	if (mp->m_rtdev_targp) {
532  		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
533  					    mp->m_sb.sb_sectsize);
534  		if (error)
535  			return error;
536  	}
537  
538  	return 0;
539  }
540  
541  STATIC int
542  xfs_init_mount_workqueues(
543  	struct xfs_mount	*mp)
544  {
545  	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
546  			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
547  			1, mp->m_super->s_id);
548  	if (!mp->m_buf_workqueue)
549  		goto out;
550  
551  	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
552  			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
553  			0, mp->m_super->s_id);
554  	if (!mp->m_unwritten_workqueue)
555  		goto out_destroy_buf;
556  
557  	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
558  			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
559  			0, mp->m_super->s_id);
560  	if (!mp->m_reclaim_workqueue)
561  		goto out_destroy_unwritten;
562  
563  	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
564  			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
565  			0, mp->m_super->s_id);
566  	if (!mp->m_blockgc_wq)
567  		goto out_destroy_reclaim;
568  
569  	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
570  			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
571  			1, mp->m_super->s_id);
572  	if (!mp->m_inodegc_wq)
573  		goto out_destroy_blockgc;
574  
575  	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
576  			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
577  	if (!mp->m_sync_workqueue)
578  		goto out_destroy_inodegc;
579  
580  	return 0;
581  
582  out_destroy_inodegc:
583  	destroy_workqueue(mp->m_inodegc_wq);
584  out_destroy_blockgc:
585  	destroy_workqueue(mp->m_blockgc_wq);
586  out_destroy_reclaim:
587  	destroy_workqueue(mp->m_reclaim_workqueue);
588  out_destroy_unwritten:
589  	destroy_workqueue(mp->m_unwritten_workqueue);
590  out_destroy_buf:
591  	destroy_workqueue(mp->m_buf_workqueue);
592  out:
593  	return -ENOMEM;
594  }
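/*
 * The error paths above use the kernel's standard "goto ladder": each
 * failure label unwinds exactly the allocations that succeeded before
 * it, in reverse order, so every resource has one cleanup site.  A
 * standalone sketch of the same shape with plain malloc() standing in
 * for alloc_workqueue():
 */
#include <stdlib.h>

struct demo_queues {
	void *buf_wq, *reclaim_wq, *sync_wq;	/* stand-ins for workqueues */
};

static int demo_init_queues(struct demo_queues *q)
{
	q->buf_wq = malloc(64);
	if (!q->buf_wq)
		goto out;
	q->reclaim_wq = malloc(64);
	if (!q->reclaim_wq)
		goto out_free_buf;
	q->sync_wq = malloc(64);
	if (!q->sync_wq)
		goto out_free_reclaim;
	return 0;

out_free_reclaim:
	free(q->reclaim_wq);
out_free_buf:
	free(q->buf_wq);
out:
	return -1;	/* -ENOMEM in the kernel version */
}

static void demo_destroy_queues(struct demo_queues *q)
{
	/* Teardown mirrors xfs_destroy_mount_workqueues(): reverse order. */
	free(q->sync_wq);
	free(q->reclaim_wq);
	free(q->buf_wq);
}

int main(void)
{
	struct demo_queues q;

	if (demo_init_queues(&q))
		return 1;
	demo_destroy_queues(&q);
	return 0;
}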
595  
596  STATIC void
597  xfs_destroy_mount_workqueues(
598  	struct xfs_mount	*mp)
599  {
600  	destroy_workqueue(mp->m_sync_workqueue);
601  	destroy_workqueue(mp->m_blockgc_wq);
602  	destroy_workqueue(mp->m_inodegc_wq);
603  	destroy_workqueue(mp->m_reclaim_workqueue);
604  	destroy_workqueue(mp->m_unwritten_workqueue);
605  	destroy_workqueue(mp->m_buf_workqueue);
606  }
607  
608  static void
609  xfs_flush_inodes_worker(
610  	struct work_struct	*work)
611  {
612  	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
613  						   m_flush_inodes_work);
614  	struct super_block	*sb = mp->m_super;
615  
616  	if (down_read_trylock(&sb->s_umount)) {
617  		sync_inodes_sb(sb);
618  		up_read(&sb->s_umount);
619  	}
620  }
621  
622  /*
623   * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
624   * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
625   * for IO to complete so that we effectively throttle multiple callers to the
626   * rate at which IO is completing.
627   */
628  void
629  xfs_flush_inodes(
630  	struct xfs_mount	*mp)
631  {
632  	/*
633  	 * If flush_work() returns true then that means we waited for a flush
634  	 * which was already in progress.  Don't bother running another scan.
635  	 */
636  	if (flush_work(&mp->m_flush_inodes_work))
637  		return;
638  
639  	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
640  	flush_work(&mp->m_flush_inodes_work);
641  }
642  
643  /* Catch misguided souls that try to use this interface on XFS */
644  STATIC struct inode *
645  xfs_fs_alloc_inode(
646  	struct super_block	*sb)
647  {
648  	BUG();
649  	return NULL;
650  }
651  
652  /*
653   * Now that the generic code is guaranteed not to be accessing
654   * the linux inode, we can inactivate and reclaim the inode.
655   */
656  STATIC void
657  xfs_fs_destroy_inode(
658  	struct inode		*inode)
659  {
660  	struct xfs_inode	*ip = XFS_I(inode);
661  
662  	trace_xfs_destroy_inode(ip);
663  
664  	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
665  	XFS_STATS_INC(ip->i_mount, vn_rele);
666  	XFS_STATS_INC(ip->i_mount, vn_remove);
667  	xfs_inode_mark_reclaimable(ip);
668  }
669  
670  static void
671  xfs_fs_dirty_inode(
672  	struct inode			*inode,
673  	int				flags)
674  {
675  	struct xfs_inode		*ip = XFS_I(inode);
676  	struct xfs_mount		*mp = ip->i_mount;
677  	struct xfs_trans		*tp;
678  
679  	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
680  		return;
681  
682  	/*
683  	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
684  	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
685  	 * in flags possibly together with I_DIRTY_SYNC.
686  	 */
687  	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
688  		return;
689  
690  	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
691  		return;
692  	xfs_ilock(ip, XFS_ILOCK_EXCL);
693  	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
694  	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
695  	xfs_trans_commit(tp);
696  }
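/*
 * A truth-table sketch of the flag test in xfs_fs_dirty_inode() above,
 * using stand-in bit values rather than the real I_DIRTY_* constants:
 * the timestamp-logging transaction only runs when I_DIRTY_TIME is set
 * and the remaining bits are exactly I_DIRTY_SYNC.
 */
#include <stdio.h>

#define DEMO_DIRTY_SYNC		(1 << 0)	/* stand-in value */
#define DEMO_DIRTY_TIME		(1 << 1)	/* stand-in value */
#define DEMO_DIRTY_PAGES	(1 << 2)	/* stand-in value */

int main(void)
{
	for (int flags = 0; flags <= 7; flags++) {
		int skip = (flags & ~DEMO_DIRTY_TIME) != DEMO_DIRTY_SYNC ||
			   !(flags & DEMO_DIRTY_TIME);

		printf("flags=%d -> %s\n", flags,
		       skip ? "return early" : "log timestamps");
	}
	return 0;
}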
697  
698  /*
699   * Slab object creation initialisation for the XFS inode.
700   * This covers only the idempotent fields in the XFS inode;
701   * all other fields need to be initialised on allocation
702   * from the slab. This avoids the need to repeatedly initialise
703   * fields in the xfs inode that are left in the initialised state
704   * when freeing the inode.
705   */
706  STATIC void
707  xfs_fs_inode_init_once(
708  	void			*inode)
709  {
710  	struct xfs_inode	*ip = inode;
711  
712  	memset(ip, 0, sizeof(struct xfs_inode));
713  
714  	/* vfs inode */
715  	inode_init_once(VFS_I(ip));
716  
717  	/* xfs inode */
718  	atomic_set(&ip->i_pincount, 0);
719  	spin_lock_init(&ip->i_flags_lock);
720  
721  	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
722  		     "xfsino", ip->i_ino);
723  }
724  
725  /*
726   * We do an unlocked check for XFS_IDONTCACHE here because we are already
727   * serialised against cache hits here via the inode->i_lock and igrab() in
728   * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
729   * racing with us, and it avoids needing to grab a spinlock here for every inode
730   * we drop the final reference on.
731   */
732  STATIC int
733  xfs_fs_drop_inode(
734  	struct inode		*inode)
735  {
736  	struct xfs_inode	*ip = XFS_I(inode);
737  
738  	/*
739  	 * If this unlinked inode is in the middle of recovery, don't
740  	 * drop the inode just yet; log recovery will take care of
741  	 * that.  See the comment for this inode flag.
742  	 */
743  	if (ip->i_flags & XFS_IRECOVERY) {
744  		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
745  		return 0;
746  	}
747  
748  	return generic_drop_inode(inode);
749  }
750  
751  static void
752  xfs_mount_free(
753  	struct xfs_mount	*mp)
754  {
755  	/*
756  	 * Free the buftargs here because blkdev_put needs to be called outside
757  	 * of sb->s_umount, which is held around the call to ->put_super.
758  	 */
759  	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
760  		xfs_free_buftarg(mp->m_logdev_targp);
761  	if (mp->m_rtdev_targp)
762  		xfs_free_buftarg(mp->m_rtdev_targp);
763  	if (mp->m_ddev_targp)
764  		xfs_free_buftarg(mp->m_ddev_targp);
765  
766  	debugfs_remove(mp->m_debugfs);
767  	kfree(mp->m_rtname);
768  	kfree(mp->m_logname);
769  	kmem_free(mp);
770  }
771  
772  STATIC int
773  xfs_fs_sync_fs(
774  	struct super_block	*sb,
775  	int			wait)
776  {
777  	struct xfs_mount	*mp = XFS_M(sb);
778  	int			error;
779  
780  	trace_xfs_fs_sync_fs(mp, __return_address);
781  
782  	/*
783  	 * Doing anything during the async pass would be counterproductive.
784  	 */
785  	if (!wait)
786  		return 0;
787  
788  	error = xfs_log_force(mp, XFS_LOG_SYNC);
789  	if (error)
790  		return error;
791  
792  	if (laptop_mode) {
793  		/*
794  		 * The disk must be active because we're syncing.
795  		 * We schedule log work now (now that the disk is
796  		 * active) instead of later (when it might not be).
797  		 */
798  		flush_delayed_work(&mp->m_log->l_work);
799  	}
800  
801  	/*
802  	 * If we are called with page faults frozen out, it means we are about
803  	 * to freeze the transaction subsystem. Take the opportunity to shut
804  	 * down inodegc because once SB_FREEZE_FS is set it's too late to
805  	 * prevent inactivation races with freeze. The fs doesn't get called
806  	 * again by the freezing process until after SB_FREEZE_FS has been set,
807  	 * so it's now or never.  Same logic applies to speculative allocation
808  	 * garbage collection.
809  	 *
810  	 * We don't care if this is a normal syncfs call that does this or
811  	 * freeze that does this - we can run this multiple times without issue
812  	 * and we won't race with a restart because a restart can only occur
813  	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
814  	 */
815  	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
816  		xfs_inodegc_stop(mp);
817  		xfs_blockgc_stop(mp);
818  	}
819  
820  	return 0;
821  }
822  
823  STATIC int
824  xfs_fs_statfs(
825  	struct dentry		*dentry,
826  	struct kstatfs		*statp)
827  {
828  	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
829  	xfs_sb_t		*sbp = &mp->m_sb;
830  	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
831  	uint64_t		fakeinos, id;
832  	uint64_t		icount;
833  	uint64_t		ifree;
834  	uint64_t		fdblocks;
835  	xfs_extlen_t		lsize;
836  	int64_t			ffree;
837  
838  	/*
839  	 * Expedite background inodegc but don't wait. We do not want to block
840  	 * here waiting hours for a billion extent file to be truncated.
841  	 */
842  	xfs_inodegc_push(mp);
843  
844  	statp->f_type = XFS_SUPER_MAGIC;
845  	statp->f_namelen = MAXNAMELEN - 1;
846  
847  	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
848  	statp->f_fsid = u64_to_fsid(id);
849  
850  	icount = percpu_counter_sum(&mp->m_icount);
851  	ifree = percpu_counter_sum(&mp->m_ifree);
852  	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
853  
854  	spin_lock(&mp->m_sb_lock);
855  	statp->f_bsize = sbp->sb_blocksize;
856  	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
857  	statp->f_blocks = sbp->sb_dblocks - lsize;
858  	spin_unlock(&mp->m_sb_lock);
859  
860  	/* make sure statp->f_bfree does not underflow */
861  	statp->f_bfree = max_t(int64_t, 0,
862  				fdblocks - xfs_fdblocks_unavailable(mp));
863  	statp->f_bavail = statp->f_bfree;
864  
865  	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
866  	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
867  	if (M_IGEO(mp)->maxicount)
868  		statp->f_files = min_t(typeof(statp->f_files),
869  					statp->f_files,
870  					M_IGEO(mp)->maxicount);
871  
872  	/* If sb_icount overshot maxicount, report actual allocation */
873  	statp->f_files = max_t(typeof(statp->f_files),
874  					statp->f_files,
875  					sbp->sb_icount);
876  
877  	/* make sure statp->f_ffree does not underflow */
878  	ffree = statp->f_files - (icount - ifree);
879  	statp->f_ffree = max_t(int64_t, ffree, 0);
880  
881  	if (XFS_IS_REALTIME_MOUNT(mp) &&
882  	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
883  		s64	freertx;
884  
885  		statp->f_blocks = sbp->sb_rblocks;
886  		freertx = percpu_counter_sum_positive(&mp->m_frextents);
887  		statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
888  	}
889  
890  	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
891  	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
892  			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
893  		xfs_qm_statvfs(ip, statp);
894  
895  	return 0;
896  }
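/*
 * Worked example of the inode accounting above with made-up numbers:
 * statfs reports f_files as allocated inodes plus "fakeinos", the
 * inodes that free space could still hold, and f_ffree subtracts the
 * inodes that are allocated and in use.  inopblock here is assumed,
 * not read from a superblock.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bfree = 1000000;	/* hypothetical free fs blocks */
	uint64_t inopblock = 8;		/* hypothetical inodes per block */
	uint64_t icount = 50000;	/* hypothetical allocated inodes */
	uint64_t ifree = 1200;		/* hypothetical free inode slots */

	uint64_t fakeinos = bfree * inopblock;
	uint64_t f_files = icount + fakeinos;
	int64_t f_ffree = f_files - (icount - ifree);

	printf("f_files=%llu f_ffree=%lld\n",
	       (unsigned long long)f_files, (long long)f_ffree);
	return 0;
}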
897  
898  STATIC void
899  xfs_save_resvblks(struct xfs_mount *mp)
900  {
901  	uint64_t resblks = 0;
902  
903  	mp->m_resblks_save = mp->m_resblks;
904  	xfs_reserve_blocks(mp, &resblks, NULL);
905  }
906  
907  STATIC void
908  xfs_restore_resvblks(struct xfs_mount *mp)
909  {
910  	uint64_t resblks;
911  
912  	if (mp->m_resblks_save) {
913  		resblks = mp->m_resblks_save;
914  		mp->m_resblks_save = 0;
915  	} else
916  		resblks = xfs_default_resblks(mp);
917  
918  	xfs_reserve_blocks(mp, &resblks, NULL);
919  }
920  
921  /*
922   * Second stage of a freeze. The data is already frozen so we only
923   * need to take care of the metadata. Once that's done sync the superblock
924   * to the log to dirty it in case of a crash while frozen. This ensures that we
925   * will recover the unlinked inode lists on the next mount.
926   */
927  STATIC int
928  xfs_fs_freeze(
929  	struct super_block	*sb)
930  {
931  	struct xfs_mount	*mp = XFS_M(sb);
932  	unsigned int		flags;
933  	int			ret;
934  
935  	/*
936  	 * The filesystem is now frozen far enough that memory reclaim
937  	 * cannot safely operate on the filesystem. Hence we need to
938  	 * set a GFP_NOFS context here to avoid recursion deadlocks.
939  	 */
940  	flags = memalloc_nofs_save();
941  	xfs_save_resvblks(mp);
942  	ret = xfs_log_quiesce(mp);
943  	memalloc_nofs_restore(flags);
944  
945  	/*
946  	 * For read-write filesystems, we need to restart the inodegc on error
947  	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
948  	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
949  	 * here, so we can restart safely without racing with a stop in
950  	 * xfs_fs_sync_fs().
951  	 */
952  	if (ret && !xfs_is_readonly(mp)) {
953  		xfs_blockgc_start(mp);
954  		xfs_inodegc_start(mp);
955  	}
956  
957  	return ret;
958  }
959  
960  STATIC int
961  xfs_fs_unfreeze(
962  	struct super_block	*sb)
963  {
964  	struct xfs_mount	*mp = XFS_M(sb);
965  
966  	xfs_restore_resvblks(mp);
967  	xfs_log_work_queue(mp);
968  
969  	/*
970  	 * Don't reactivate the inodegc worker on a readonly filesystem because
971  	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
972  	 * worker because there are no speculative preallocations on a readonly
973  	 * filesystem.
974  	 */
975  	if (!xfs_is_readonly(mp)) {
976  		xfs_blockgc_start(mp);
977  		xfs_inodegc_start(mp);
978  	}
979  
980  	return 0;
981  }
982  
983  /*
984   * This function fills in xfs_mount_t fields based on mount args.
985   * Note: the superblock _has_ now been read in.
986   */
987  STATIC int
988  xfs_finish_flags(
989  	struct xfs_mount	*mp)
990  {
991  	/* Fail a mount where the logbuf is smaller than the log stripe */
992  	if (xfs_has_logv2(mp)) {
993  		if (mp->m_logbsize <= 0 &&
994  		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
995  			mp->m_logbsize = mp->m_sb.sb_logsunit;
996  		} else if (mp->m_logbsize > 0 &&
997  			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
998  			xfs_warn(mp,
999  		"logbuf size must be greater than or equal to log stripe size");
1000  			return -EINVAL;
1001  		}
1002  	} else {
1003  		/* Fail a mount if the logbuf is larger than 32K */
1004  		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1005  			xfs_warn(mp,
1006  		"logbuf size for version 1 logs must be 16K or 32K");
1007  			return -EINVAL;
1008  		}
1009  	}
1010  
1011  	/*
1012  	 * V5 filesystems always use attr2 format for attributes.
1013  	 */
1014  	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
1015  		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1016  			     "attr2 is always enabled for V5 filesystems.");
1017  		return -EINVAL;
1018  	}
1019  
1020  	/*
1021  	 * prohibit r/w mounts of read-only filesystems
1022  	 */
1023  	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
1024  		xfs_warn(mp,
1025  			"cannot mount a read-only filesystem as read-write");
1026  		return -EROFS;
1027  	}
1028  
1029  	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
1030  	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
1031  	    !xfs_has_pquotino(mp)) {
1032  		xfs_warn(mp,
1033  		  "Super block does not support project and group quota together");
1034  		return -EINVAL;
1035  	}
1036  
1037  	return 0;
1038  }
1039  
1040  static int
1041  xfs_init_percpu_counters(
1042  	struct xfs_mount	*mp)
1043  {
1044  	int		error;
1045  
1046  	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1047  	if (error)
1048  		return -ENOMEM;
1049  
1050  	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1051  	if (error)
1052  		goto free_icount;
1053  
1054  	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1055  	if (error)
1056  		goto free_ifree;
1057  
1058  	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1059  	if (error)
1060  		goto free_fdblocks;
1061  
1062  	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
1063  	if (error)
1064  		goto free_delalloc;
1065  
1066  	return 0;
1067  
1068  free_delalloc:
1069  	percpu_counter_destroy(&mp->m_delalloc_blks);
1070  free_fdblocks:
1071  	percpu_counter_destroy(&mp->m_fdblocks);
1072  free_ifree:
1073  	percpu_counter_destroy(&mp->m_ifree);
1074  free_icount:
1075  	percpu_counter_destroy(&mp->m_icount);
1076  	return -ENOMEM;
1077  }
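/*
 * Conceptual userspace model of the percpu counters initialised above:
 * each CPU keeps a cheap local delta and percpu_counter_sum() folds all
 * of them into the shared base when a precise value is needed.  A plain
 * fixed-size array stands in for the kernel's per-CPU allocation.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_DEMO_CPUS 4

struct demo_percpu_counter {
	int64_t base;
	int64_t local[NR_DEMO_CPUS];
};

static void demo_add(struct demo_percpu_counter *c, int cpu, int64_t v)
{
	c->local[cpu] += v;	/* cheap, no shared-cacheline traffic */
}

static int64_t demo_sum(const struct demo_percpu_counter *c)
{
	int64_t sum = c->base;

	for (int i = 0; i < NR_DEMO_CPUS; i++)
		sum += c->local[i];	/* the expensive, precise path */
	return sum;
}

int main(void)
{
	struct demo_percpu_counter fdblocks = { .base = 1000 };

	demo_add(&fdblocks, 0, -8);
	demo_add(&fdblocks, 3, -16);
	printf("fdblocks=%lld\n", (long long)demo_sum(&fdblocks));
	return 0;
}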
1078  
1079  void
1080  xfs_reinit_percpu_counters(
1081  	struct xfs_mount	*mp)
1082  {
1083  	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1084  	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1085  	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1086  	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
1087  }
1088  
1089  static void
1090  xfs_destroy_percpu_counters(
1091  	struct xfs_mount	*mp)
1092  {
1093  	percpu_counter_destroy(&mp->m_icount);
1094  	percpu_counter_destroy(&mp->m_ifree);
1095  	percpu_counter_destroy(&mp->m_fdblocks);
1096  	ASSERT(xfs_is_shutdown(mp) ||
1097  	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1098  	percpu_counter_destroy(&mp->m_delalloc_blks);
1099  	percpu_counter_destroy(&mp->m_frextents);
1100  }
1101  
1102  static int
1103  xfs_inodegc_init_percpu(
1104  	struct xfs_mount	*mp)
1105  {
1106  	struct xfs_inodegc	*gc;
1107  	int			cpu;
1108  
1109  	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1110  	if (!mp->m_inodegc)
1111  		return -ENOMEM;
1112  
1113  	for_each_possible_cpu(cpu) {
1114  		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1115  		gc->cpu = cpu;
1116  		gc->mp = mp;
1117  		init_llist_head(&gc->list);
1118  		gc->items = 0;
1119  		gc->error = 0;
1120  		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1121  	}
1122  	return 0;
1123  }
1124  
1125  static void
1126  xfs_inodegc_free_percpu(
1127  	struct xfs_mount	*mp)
1128  {
1129  	if (!mp->m_inodegc)
1130  		return;
1131  	free_percpu(mp->m_inodegc);
1132  }
1133  
1134  static void
1135  xfs_fs_put_super(
1136  	struct super_block	*sb)
1137  {
1138  	struct xfs_mount	*mp = XFS_M(sb);
1139  
1140  	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
1141  	xfs_filestream_unmount(mp);
1142  	xfs_unmountfs(mp);
1143  
1144  	xfs_freesb(mp);
1145  	xchk_mount_stats_free(mp);
1146  	free_percpu(mp->m_stats.xs_stats);
1147  	xfs_inodegc_free_percpu(mp);
1148  	xfs_destroy_percpu_counters(mp);
1149  	xfs_destroy_mount_workqueues(mp);
1150  	xfs_shutdown_devices(mp);
1151  }
1152  
1153  static long
1154  xfs_fs_nr_cached_objects(
1155  	struct super_block	*sb,
1156  	struct shrink_control	*sc)
1157  {
1158  	/* Paranoia: catch incorrect calls during mount setup or teardown */
1159  	if (WARN_ON_ONCE(!sb->s_fs_info))
1160  		return 0;
1161  	return xfs_reclaim_inodes_count(XFS_M(sb));
1162  }
1163  
1164  static long
1165  xfs_fs_free_cached_objects(
1166  	struct super_block	*sb,
1167  	struct shrink_control	*sc)
1168  {
1169  	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1170  }
1171  
1172  static void
1173  xfs_fs_shutdown(
1174  	struct super_block	*sb)
1175  {
1176  	xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
1177  }
1178  
1179  static const struct super_operations xfs_super_operations = {
1180  	.alloc_inode		= xfs_fs_alloc_inode,
1181  	.destroy_inode		= xfs_fs_destroy_inode,
1182  	.dirty_inode		= xfs_fs_dirty_inode,
1183  	.drop_inode		= xfs_fs_drop_inode,
1184  	.put_super		= xfs_fs_put_super,
1185  	.sync_fs		= xfs_fs_sync_fs,
1186  	.freeze_fs		= xfs_fs_freeze,
1187  	.unfreeze_fs		= xfs_fs_unfreeze,
1188  	.statfs			= xfs_fs_statfs,
1189  	.show_options		= xfs_fs_show_options,
1190  	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1191  	.free_cached_objects	= xfs_fs_free_cached_objects,
1192  	.shutdown		= xfs_fs_shutdown,
1193  };
1194  
1195  static int
1196  suffix_kstrtoint(
1197  	const char	*s,
1198  	unsigned int	base,
1199  	int		*res)
1200  {
1201  	int		last, shift_left_factor = 0, _res;
1202  	char		*value;
1203  	int		ret = 0;
1204  
1205  	value = kstrdup(s, GFP_KERNEL);
1206  	if (!value)
1207  		return -ENOMEM;
1208  
1209  	last = strlen(value) - 1;
1210  	if (value[last] == 'K' || value[last] == 'k') {
1211  		shift_left_factor = 10;
1212  		value[last] = '\0';
1213  	}
1214  	if (value[last] == 'M' || value[last] == 'm') {
1215  		shift_left_factor = 20;
1216  		value[last] = '\0';
1217  	}
1218  	if (value[last] == 'G' || value[last] == 'g') {
1219  		shift_left_factor = 30;
1220  		value[last] = '\0';
1221  	}
1222  
1223  	if (kstrtoint(value, base, &_res))
1224  		ret = -EINVAL;
1225  	kfree(value);
1226  	*res = _res << shift_left_factor;
1227  	return ret;
1228  }
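/*
 * A standalone sketch of the suffix parsing above: "32k" becomes 32768
 * and "1m" becomes 1048576.  Same shift-based logic, with userspace
 * strtol() standing in for kstrtoint(); an else-if chain avoids
 * re-testing a string that was already truncated.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long demo_suffix_strtol(const char *s)
{
	char buf[32];
	int shift = 0;
	size_t last;

	if (!s || !*s)
		return 0;
	snprintf(buf, sizeof(buf), "%s", s);
	last = strlen(buf) - 1;
	if (buf[last] == 'k' || buf[last] == 'K')
		shift = 10;
	else if (buf[last] == 'm' || buf[last] == 'M')
		shift = 20;
	else if (buf[last] == 'g' || buf[last] == 'G')
		shift = 30;
	if (shift)
		buf[last] = '\0';
	return strtol(buf, NULL, 10) << shift;
}

int main(void)
{
	printf("%ld %ld\n", demo_suffix_strtol("32k"),
	       demo_suffix_strtol("1m"));
	return 0;
}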
1229  
1230  static inline void
1231  xfs_fs_warn_deprecated(
1232  	struct fs_context	*fc,
1233  	struct fs_parameter	*param,
1234  	uint64_t		flag,
1235  	bool			value)
1236  {
1237  	/* Don't print the warning if reconfiguring and the current mount
1238  	 * point already had the flag set.
1239  	 */
1240  	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1241  	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1242  		return;
1243  	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1244  }
1245  
1246  /*
1247   * Set mount state from a mount option.
1248   *
1249   * NOTE: mp->m_super is NULL here!
1250   */
1251  static int
1252  xfs_fs_parse_param(
1253  	struct fs_context	*fc,
1254  	struct fs_parameter	*param)
1255  {
1256  	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1257  	struct fs_parse_result	result;
1258  	int			size = 0;
1259  	int			opt;
1260  
1261  	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1262  	if (opt < 0)
1263  		return opt;
1264  
1265  	switch (opt) {
1266  	case Opt_logbufs:
1267  		parsing_mp->m_logbufs = result.uint_32;
1268  		return 0;
1269  	case Opt_logbsize:
1270  		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1271  			return -EINVAL;
1272  		return 0;
1273  	case Opt_logdev:
1274  		kfree(parsing_mp->m_logname);
1275  		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1276  		if (!parsing_mp->m_logname)
1277  			return -ENOMEM;
1278  		return 0;
1279  	case Opt_rtdev:
1280  		kfree(parsing_mp->m_rtname);
1281  		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1282  		if (!parsing_mp->m_rtname)
1283  			return -ENOMEM;
1284  		return 0;
1285  	case Opt_allocsize:
1286  		if (suffix_kstrtoint(param->string, 10, &size))
1287  			return -EINVAL;
1288  		parsing_mp->m_allocsize_log = ffs(size) - 1;
1289  		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1290  		return 0;
1291  	case Opt_grpid:
1292  	case Opt_bsdgroups:
1293  		parsing_mp->m_features |= XFS_FEAT_GRPID;
1294  		return 0;
1295  	case Opt_nogrpid:
1296  	case Opt_sysvgroups:
1297  		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1298  		return 0;
1299  	case Opt_wsync:
1300  		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1301  		return 0;
1302  	case Opt_norecovery:
1303  		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1304  		return 0;
1305  	case Opt_noalign:
1306  		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1307  		return 0;
1308  	case Opt_swalloc:
1309  		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1310  		return 0;
1311  	case Opt_sunit:
1312  		parsing_mp->m_dalign = result.uint_32;
1313  		return 0;
1314  	case Opt_swidth:
1315  		parsing_mp->m_swidth = result.uint_32;
1316  		return 0;
1317  	case Opt_inode32:
1318  		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1319  		return 0;
1320  	case Opt_inode64:
1321  		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1322  		return 0;
1323  	case Opt_nouuid:
1324  		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1325  		return 0;
1326  	case Opt_largeio:
1327  		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1328  		return 0;
1329  	case Opt_nolargeio:
1330  		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1331  		return 0;
1332  	case Opt_filestreams:
1333  		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1334  		return 0;
1335  	case Opt_noquota:
1336  		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1337  		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1338  		return 0;
1339  	case Opt_quota:
1340  	case Opt_uquota:
1341  	case Opt_usrquota:
1342  		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1343  		return 0;
1344  	case Opt_qnoenforce:
1345  	case Opt_uqnoenforce:
1346  		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1347  		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1348  		return 0;
1349  	case Opt_pquota:
1350  	case Opt_prjquota:
1351  		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1352  		return 0;
1353  	case Opt_pqnoenforce:
1354  		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1355  		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1356  		return 0;
1357  	case Opt_gquota:
1358  	case Opt_grpquota:
1359  		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1360  		return 0;
1361  	case Opt_gqnoenforce:
1362  		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1363  		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1364  		return 0;
1365  	case Opt_discard:
1366  		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1367  		return 0;
1368  	case Opt_nodiscard:
1369  		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1370  		return 0;
1371  #ifdef CONFIG_FS_DAX
1372  	case Opt_dax:
1373  		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1374  		return 0;
1375  	case Opt_dax_enum:
1376  		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1377  		return 0;
1378  #endif
1379  	/* Following mount options will be removed in September 2025 */
1380  	case Opt_ikeep:
1381  		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1382  		parsing_mp->m_features |= XFS_FEAT_IKEEP;
1383  		return 0;
1384  	case Opt_noikeep:
1385  		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1386  		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1387  		return 0;
1388  	case Opt_attr2:
1389  		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1390  		parsing_mp->m_features |= XFS_FEAT_ATTR2;
1391  		return 0;
1392  	case Opt_noattr2:
1393  		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1394  		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1395  		return 0;
1396  	default:
1397  		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1398  		return -EINVAL;
1399  	}
1400  
1401  	return 0;
1402  }
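/*
 * The switch above is fed one parameter at a time through the new mount
 * API.  A hedged userspace sketch using raw fsopen()/fsconfig() syscalls
 * (Linux >= 5.2, needs CAP_SYS_ADMIN; the device path is a placeholder
 * and the final fsmount()/move_mount() steps are omitted):
 */
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FSCONFIG_SET_FLAG
#define FSCONFIG_SET_FLAG	0
#define FSCONFIG_SET_STRING	1
#define FSCONFIG_CMD_CREATE	6
#endif

int main(void)
{
	int fsfd = syscall(SYS_fsopen, "xfs", 0);

	if (fsfd < 0)
		return 1;
	/* Each call below arrives in xfs_fs_parse_param() as one param. */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
		"/dev/sdb1", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "logbsize", "64k", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FLAG, "noalign", NULL, 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	close(fsfd);
	return 0;
}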
1403  
1404  static int
1405  xfs_fs_validate_params(
1406  	struct xfs_mount	*mp)
1407  {
1408  	/* No recovery flag requires a read-only mount */
1409  	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1410  		xfs_warn(mp, "no-recovery mounts must be read-only.");
1411  		return -EINVAL;
1412  	}
1413  
1414  	/*
1415  	 * We have not read the superblock at this point, so only the attr2
1416  	 * mount option can set the attr2 feature by this stage.
1417  	 */
1418  	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1419  		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1420  		return -EINVAL;
1421  	}
1422  
1423  
1424  	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1425  		xfs_warn(mp,
1426  	"sunit and swidth options incompatible with the noalign option");
1427  		return -EINVAL;
1428  	}
1429  
1430  	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1431  		xfs_warn(mp, "quota support not available in this kernel.");
1432  		return -EINVAL;
1433  	}
1434  
1435  	if ((mp->m_dalign && !mp->m_swidth) ||
1436  	    (!mp->m_dalign && mp->m_swidth)) {
1437  		xfs_warn(mp, "sunit and swidth must be specified together");
1438  		return -EINVAL;
1439  	}
1440  
1441  	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1442  		xfs_warn(mp,
1443  	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1444  			mp->m_swidth, mp->m_dalign);
1445  		return -EINVAL;
1446  	}
1447  
1448  	if (mp->m_logbufs != -1 &&
1449  	    mp->m_logbufs != 0 &&
1450  	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1451  	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1452  		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1453  			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1454  		return -EINVAL;
1455  	}
1456  
1457  	if (mp->m_logbsize != -1 &&
1458  	    mp->m_logbsize != 0 &&
1459  	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1460  	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1461  	     !is_power_of_2(mp->m_logbsize))) {
1462  		xfs_warn(mp,
1463  			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1464  			mp->m_logbsize);
1465  		return -EINVAL;
1466  	}
1467  
1468  	if (xfs_has_allocsize(mp) &&
1469  	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1470  	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1471  		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1472  			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1473  		return -EINVAL;
1474  	}
1475  
1476  	return 0;
1477  }
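/*
 * A quick standalone check mirroring the logbsize rule above: accepted
 * values are powers of two from 16k through 256k, exactly as the warning
 * string says.  The bounds below restate that message rather than
 * quoting XLOG_MIN/MAX_RECORD_BSIZE from the headers.
 */
#include <stdbool.h>
#include <stdio.h>

static bool logbsize_ok(int v)
{
	return v >= (16 << 10) && v <= (256 << 10) && (v & (v - 1)) == 0;
}

int main(void)
{
	printf("32k:%d 48k:%d 512k:%d\n", logbsize_ok(32 << 10),
	       logbsize_ok(48 << 10), logbsize_ok(512 << 10));
	return 0;
}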
1478  
1479  struct dentry *
1480  xfs_debugfs_mkdir(
1481  	const char	*name,
1482  	struct dentry	*parent)
1483  {
1484  	struct dentry	*child;
1485  
1486  	/* Apparently we're expected to ignore error returns?? */
1487  	child = debugfs_create_dir(name, parent);
1488  	if (IS_ERR(child))
1489  		return NULL;
1490  
1491  	return child;
1492  }
1493  
1494  static int
1495  xfs_fs_fill_super(
1496  	struct super_block	*sb,
1497  	struct fs_context	*fc)
1498  {
1499  	struct xfs_mount	*mp = sb->s_fs_info;
1500  	struct inode		*root;
1501  	int			flags = 0, error;
1502  
1503  	mp->m_super = sb;
1504  
1505  	/*
1506  	 * Copy VFS mount flags from the context now that all parameter parsing
1507  	 * is guaranteed to have been completed by either the old mount API or
1508  	 * the newer fsopen/fsconfig API.
1509  	 */
1510  	if (fc->sb_flags & SB_RDONLY)
1511  		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1512  	if (fc->sb_flags & SB_DIRSYNC)
1513  		mp->m_features |= XFS_FEAT_DIRSYNC;
1514  	if (fc->sb_flags & SB_SYNCHRONOUS)
1515  		mp->m_features |= XFS_FEAT_WSYNC;
1516  
1517  	error = xfs_fs_validate_params(mp);
1518  	if (error)
1519  		return error;
1520  
1521  	sb_min_blocksize(sb, BBSIZE);
1522  	sb->s_xattr = xfs_xattr_handlers;
1523  	sb->s_export_op = &xfs_export_operations;
1524  #ifdef CONFIG_XFS_QUOTA
1525  	sb->s_qcop = &xfs_quotactl_operations;
1526  	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1527  #endif
1528  	sb->s_op = &xfs_super_operations;
1529  
1530  	/*
1531  	 * Delay mount work if the debug hook is set. This is debug
1532  	 * instrumentation to coordinate simulation of xfs mount failures with
1533  	 * VFS superblock operations.
1534  	 */
1535  	if (xfs_globals.mount_delay) {
1536  		xfs_notice(mp, "Delaying mount for %d seconds.",
1537  			xfs_globals.mount_delay);
1538  		msleep(xfs_globals.mount_delay * 1000);
1539  	}
1540  
1541  	if (fc->sb_flags & SB_SILENT)
1542  		flags |= XFS_MFSI_QUIET;
1543  
1544  	error = xfs_open_devices(mp);
1545  	if (error)
1546  		return error;
1547  
1548  	if (xfs_debugfs) {
1549  		mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
1550  						  xfs_debugfs);
1551  	} else {
1552  		mp->m_debugfs = NULL;
1553  	}
1554  
1555  	error = xfs_init_mount_workqueues(mp);
1556  	if (error)
1557  		goto out_shutdown_devices;
1558  
1559  	error = xfs_init_percpu_counters(mp);
1560  	if (error)
1561  		goto out_destroy_workqueues;
1562  
1563  	error = xfs_inodegc_init_percpu(mp);
1564  	if (error)
1565  		goto out_destroy_counters;
1566  
1567  	/* Allocate stats memory before we do operations that might use it */
1568  	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1569  	if (!mp->m_stats.xs_stats) {
1570  		error = -ENOMEM;
1571  		goto out_destroy_inodegc;
1572  	}
1573  
1574  	error = xchk_mount_stats_alloc(mp);
1575  	if (error)
1576  		goto out_free_stats;
1577  
1578  	error = xfs_readsb(mp, flags);
1579  	if (error)
1580  		goto out_free_scrub_stats;
1581  
1582  	error = xfs_finish_flags(mp);
1583  	if (error)
1584  		goto out_free_sb;
1585  
1586  	error = xfs_setup_devices(mp);
1587  	if (error)
1588  		goto out_free_sb;
1589  
1590  	/* V4 support is undergoing deprecation. */
1591  	if (!xfs_has_crc(mp)) {
1592  #ifdef CONFIG_XFS_SUPPORT_V4
1593  		xfs_warn_once(mp,
1594  	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1595  #else
1596  		xfs_warn(mp,
1597  	"Deprecated V4 format (crc=0) not supported by kernel.");
1598  		error = -EINVAL;
1599  		goto out_free_sb;
1600  #endif
1601  	}
1602  
1603  	/* ASCII case insensitivity is undergoing deprecation. */
1604  	if (xfs_has_asciici(mp)) {
1605  #ifdef CONFIG_XFS_SUPPORT_ASCII_CI
1606  		xfs_warn_once(mp,
1607  	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
1608  #else
1609  		xfs_warn(mp,
1610  	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
1611  		error = -EINVAL;
1612  		goto out_free_sb;
1613  #endif
1614  	}
1615  
1616  	/* Filesystem claims it needs repair, so refuse the mount. */
1617  	if (xfs_has_needsrepair(mp)) {
1618  		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1619  		error = -EFSCORRUPTED;
1620  		goto out_free_sb;
1621  	}
1622  
1623  	/*
1624  	 * Don't touch the filesystem if a user tool thinks it owns the primary
1625  	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1626  	 * we don't check them at all.
1627  	 */
1628  	if (mp->m_sb.sb_inprogress) {
1629  		xfs_warn(mp, "Offline file system operation in progress!");
1630  		error = -EFSCORRUPTED;
1631  		goto out_free_sb;
1632  	}
1633  
1634  	/*
1635  	 * Until this is fixed only page-sized or smaller data blocks work.
1636  	 */
1637  	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1638  		xfs_warn(mp,
1639  		"File system with blocksize %d bytes. "
1640  		"Only pagesize (%ld) or less will currently work.",
1641  				mp->m_sb.sb_blocksize, PAGE_SIZE);
1642  		error = -ENOSYS;
1643  		goto out_free_sb;
1644  	}
1645  
1646  	/* Ensure this filesystem fits in the page cache limits */
1647  	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1648  	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1649  		xfs_warn(mp,
1650  		"file system too large to be mounted on this system.");
1651  		error = -EFBIG;
1652  		goto out_free_sb;
1653  	}
1654  
1655  	/*
1656  	 * XFS block mappings use 54 bits to store the logical block offset.
1657  	 * This should suffice to handle the maximum file size that the VFS
1658  	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1659  	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1660  	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1661  	 * to check this assertion.
1662  	 *
1663  	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1664  	 * maximum pagecache offset in units of fs blocks.
1665  	 */
1666  	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1667  		xfs_warn(mp,
1668  "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1669  			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1670  			 XFS_MAX_FILEOFF);
1671  		error = -EINVAL;
1672  		goto out_free_sb;
1673  	}
1674  
1675  	error = xfs_filestream_mount(mp);
1676  	if (error)
1677  		goto out_free_sb;
1678  
1679  	/*
1680  	 * we must configure the block size in the superblock before we run the
1681  	 * full mount process as the mount process can lookup and cache inodes.
1682  	 */
1683  	sb->s_magic = XFS_SUPER_MAGIC;
1684  	sb->s_blocksize = mp->m_sb.sb_blocksize;
1685  	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1686  	sb->s_maxbytes = MAX_LFS_FILESIZE;
1687  	sb->s_max_links = XFS_MAXLINK;
1688  	sb->s_time_gran = 1;
1689  	if (xfs_has_bigtime(mp)) {
1690  		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1691  		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1692  	} else {
1693  		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1694  		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1695  	}
1696  	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1697  	sb->s_iflags |= SB_I_CGROUPWB;
1698  
1699  	set_posix_acl_flag(sb);
1700  
1701  	/* version 5 superblocks support inode version counters. */
1702  	if (xfs_has_crc(mp))
1703  		sb->s_flags |= SB_I_VERSION;
1704  
1705  	if (xfs_has_dax_always(mp)) {
1706  		error = xfs_setup_dax_always(mp);
1707  		if (error)
1708  			goto out_filestream_unmount;
1709  	}
1710  
1711  	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
1712  		xfs_warn(mp,
1713  	"mounting with \"discard\" option, but the device does not support discard");
1714  		mp->m_features &= ~XFS_FEAT_DISCARD;
1715  	}
1716  
1717  	if (xfs_has_reflink(mp)) {
1718  		if (mp->m_sb.sb_rblocks) {
1719  			xfs_alert(mp,
1720  	"reflink not compatible with realtime device!");
1721  			error = -EINVAL;
1722  			goto out_filestream_unmount;
1723  		}
1724  
1725  		if (xfs_globals.always_cow) {
1726  			xfs_info(mp, "using DEBUG-only always_cow mode.");
1727  			mp->m_always_cow = true;
1728  		}
1729  	}
1730  
1731  	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1732  		xfs_alert(mp,
1733  	"reverse mapping btree not compatible with realtime device!");
1734  		error = -EINVAL;
1735  		goto out_filestream_unmount;
1736  	}
1737  
1738  	error = xfs_mountfs(mp);
1739  	if (error)
1740  		goto out_filestream_unmount;
1741  
1742  	root = igrab(VFS_I(mp->m_rootip));
1743  	if (!root) {
1744  		error = -ENOENT;
1745  		goto out_unmount;
1746  	}
1747  	sb->s_root = d_make_root(root);
1748  	if (!sb->s_root) {
1749  		error = -ENOMEM;
1750  		goto out_unmount;
1751  	}
1752  
1753  	return 0;
1754  
1755   out_filestream_unmount:
1756  	xfs_filestream_unmount(mp);
1757   out_free_sb:
1758  	xfs_freesb(mp);
1759   out_free_scrub_stats:
1760  	xchk_mount_stats_free(mp);
1761   out_free_stats:
1762  	free_percpu(mp->m_stats.xs_stats);
1763   out_destroy_inodegc:
1764  	xfs_inodegc_free_percpu(mp);
1765   out_destroy_counters:
1766  	xfs_destroy_percpu_counters(mp);
1767   out_destroy_workqueues:
1768  	xfs_destroy_mount_workqueues(mp);
1769   out_shutdown_devices:
1770  	xfs_shutdown_devices(mp);
1771  	return error;
1772  
1773   out_unmount:
1774  	xfs_filestream_unmount(mp);
1775  	xfs_unmountfs(mp);
1776  	goto out_free_sb;
1777  }
1778  
1779  static int
1780  xfs_fs_get_tree(
1781  	struct fs_context	*fc)
1782  {
1783  	return get_tree_bdev(fc, xfs_fs_fill_super);
1784  }
1785  
1786  static int
1787  xfs_remount_rw(
1788  	struct xfs_mount	*mp)
1789  {
1790  	struct xfs_sb		*sbp = &mp->m_sb;
1791  	int error;
1792  
1793  	if (xfs_has_norecovery(mp)) {
1794  		xfs_warn(mp,
1795  			"ro->rw transition prohibited on norecovery mount");
1796  		return -EINVAL;
1797  	}
1798  
1799  	if (xfs_sb_is_v5(sbp) &&
1800  	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1801  		xfs_warn(mp,
1802  	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1803  			(sbp->sb_features_ro_compat &
1804  				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1805  		return -EINVAL;
1806  	}
1807  
1808  	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1809  
1810  	/*
1811  	 * If this is the first remount to writable state, we might have some
1812  	 * superblock changes to update.
1813  	 */
1814  	if (mp->m_update_sb) {
1815  		error = xfs_sync_sb(mp, false);
1816  		if (error) {
1817  			xfs_warn(mp, "failed to write sb changes");
1818  			return error;
1819  		}
1820  		mp->m_update_sb = false;
1821  	}
1822  
1823  	/*
1824  	 * Fill out the reserve pool if it is empty. Use the stashed value if
1825  	 * it is non-zero, otherwise go with the default.
1826  	 */
1827  	xfs_restore_resvblks(mp);
1828  	xfs_log_work_queue(mp);
1829  	xfs_blockgc_start(mp);
1830  
1831  	/* Create the per-AG metadata reservation pool. */
1832  	error = xfs_fs_reserve_ag_blocks(mp);
1833  	if (error && error != -ENOSPC)
1834  		return error;
1835  
1836  	/* Re-enable the background inode inactivation worker. */
1837  	xfs_inodegc_start(mp);
1838  
1839  	return 0;
1840  }
1841  
1842  static int
1843  xfs_remount_ro(
1844  	struct xfs_mount	*mp)
1845  {
1846  	struct xfs_icwalk	icw = {
1847  		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
1848  	};
1849  	int			error;
1850  
1851  	/* Flush all the dirty data to disk. */
1852  	error = sync_filesystem(mp->m_super);
1853  	if (error)
1854  		return error;
1855  
1856  	/*
1857  	 * Cancel background eofb scanning so it cannot race with the final
1858  	 * log force+buftarg wait and deadlock the remount.
1859  	 */
1860  	xfs_blockgc_stop(mp);
1861  
1862  	/*
1863  	 * Clear out all remaining COW staging extents and speculative post-EOF
1864  	 * preallocations so that we don't leave inodes requiring inactivation
1865  	 * cleanups during reclaim on a read-only mount.  We must process every
1866  	 * cached inode, so this requires a synchronous cache scan.
1867  	 */
1868  	error = xfs_blockgc_free_space(mp, &icw);
1869  	if (error) {
1870  		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1871  		return error;
1872  	}
1873  
1874  	/*
1875  	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
1876  	 * flushed all pending inodegc work when it sync'd the filesystem.
1877  	 * The VFS holds s_umount, so we know that inodes cannot enter
1878  	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
1879  	 * we send inodes straight to reclaim, so no inodes will be queued.
1880  	 */
1881  	xfs_inodegc_stop(mp);
1882  
1883  	/* Free the per-AG metadata reservation pool. */
1884  	error = xfs_fs_unreserve_ag_blocks(mp);
1885  	if (error) {
1886  		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1887  		return error;
1888  	}
1889  
1890  	/*
1891  	 * Before we sync the metadata, we need to free up the reserve block
1892  	 * pool so that the used block count in the superblock on disk is
1893  	 * correct at the end of the remount. Stash the current reserve pool
1894  	 * size so that if we get remounted rw, we can return it to the same
1895  	 * size.
1896  	 */
1897  	xfs_save_resvblks(mp);
1898  
1899  	xfs_log_clean(mp);
1900  	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1901  
1902  	return 0;
1903  }
1904  
1905  /*
1906   * Logically we would return an error here to prevent users from believing
1907   * they might have changed mount options via remount that in fact cannot be
1908   * changed.
1909   *
1910   * But unfortunately mount(8) adds all options from mtab and fstab to the
1911   * mount arguments in some cases, so we can't blindly reject options; we
1912   * have to check each specified option against the currently set value and
1913   * reject it only if the two differ.
1914   * Until that is implemented we return success for every remount request, and
1915   * silently ignore all options that we can't actually change.
1916   */
1917  static int
1918  xfs_fs_reconfigure(
1919  	struct fs_context *fc)
1920  {
1921  	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1922  	struct xfs_mount        *new_mp = fc->s_fs_info;
1923  	int			flags = fc->sb_flags;
1924  	int			error;
1925  
1926  	/* version 5 superblocks always support version counters. */
1927  	if (xfs_has_crc(mp))
1928  		fc->sb_flags |= SB_I_VERSION;
1929  
1930  	error = xfs_fs_validate_params(new_mp);
1931  	if (error)
1932  		return error;
1933  
1934  	/* inode32 -> inode64 */
1935  	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1936  		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1937  		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1938  	}
1939  
1940  	/* inode64 -> inode32 */
1941  	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1942  		mp->m_features |= XFS_FEAT_SMALL_INUMS;
1943  		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1944  	}
1945  
1946  	/* ro -> rw */
1947  	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1948  		error = xfs_remount_rw(mp);
1949  		if (error)
1950  			return error;
1951  	}
1952  
1953  	/* rw -> ro */
1954  	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1955  		error = xfs_remount_ro(mp);
1956  		if (error)
1957  			return error;
1958  	}
1959  
1960  	return 0;
1961  }
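
/*
 * For example, "mount -o remount,ro <mnt>" reaches this function with
 * SB_RDONLY set in fc->sb_flags and takes the rw -> ro leg above, while a
 * subsequent "mount -o remount,rw <mnt>" takes the ro -> rw leg.
 */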
1962  
1963  static void
1964  xfs_fs_free(
1965  	struct fs_context	*fc)
1966  {
1967  	struct xfs_mount	*mp = fc->s_fs_info;
1968  
1969  	/*
1970  	 * mp is stored in the fs_context when it is initialized, and is
1971  	 * transferred to the superblock on a successful mount.  If an error
1972  	 * occurs before the transfer, we have to free the mount structure
1973  	 * here.
1974  	 */
1975  	if (mp)
1976  		xfs_mount_free(mp);
1977  }
1978  
1979  static const struct fs_context_operations xfs_context_ops = {
1980  	.parse_param = xfs_fs_parse_param,
1981  	.get_tree    = xfs_fs_get_tree,
1982  	.reconfigure = xfs_fs_reconfigure,
1983  	.free        = xfs_fs_free,
1984  };
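
/*
 * A minimal userspace sketch of how the new mount API drives these ops
 * (illustrative only; device name hypothetical, error handling omitted):
 *
 *	fd = fsopen("xfs", FSOPEN_CLOEXEC);                -> .init_fs_context
 *	fsconfig(fd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fd, FSCONFIG_SET_STRING, "logbufs", "8", 0);  -> .parse_param
 *	fsconfig(fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);  -> .get_tree
 *	mfd = fsmount(fd, FSMOUNT_CLOEXEC, 0);
 *
 * The classic mount(2) path reaches the same ops through the VFS's legacy
 * fs_context wrapper.
 */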
1985  
1986  /*
1987   * WARNING: do not initialise any parameters in this function that depend on
1988   * mount option parsing having already been performed, as this can be called from
1989   * fsopen() before any parameters have been set.
1990   */
1991  static int xfs_init_fs_context(
1992  	struct fs_context	*fc)
1993  {
1994  	struct xfs_mount	*mp;
1995  
1996  	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1997  	if (!mp)
1998  		return -ENOMEM;
1999  
2000  	spin_lock_init(&mp->m_sb_lock);
2001  	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
2002  	spin_lock_init(&mp->m_perag_lock);
2003  	mutex_init(&mp->m_growlock);
2004  	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
2005  	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
2006  	mp->m_kobj.kobject.kset = xfs_kset;
2007  	/*
2008  	 * We don't create the finobt per-ag space reservation until after log
2009  	 * recovery, so we must set this to true so that an ifree transaction
2010  	 * started during log recovery will not depend on space reservations
2011  	 * for finobt expansion.
2012  	 */
2013  	mp->m_finobt_nores = true;
2014  
2015  	/*
2016  	 * These can be overridden by the mount option parsing.
2017  	 */
2018  	mp->m_logbufs = -1;
2019  	mp->m_logbsize = -1;
2020  	mp->m_allocsize_log = 16; /* 64k */
2021  
2022  	fc->s_fs_info = mp;
2023  	fc->ops = &xfs_context_ops;
2024  
2025  	return 0;
2026  }
2027  
2028  static void
2029  xfs_kill_sb(
2030  	struct super_block		*sb)
2031  {
2032  	kill_block_super(sb);
2033  	xfs_mount_free(XFS_M(sb));
2034  }
2035  
2036  static struct file_system_type xfs_fs_type = {
2037  	.owner			= THIS_MODULE,
2038  	.name			= "xfs",
2039  	.init_fs_context	= xfs_init_fs_context,
2040  	.parameters		= xfs_fs_parameters,
2041  	.kill_sb		= xfs_kill_sb,
2042  	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
2043  };
2044  MODULE_ALIAS_FS("xfs");
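
/*
 * FS_REQUIRES_DEV means a block device "source" must be configured before
 * the superblock can be created; FS_ALLOW_IDMAP opts in to idmapped mounts
 * set up via mount_setattr(2).
 */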
2045  
2046  STATIC int __init
2047  xfs_init_caches(void)
2048  {
2049  	int		error;
2050  
2051  	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2052  					 SLAB_HWCACHE_ALIGN |
2053  					 SLAB_RECLAIM_ACCOUNT |
2054  					 SLAB_MEM_SPREAD,
2055  					 NULL);
2056  	if (!xfs_buf_cache)
2057  		goto out;
2058  
2059  	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
2060  						sizeof(struct xlog_ticket),
2061  						0, 0, NULL);
2062  	if (!xfs_log_ticket_cache)
2063  		goto out_destroy_buf_cache;
2064  
2065  	error = xfs_btree_init_cur_caches();
2066  	if (error)
2067  		goto out_destroy_log_ticket_cache;
2068  
2069  	error = xfs_defer_init_item_caches();
2070  	if (error)
2071  		goto out_destroy_btree_cur_cache;
2072  
2073  	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2074  					      sizeof(struct xfs_da_state),
2075  					      0, 0, NULL);
2076  	if (!xfs_da_state_cache)
2077  		goto out_destroy_defer_item_cache;
2078  
2079  	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2080  					   sizeof(struct xfs_ifork),
2081  					   0, 0, NULL);
2082  	if (!xfs_ifork_cache)
2083  		goto out_destroy_da_state_cache;
2084  
2085  	xfs_trans_cache = kmem_cache_create("xfs_trans",
2086  					   sizeof(struct xfs_trans),
2087  					   0, 0, NULL);
2088  	if (!xfs_trans_cache)
2089  		goto out_destroy_ifork_cache;
2090  
2091  
2092  	/*
2093  	 * The size of the cache-allocated buf log item is the maximum
2094  	 * size possible under XFS.  This wastes a little bit of memory,
2095  	 * but it is much faster.
2096  	 */
2097  	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2098  					      sizeof(struct xfs_buf_log_item),
2099  					      0, 0, NULL);
2100  	if (!xfs_buf_item_cache)
2101  		goto out_destroy_trans_cache;
2102  
2103  	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2104  			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2105  			0, 0, NULL);
2106  	if (!xfs_efd_cache)
2107  		goto out_destroy_buf_item_cache;
2108  
2109  	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2110  			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2111  			0, 0, NULL);
2112  	if (!xfs_efi_cache)
2113  		goto out_destroy_efd_cache;
2114  
2115  	xfs_inode_cache = kmem_cache_create("xfs_inode",
2116  					   sizeof(struct xfs_inode), 0,
2117  					   (SLAB_HWCACHE_ALIGN |
2118  					    SLAB_RECLAIM_ACCOUNT |
2119  					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
2120  					   xfs_fs_inode_init_once);
2121  	if (!xfs_inode_cache)
2122  		goto out_destroy_efi_cache;
2123  
2124  	xfs_ili_cache = kmem_cache_create("xfs_ili",
2125  					 sizeof(struct xfs_inode_log_item), 0,
2126  					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2127  					 NULL);
2128  	if (!xfs_ili_cache)
2129  		goto out_destroy_inode_cache;
2130  
2131  	xfs_icreate_cache = kmem_cache_create("xfs_icr",
2132  					     sizeof(struct xfs_icreate_item),
2133  					     0, 0, NULL);
2134  	if (!xfs_icreate_cache)
2135  		goto out_destroy_ili_cache;
2136  
2137  	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2138  					 sizeof(struct xfs_rud_log_item),
2139  					 0, 0, NULL);
2140  	if (!xfs_rud_cache)
2141  		goto out_destroy_icreate_cache;
2142  
2143  	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2144  			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2145  			0, 0, NULL);
2146  	if (!xfs_rui_cache)
2147  		goto out_destroy_rud_cache;
2148  
2149  	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2150  					 sizeof(struct xfs_cud_log_item),
2151  					 0, 0, NULL);
2152  	if (!xfs_cud_cache)
2153  		goto out_destroy_rui_cache;
2154  
2155  	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2156  			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2157  			0, 0, NULL);
2158  	if (!xfs_cui_cache)
2159  		goto out_destroy_cud_cache;
2160  
2161  	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2162  					 sizeof(struct xfs_bud_log_item),
2163  					 0, 0, NULL);
2164  	if (!xfs_bud_cache)
2165  		goto out_destroy_cui_cache;
2166  
2167  	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2168  			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2169  			0, 0, NULL);
2170  	if (!xfs_bui_cache)
2171  		goto out_destroy_bud_cache;
2172  
2173  	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2174  					    sizeof(struct xfs_attrd_log_item),
2175  					    0, 0, NULL);
2176  	if (!xfs_attrd_cache)
2177  		goto out_destroy_bui_cache;
2178  
2179  	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2180  					    sizeof(struct xfs_attri_log_item),
2181  					    0, 0, NULL);
2182  	if (!xfs_attri_cache)
2183  		goto out_destroy_attrd_cache;
2184  
2185  	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2186  					     sizeof(struct xfs_iunlink_item),
2187  					     0, 0, NULL);
2188  	if (!xfs_iunlink_cache)
2189  		goto out_destroy_attri_cache;
2190  
2191  	return 0;
2192  
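	/*
	 * Error unwind: each label below tears down exactly the caches that
	 * were created before the failing allocation, in reverse order of
	 * creation.
	 */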
2193   out_destroy_attri_cache:
2194  	kmem_cache_destroy(xfs_attri_cache);
2195   out_destroy_attrd_cache:
2196  	kmem_cache_destroy(xfs_attrd_cache);
2197   out_destroy_bui_cache:
2198  	kmem_cache_destroy(xfs_bui_cache);
2199   out_destroy_bud_cache:
2200  	kmem_cache_destroy(xfs_bud_cache);
2201   out_destroy_cui_cache:
2202  	kmem_cache_destroy(xfs_cui_cache);
2203   out_destroy_cud_cache:
2204  	kmem_cache_destroy(xfs_cud_cache);
2205   out_destroy_rui_cache:
2206  	kmem_cache_destroy(xfs_rui_cache);
2207   out_destroy_rud_cache:
2208  	kmem_cache_destroy(xfs_rud_cache);
2209   out_destroy_icreate_cache:
2210  	kmem_cache_destroy(xfs_icreate_cache);
2211   out_destroy_ili_cache:
2212  	kmem_cache_destroy(xfs_ili_cache);
2213   out_destroy_inode_cache:
2214  	kmem_cache_destroy(xfs_inode_cache);
2215   out_destroy_efi_cache:
2216  	kmem_cache_destroy(xfs_efi_cache);
2217   out_destroy_efd_cache:
2218  	kmem_cache_destroy(xfs_efd_cache);
2219   out_destroy_buf_item_cache:
2220  	kmem_cache_destroy(xfs_buf_item_cache);
2221   out_destroy_trans_cache:
2222  	kmem_cache_destroy(xfs_trans_cache);
2223   out_destroy_ifork_cache:
2224  	kmem_cache_destroy(xfs_ifork_cache);
2225   out_destroy_da_state_cache:
2226  	kmem_cache_destroy(xfs_da_state_cache);
2227   out_destroy_defer_item_cache:
2228  	xfs_defer_destroy_item_caches();
2229   out_destroy_btree_cur_cache:
2230  	xfs_btree_destroy_cur_caches();
2231   out_destroy_log_ticket_cache:
2232  	kmem_cache_destroy(xfs_log_ticket_cache);
2233   out_destroy_buf_cache:
2234  	kmem_cache_destroy(xfs_buf_cache);
2235   out:
2236  	return -ENOMEM;
2237  }
2238  
2239  STATIC void
2240  xfs_destroy_caches(void)
2241  {
2242  	/*
2243  	 * Make sure all delayed RCU frees are flushed before we
2244  	 * destroy the caches.
2245  	 */
2246  	rcu_barrier();
2247  	kmem_cache_destroy(xfs_iunlink_cache);
2248  	kmem_cache_destroy(xfs_attri_cache);
2249  	kmem_cache_destroy(xfs_attrd_cache);
2250  	kmem_cache_destroy(xfs_bui_cache);
2251  	kmem_cache_destroy(xfs_bud_cache);
2252  	kmem_cache_destroy(xfs_cui_cache);
2253  	kmem_cache_destroy(xfs_cud_cache);
2254  	kmem_cache_destroy(xfs_rui_cache);
2255  	kmem_cache_destroy(xfs_rud_cache);
2256  	kmem_cache_destroy(xfs_icreate_cache);
2257  	kmem_cache_destroy(xfs_ili_cache);
2258  	kmem_cache_destroy(xfs_inode_cache);
2259  	kmem_cache_destroy(xfs_efi_cache);
2260  	kmem_cache_destroy(xfs_efd_cache);
2261  	kmem_cache_destroy(xfs_buf_item_cache);
2262  	kmem_cache_destroy(xfs_trans_cache);
2263  	kmem_cache_destroy(xfs_ifork_cache);
2264  	kmem_cache_destroy(xfs_da_state_cache);
2265  	xfs_defer_destroy_item_caches();
2266  	xfs_btree_destroy_cur_caches();
2267  	kmem_cache_destroy(xfs_log_ticket_cache);
2268  	kmem_cache_destroy(xfs_buf_cache);
2269  }
2270  
2271  STATIC int __init
2272  xfs_init_workqueues(void)
2273  {
2274  	/*
2275  	 * The allocation workqueue can be used in memory reclaim situations
2276  	 * (writepage path), and parallelism is only limited by the number of
2277  	 * AGs in all the filesystems mounted. Hence use the default large
2278  	 * max_active value for this workqueue.
2279  	 */
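	/*
	 * WQ_MEM_RECLAIM also guarantees a rescuer thread, so queued work can
	 * make forward progress even when no new worker threads can be
	 * created under memory pressure.
	 */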
2280  	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2281  			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2282  	if (!xfs_alloc_wq)
2283  		return -ENOMEM;
2284  
2285  	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2286  			0);
2287  	if (!xfs_discard_wq)
2288  		goto out_free_alloc_wq;
2289  
2290  	return 0;
2291  out_free_alloc_wq:
2292  	destroy_workqueue(xfs_alloc_wq);
2293  	return -ENOMEM;
2294  }
2295  
2296  STATIC void
2297  xfs_destroy_workqueues(void)
2298  {
2299  	destroy_workqueue(xfs_discard_wq);
2300  	destroy_workqueue(xfs_alloc_wq);
2301  }
2302  
2303  STATIC int __init
2304  init_xfs_fs(void)
2305  {
2306  	int			error;
2307  
2308  	xfs_check_ondisk_structs();
2309  
2310  	error = xfs_dahash_test();
2311  	if (error)
2312  		return error;
2313  
2314  	printk(KERN_INFO XFS_VERSION_STRING " with "
2315  			 XFS_BUILD_OPTIONS " enabled\n");
2316  
2317  	xfs_dir_startup();
2318  
2319  	error = xfs_init_caches();
2320  	if (error)
2321  		goto out;
2322  
2323  	error = xfs_init_workqueues();
2324  	if (error)
2325  		goto out_destroy_caches;
2326  
2327  	error = xfs_mru_cache_init();
2328  	if (error)
2329  		goto out_destroy_wq;
2330  
2331  	error = xfs_init_procfs();
2332  	if (error)
2333  		goto out_mru_cache_uninit;
2334  
2335  	error = xfs_sysctl_register();
2336  	if (error)
2337  		goto out_cleanup_procfs;
2338  
2339  	xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);
2340  
2341  	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2342  	if (!xfs_kset) {
2343  		error = -ENOMEM;
2344  		goto out_debugfs_unregister;
2345  	}
2346  
2347  	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2348  
2349  	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2350  	if (!xfsstats.xs_stats) {
2351  		error = -ENOMEM;
2352  		goto out_kset_unregister;
2353  	}
2354  
2355  	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2356  			       "stats");
2357  	if (error)
2358  		goto out_free_stats;
2359  
2360  	error = xchk_global_stats_setup(xfs_debugfs);
2361  	if (error)
2362  		goto out_remove_stats_kobj;
2363  
2364  #ifdef DEBUG
2365  	xfs_dbg_kobj.kobject.kset = xfs_kset;
2366  	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2367  	if (error)
2368  		goto out_remove_scrub_stats;
2369  #endif
2370  
2371  	error = xfs_qm_init();
2372  	if (error)
2373  		goto out_remove_dbg_kobj;
2374  
2375  	error = register_filesystem(&xfs_fs_type);
2376  	if (error)
2377  		goto out_qm_exit;
2378  	return 0;
2379  
2380   out_qm_exit:
2381  	xfs_qm_exit();
2382   out_remove_dbg_kobj:
2383  #ifdef DEBUG
2384  	xfs_sysfs_del(&xfs_dbg_kobj);
2385   out_remove_scrub_stats:
2386  #endif
2387  	xchk_global_stats_teardown();
2388   out_remove_stats_kobj:
2389  	xfs_sysfs_del(&xfsstats.xs_kobj);
2390   out_free_stats:
2391  	free_percpu(xfsstats.xs_stats);
2392   out_kset_unregister:
2393  	kset_unregister(xfs_kset);
2394   out_debugfs_unregister:
2395  	debugfs_remove(xfs_debugfs);
2396  	xfs_sysctl_unregister();
2397   out_cleanup_procfs:
2398  	xfs_cleanup_procfs();
2399   out_mru_cache_uninit:
2400  	xfs_mru_cache_uninit();
2401   out_destroy_wq:
2402  	xfs_destroy_workqueues();
2403   out_destroy_caches:
2404  	xfs_destroy_caches();
2405   out:
2406  	return error;
2407  }
2408  
2409  STATIC void __exit
2410  exit_xfs_fs(void)
2411  {
2412  	xfs_qm_exit();
2413  	unregister_filesystem(&xfs_fs_type);
2414  #ifdef DEBUG
2415  	xfs_sysfs_del(&xfs_dbg_kobj);
2416  #endif
2417  	xchk_global_stats_teardown();
2418  	xfs_sysfs_del(&xfsstats.xs_kobj);
2419  	free_percpu(xfsstats.xs_stats);
2420  	kset_unregister(xfs_kset);
2421  	debugfs_remove(xfs_debugfs);
2422  	xfs_sysctl_unregister();
2423  	xfs_cleanup_procfs();
2424  	xfs_mru_cache_uninit();
2425  	xfs_destroy_workqueues();
2426  	xfs_destroy_caches();
2427  	xfs_uuid_table_free();
2428  }
2429  
2430  module_init(init_xfs_fs);
2431  module_exit(exit_xfs_fs);
2432  
2433  MODULE_AUTHOR("Silicon Graphics, Inc.");
2434  MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2435  MODULE_LICENSE("GPL");
2436