xref: /openbmc/linux/fs/xfs/xfs_super.c (revision 388f6966)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 
39 #include <linux/magic.h>
40 #include <linux/fs_context.h>
41 #include <linux/fs_parser.h>
42 
43 static const struct super_operations xfs_super_operations;
44 
45 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
46 #ifdef DEBUG
47 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
48 #endif
49 
50 /*
51  * Table driven mount option parser.
52  */
53 enum {
54 	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
55 	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
56 	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
57 	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
58 	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
59 	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
60 	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
61 	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
62 	Opt_discard, Opt_nodiscard, Opt_dax,
63 };
64 
65 static const struct fs_parameter_spec xfs_fs_parameters[] = {
66 	fsparam_u32("logbufs",		Opt_logbufs),
67 	fsparam_string("logbsize",	Opt_logbsize),
68 	fsparam_string("logdev",	Opt_logdev),
69 	fsparam_string("rtdev",		Opt_rtdev),
70 	fsparam_flag("wsync",		Opt_wsync),
71 	fsparam_flag("noalign",		Opt_noalign),
72 	fsparam_flag("swalloc",		Opt_swalloc),
73 	fsparam_u32("sunit",		Opt_sunit),
74 	fsparam_u32("swidth",		Opt_swidth),
75 	fsparam_flag("nouuid",		Opt_nouuid),
76 	fsparam_flag("grpid",		Opt_grpid),
77 	fsparam_flag("nogrpid",		Opt_nogrpid),
78 	fsparam_flag("bsdgroups",	Opt_bsdgroups),
79 	fsparam_flag("sysvgroups",	Opt_sysvgroups),
80 	fsparam_string("allocsize",	Opt_allocsize),
81 	fsparam_flag("norecovery",	Opt_norecovery),
82 	fsparam_flag("inode64",		Opt_inode64),
83 	fsparam_flag("inode32",		Opt_inode32),
84 	fsparam_flag("ikeep",		Opt_ikeep),
85 	fsparam_flag("noikeep",		Opt_noikeep),
86 	fsparam_flag("largeio",		Opt_largeio),
87 	fsparam_flag("nolargeio",	Opt_nolargeio),
88 	fsparam_flag("attr2",		Opt_attr2),
89 	fsparam_flag("noattr2",		Opt_noattr2),
90 	fsparam_flag("filestreams",	Opt_filestreams),
91 	fsparam_flag("quota",		Opt_quota),
92 	fsparam_flag("noquota",		Opt_noquota),
93 	fsparam_flag("usrquota",	Opt_usrquota),
94 	fsparam_flag("grpquota",	Opt_grpquota),
95 	fsparam_flag("prjquota",	Opt_prjquota),
96 	fsparam_flag("uquota",		Opt_uquota),
97 	fsparam_flag("gquota",		Opt_gquota),
98 	fsparam_flag("pquota",		Opt_pquota),
99 	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
100 	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
101 	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
102 	fsparam_flag("qnoenforce",	Opt_qnoenforce),
103 	fsparam_flag("discard",		Opt_discard),
104 	fsparam_flag("nodiscard",	Opt_nodiscard),
105 	fsparam_flag("dax",		Opt_dax),
106 	{}
107 };
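/*
 * Illustration only (hypothetical devices and values): a mount request such
 * as
 *
 *	mount -t xfs -o logbsize=256k,logdev=/dev/sdb1,sunit=128,swidth=512 \
 *		/dev/sda1 /mnt
 *
 * is split by the VFS into individual parameters, each of which is matched
 * against the table above by fs_parse() from xfs_fc_parse_param() below.
 * Flag options take no argument, u32 options are converted for us, and the
 * string options (logbsize, logdev, rtdev, allocsize) are handled by the
 * individual cases in the switch statement.
 */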
108 
109 struct proc_xfs_info {
110 	uint64_t	flag;
111 	char		*str;
112 };
113 
114 static int
115 xfs_fs_show_options(
116 	struct seq_file		*m,
117 	struct dentry		*root)
118 {
119 	static struct proc_xfs_info xfs_info_set[] = {
120 		/* the few simple ones we can get from the mount struct */
121 		{ XFS_MOUNT_IKEEP,		",ikeep" },
122 		{ XFS_MOUNT_WSYNC,		",wsync" },
123 		{ XFS_MOUNT_NOALIGN,		",noalign" },
124 		{ XFS_MOUNT_SWALLOC,		",swalloc" },
125 		{ XFS_MOUNT_NOUUID,		",nouuid" },
126 		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
127 		{ XFS_MOUNT_ATTR2,		",attr2" },
128 		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
129 		{ XFS_MOUNT_GRPID,		",grpid" },
130 		{ XFS_MOUNT_DISCARD,		",discard" },
131 		{ XFS_MOUNT_LARGEIO,		",largeio" },
132 		{ XFS_MOUNT_DAX,		",dax" },
133 		{ 0, NULL }
134 	};
135 	struct xfs_mount	*mp = XFS_M(root->d_sb);
136 	struct proc_xfs_info	*xfs_infop;
137 
138 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
139 		if (mp->m_flags & xfs_infop->flag)
140 			seq_puts(m, xfs_infop->str);
141 	}
142 
143 	seq_printf(m, ",inode%d",
144 		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);
145 
146 	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
147 		seq_printf(m, ",allocsize=%dk",
148 			   (1 << mp->m_allocsize_log) >> 10);
149 
150 	if (mp->m_logbufs > 0)
151 		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
152 	if (mp->m_logbsize > 0)
153 		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
154 
155 	if (mp->m_logname)
156 		seq_show_option(m, "logdev", mp->m_logname);
157 	if (mp->m_rtname)
158 		seq_show_option(m, "rtdev", mp->m_rtname);
159 
160 	if (mp->m_dalign > 0)
161 		seq_printf(m, ",sunit=%d",
162 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
163 	if (mp->m_swidth > 0)
164 		seq_printf(m, ",swidth=%d",
165 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
166 
167 	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
168 		seq_puts(m, ",usrquota");
169 	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
170 		seq_puts(m, ",uqnoenforce");
171 
172 	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
173 		if (mp->m_qflags & XFS_PQUOTA_ENFD)
174 			seq_puts(m, ",prjquota");
175 		else
176 			seq_puts(m, ",pqnoenforce");
177 	}
178 	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
179 		if (mp->m_qflags & XFS_GQUOTA_ENFD)
180 			seq_puts(m, ",grpquota");
181 		else
182 			seq_puts(m, ",gqnoenforce");
183 	}
184 
185 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
186 		seq_puts(m, ",noquota");
187 
188 	return 0;
189 }
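/*
 * For a hypothetical mount, the options emitted above might show up in
 * /proc/mounts as something like:
 *
 *	/dev/sda1 /mnt xfs rw,attr2,inode64,noquota 0 0
 *
 * The leading rw/ro and other generic flags come from the VFS; this
 * function only appends the comma-prefixed XFS-specific pieces.
 */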
190 
191 /*
192  * Set parameters for inode allocation heuristics, taking into account
193  * filesystem size and inode32/inode64 mount options; i.e. specifically
194  * whether or not XFS_MOUNT_SMALL_INUMS is set.
195  *
196  * Inode allocation patterns are altered only if inode32 is requested
197  * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
198  * If altered, XFS_MOUNT_32BITINODES is set as well.
199  *
200  * An agcount independent of that in the mount structure is provided
201  * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
202  * to the potentially higher ag count.
203  *
204  * Returns the maximum AG index which may contain inodes.
205  */
206 xfs_agnumber_t
207 xfs_set_inode_alloc(
208 	struct xfs_mount *mp,
209 	xfs_agnumber_t	agcount)
210 {
211 	xfs_agnumber_t	index;
212 	xfs_agnumber_t	maxagi = 0;
213 	xfs_sb_t	*sbp = &mp->m_sb;
214 	xfs_agnumber_t	max_metadata;
215 	xfs_agino_t	agino;
216 	xfs_ino_t	ino;
217 
218 	/*
219 	 * Calculate how much should be reserved for inodes to meet
220 	 * the max inode percentage.  Used only for inode32.
221 	 */
222 	if (M_IGEO(mp)->maxicount) {
223 		uint64_t	icount;
224 
225 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
226 		do_div(icount, 100);
227 		icount += sbp->sb_agblocks - 1;
228 		do_div(icount, sbp->sb_agblocks);
229 		max_metadata = icount;
230 	} else {
231 		max_metadata = agcount;
232 	}
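	/*
	 * Worked example with made-up geometry: sb_dblocks = 1000000,
	 * sb_imax_pct = 25 and sb_agblocks = 250000 give icount =
	 * 1000000 * 25 / 100 = 250000 blocks, which rounds up to exactly
	 * one AG's worth of space, so max_metadata = 1.
	 */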
233 
234 	/* Get the last possible inode in the filesystem */
235 	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
236 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
237 
238 	/*
239 	 * If user asked for no more than 32-bit inodes, and the fs is
240 	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
241 	 * the allocator to accommodate the request.
242 	 */
243 	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
244 		mp->m_flags |= XFS_MOUNT_32BITINODES;
245 	else
246 		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
247 
248 	for (index = 0; index < agcount; index++) {
249 		struct xfs_perag	*pag;
250 
251 		ino = XFS_AGINO_TO_INO(mp, index, agino);
252 
253 		pag = xfs_perag_get(mp, index);
254 
255 		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
256 			if (ino > XFS_MAXINUMBER_32) {
257 				pag->pagi_inodeok = 0;
258 				pag->pagf_metadata = 0;
259 			} else {
260 				pag->pagi_inodeok = 1;
261 				maxagi++;
262 				if (index < max_metadata)
263 					pag->pagf_metadata = 1;
264 				else
265 					pag->pagf_metadata = 0;
266 			}
267 		} else {
268 			pag->pagi_inodeok = 1;
269 			pag->pagf_metadata = 0;
270 		}
271 
272 		xfs_perag_put(pag);
273 	}
274 
275 	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
276 }
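/*
 * Note that this is re-run when the inode32/inode64 option changes on a
 * remount; see xfs_fc_reconfigure() below, which calls it with the current
 * sb_agcount to recompute mp->m_maxagi.
 */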
277 
278 STATIC int
279 xfs_blkdev_get(
280 	xfs_mount_t		*mp,
281 	const char		*name,
282 	struct block_device	**bdevp)
283 {
284 	int			error = 0;
285 
286 	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
287 				    mp);
288 	if (IS_ERR(*bdevp)) {
289 		error = PTR_ERR(*bdevp);
290 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
291 	}
292 
293 	return error;
294 }
295 
296 STATIC void
297 xfs_blkdev_put(
298 	struct block_device	*bdev)
299 {
300 	if (bdev)
301 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
302 }
303 
304 void
305 xfs_blkdev_issue_flush(
306 	xfs_buftarg_t		*buftarg)
307 {
308 	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
309 }
310 
311 STATIC void
312 xfs_close_devices(
313 	struct xfs_mount	*mp)
314 {
315 	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
316 
317 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
318 		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
319 		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
320 
321 		xfs_free_buftarg(mp->m_logdev_targp);
322 		xfs_blkdev_put(logdev);
323 		fs_put_dax(dax_logdev);
324 	}
325 	if (mp->m_rtdev_targp) {
326 		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
327 		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
328 
329 		xfs_free_buftarg(mp->m_rtdev_targp);
330 		xfs_blkdev_put(rtdev);
331 		fs_put_dax(dax_rtdev);
332 	}
333 	xfs_free_buftarg(mp->m_ddev_targp);
334 	fs_put_dax(dax_ddev);
335 }
336 
337 /*
338  * The file system configurations are:
339  *	(1) device (partition) with data and internal log.
340  *	(2) logical volume with data and log subvolumes.
341  *	(3) logical volume with data, log, and realtime subvolumes.
342  *
343  * We only have to handle opening the log and realtime volumes here if
344  * they are present.  The data subvolume has already been opened by
345  * get_tree_bdev() and is stored in sb->s_bdev.
346  */
347 STATIC int
348 xfs_open_devices(
349 	struct xfs_mount	*mp)
350 {
351 	struct block_device	*ddev = mp->m_super->s_bdev;
352 	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
353 	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
354 	struct block_device	*logdev = NULL, *rtdev = NULL;
355 	int			error;
356 
357 	/*
358 	 * Open real time and log devices - order is important.
359 	 */
360 	if (mp->m_logname) {
361 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
362 		if (error)
363 			goto out;
364 		dax_logdev = fs_dax_get_by_bdev(logdev);
365 	}
366 
367 	if (mp->m_rtname) {
368 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
369 		if (error)
370 			goto out_close_logdev;
371 
372 		if (rtdev == ddev || rtdev == logdev) {
373 			xfs_warn(mp,
374 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
375 			error = -EINVAL;
376 			goto out_close_rtdev;
377 		}
378 		dax_rtdev = fs_dax_get_by_bdev(rtdev);
379 	}
380 
381 	/*
382 	 * Setup xfs_mount buffer target pointers
383 	 */
384 	error = -ENOMEM;
385 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
386 	if (!mp->m_ddev_targp)
387 		goto out_close_rtdev;
388 
389 	if (rtdev) {
390 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
391 		if (!mp->m_rtdev_targp)
392 			goto out_free_ddev_targ;
393 	}
394 
395 	if (logdev && logdev != ddev) {
396 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
397 		if (!mp->m_logdev_targp)
398 			goto out_free_rtdev_targ;
399 	} else {
400 		mp->m_logdev_targp = mp->m_ddev_targp;
401 	}
402 
403 	return 0;
404 
405  out_free_rtdev_targ:
406 	if (mp->m_rtdev_targp)
407 		xfs_free_buftarg(mp->m_rtdev_targp);
408  out_free_ddev_targ:
409 	xfs_free_buftarg(mp->m_ddev_targp);
410  out_close_rtdev:
411 	xfs_blkdev_put(rtdev);
412 	fs_put_dax(dax_rtdev);
413  out_close_logdev:
414 	if (logdev && logdev != ddev) {
415 		xfs_blkdev_put(logdev);
416 		fs_put_dax(dax_logdev);
417 	}
418  out:
419 	fs_put_dax(dax_ddev);
420 	return error;
421 }
422 
423 /*
424  * Setup xfs_mount buffer target pointers based on superblock
425  */
426 STATIC int
427 xfs_setup_devices(
428 	struct xfs_mount	*mp)
429 {
430 	int			error;
431 
432 	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
433 	if (error)
434 		return error;
435 
436 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
437 		unsigned int	log_sector_size = BBSIZE;
438 
439 		if (xfs_sb_version_hassector(&mp->m_sb))
440 			log_sector_size = mp->m_sb.sb_logsectsize;
441 		error = xfs_setsize_buftarg(mp->m_logdev_targp,
442 					    log_sector_size);
443 		if (error)
444 			return error;
445 	}
446 	if (mp->m_rtdev_targp) {
447 		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
448 					    mp->m_sb.sb_sectsize);
449 		if (error)
450 			return error;
451 	}
452 
453 	return 0;
454 }
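/*
 * Note: on filesystems without the sector size feature bit, the external
 * log's buftarg is sized for 512 byte (BBSIZE) sectors regardless of what
 * the log device itself reports.
 */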
455 
456 STATIC int
457 xfs_init_mount_workqueues(
458 	struct xfs_mount	*mp)
459 {
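	/*
	 * The "%s" in each name below is expanded to the superblock id, so
	 * for a mount of (say) /dev/sda1 the workqueues end up named
	 * xfs-buf/sda1, xfs-conv/sda1, xfs-cil/sda1 and so on.
	 */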
460 	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
461 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
462 	if (!mp->m_buf_workqueue)
463 		goto out;
464 
465 	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
466 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
467 	if (!mp->m_unwritten_workqueue)
468 		goto out_destroy_buf;
469 
470 	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
471 			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
472 			0, mp->m_super->s_id);
473 	if (!mp->m_cil_workqueue)
474 		goto out_destroy_unwritten;
475 
476 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
477 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
478 	if (!mp->m_reclaim_workqueue)
479 		goto out_destroy_cil;
480 
481 	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
482 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
483 	if (!mp->m_eofblocks_workqueue)
484 		goto out_destroy_reclaim;
485 
486 	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
487 					       mp->m_super->s_id);
488 	if (!mp->m_sync_workqueue)
489 		goto out_destroy_eofb;
490 
491 	return 0;
492 
493 out_destroy_eofb:
494 	destroy_workqueue(mp->m_eofblocks_workqueue);
495 out_destroy_reclaim:
496 	destroy_workqueue(mp->m_reclaim_workqueue);
497 out_destroy_cil:
498 	destroy_workqueue(mp->m_cil_workqueue);
499 out_destroy_unwritten:
500 	destroy_workqueue(mp->m_unwritten_workqueue);
501 out_destroy_buf:
502 	destroy_workqueue(mp->m_buf_workqueue);
503 out:
504 	return -ENOMEM;
505 }
506 
507 STATIC void
508 xfs_destroy_mount_workqueues(
509 	struct xfs_mount	*mp)
510 {
511 	destroy_workqueue(mp->m_sync_workqueue);
512 	destroy_workqueue(mp->m_eofblocks_workqueue);
513 	destroy_workqueue(mp->m_reclaim_workqueue);
514 	destroy_workqueue(mp->m_cil_workqueue);
515 	destroy_workqueue(mp->m_unwritten_workqueue);
516 	destroy_workqueue(mp->m_buf_workqueue);
517 }
518 
519 /*
520  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
521  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
522  * for IO to complete so that we effectively throttle multiple callers to the
523  * rate at which IO is completing.
524  */
525 void
526 xfs_flush_inodes(
527 	struct xfs_mount	*mp)
528 {
529 	struct super_block	*sb = mp->m_super;
530 
531 	if (down_read_trylock(&sb->s_umount)) {
532 		sync_inodes_sb(sb);
533 		up_read(&sb->s_umount);
534 	}
535 }
536 
537 /* Catch misguided souls that try to use this interface on XFS */
538 STATIC struct inode *
539 xfs_fs_alloc_inode(
540 	struct super_block	*sb)
541 {
542 	BUG();
543 	return NULL;
544 }
545 
546 #ifdef DEBUG
547 static void
548 xfs_check_delalloc(
549 	struct xfs_inode	*ip,
550 	int			whichfork)
551 {
552 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
553 	struct xfs_bmbt_irec	got;
554 	struct xfs_iext_cursor	icur;
555 
556 	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
557 		return;
558 	do {
559 		if (isnullstartblock(got.br_startblock)) {
560 			xfs_warn(ip->i_mount,
561 	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
562 				ip->i_ino,
563 				whichfork == XFS_DATA_FORK ? "data" : "cow",
564 				got.br_startoff, got.br_blockcount);
565 		}
566 	} while (xfs_iext_next_extent(ifp, &icur, &got));
567 }
568 #else
569 #define xfs_check_delalloc(ip, whichfork)	do { } while (0)
570 #endif
571 
572 /*
573  * Now that the generic code is guaranteed not to be accessing
574  * the linux inode, we can inactivate and reclaim the inode.
575  */
576 STATIC void
577 xfs_fs_destroy_inode(
578 	struct inode		*inode)
579 {
580 	struct xfs_inode	*ip = XFS_I(inode);
581 
582 	trace_xfs_destroy_inode(ip);
583 
584 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
585 	XFS_STATS_INC(ip->i_mount, vn_rele);
586 	XFS_STATS_INC(ip->i_mount, vn_remove);
587 
588 	xfs_inactive(ip);
589 
590 	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
591 		xfs_check_delalloc(ip, XFS_DATA_FORK);
592 		xfs_check_delalloc(ip, XFS_COW_FORK);
593 		ASSERT(0);
594 	}
595 
596 	XFS_STATS_INC(ip->i_mount, vn_reclaim);
597 
598 	/*
599 	 * We should never get here with one of the reclaim flags already set.
600 	 */
601 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
602 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
603 
604 	/*
605 	 * We always use background reclaim here because even if the
606 	 * inode is clean, it still may be under IO and hence we have
607 	 * to take the flush lock. The background reclaim path handles
608 	 * this more efficiently than we can here, so simply let background
609 	 * reclaim tear down all inodes.
610 	 */
611 	xfs_inode_set_reclaim_tag(ip);
612 }
613 
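/*
 * ->dirty_inode is only interesting to XFS for lazytime mounts: when the VFS
 * decides that a deferred timestamp update (I_DIRTY_TIME) must finally be
 * made persistent it ends up calling us with I_DIRTY_SYNC, and we log just
 * the timestamps in a small transaction below.
 */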
614 static void
615 xfs_fs_dirty_inode(
616 	struct inode			*inode,
617 	int				flag)
618 {
619 	struct xfs_inode		*ip = XFS_I(inode);
620 	struct xfs_mount		*mp = ip->i_mount;
621 	struct xfs_trans		*tp;
622 
623 	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
624 		return;
625 	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
626 		return;
627 
628 	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
629 		return;
630 	xfs_ilock(ip, XFS_ILOCK_EXCL);
631 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
632 	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
633 	xfs_trans_commit(tp);
634 }
635 
636 /*
637  * Slab object creation initialisation for the XFS inode.
638  * This covers only the idempotent fields in the XFS inode;
639  * all other fields need to be initialised on allocation
640  * from the slab. This avoids the need to repeatedly initialise
641  * fields in the xfs inode that are left in the initialised state
642  * when freeing the inode.
643  */
644 STATIC void
645 xfs_fs_inode_init_once(
646 	void			*inode)
647 {
648 	struct xfs_inode	*ip = inode;
649 
650 	memset(ip, 0, sizeof(struct xfs_inode));
651 
652 	/* vfs inode */
653 	inode_init_once(VFS_I(ip));
654 
655 	/* xfs inode */
656 	atomic_set(&ip->i_pincount, 0);
657 	spin_lock_init(&ip->i_flags_lock);
658 
659 	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
660 		     "xfsino", ip->i_ino);
661 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
662 		     "xfsino", ip->i_ino);
663 }
664 
665 /*
666  * We do an unlocked check for XFS_IDONTCACHE here because we are already
667  * serialised against cache hits here via the inode->i_lock and igrab() in
668  * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
669  * racing with us, and it avoids needing to grab a spinlock here for every inode
670  * we drop the final reference on.
671  */
672 STATIC int
673 xfs_fs_drop_inode(
674 	struct inode		*inode)
675 {
676 	struct xfs_inode	*ip = XFS_I(inode);
677 
678 	/*
679 	 * If this unlinked inode is in the middle of recovery, don't
680 	 * drop the inode just yet; log recovery will take care of
681 	 * that.  See the comment for this inode flag.
682 	 */
683 	if (ip->i_flags & XFS_IRECOVERY) {
684 		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
685 		return 0;
686 	}
687 
688 	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
689 }
690 
691 static void
692 xfs_mount_free(
693 	struct xfs_mount	*mp)
694 {
695 	kfree(mp->m_rtname);
696 	kfree(mp->m_logname);
697 	kmem_free(mp);
698 }
699 
700 STATIC int
701 xfs_fs_sync_fs(
702 	struct super_block	*sb,
703 	int			wait)
704 {
705 	struct xfs_mount	*mp = XFS_M(sb);
706 
707 	/*
708 	 * Doing anything during the async pass would be counterproductive.
709 	 */
710 	if (!wait)
711 		return 0;
712 
713 	xfs_log_force(mp, XFS_LOG_SYNC);
714 	if (laptop_mode) {
715 		/*
716 		 * The disk must be active because we're syncing.
717 		 * We schedule log work now (now that the disk is
718 		 * active) instead of later (when it might not be).
719 		 */
720 		flush_delayed_work(&mp->m_log->l_work);
721 	}
722 
723 	return 0;
724 }
725 
726 STATIC int
727 xfs_fs_statfs(
728 	struct dentry		*dentry,
729 	struct kstatfs		*statp)
730 {
731 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
732 	xfs_sb_t		*sbp = &mp->m_sb;
733 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
734 	uint64_t		fakeinos, id;
735 	uint64_t		icount;
736 	uint64_t		ifree;
737 	uint64_t		fdblocks;
738 	xfs_extlen_t		lsize;
739 	int64_t			ffree;
740 
741 	statp->f_type = XFS_SUPER_MAGIC;
742 	statp->f_namelen = MAXNAMELEN - 1;
743 
744 	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
745 	statp->f_fsid.val[0] = (u32)id;
746 	statp->f_fsid.val[1] = (u32)(id >> 32);
747 
748 	icount = percpu_counter_sum(&mp->m_icount);
749 	ifree = percpu_counter_sum(&mp->m_ifree);
750 	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
751 
752 	spin_lock(&mp->m_sb_lock);
753 	statp->f_bsize = sbp->sb_blocksize;
754 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
755 	statp->f_blocks = sbp->sb_dblocks - lsize;
756 	spin_unlock(&mp->m_sb_lock);
757 
758 	statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
759 	statp->f_bavail = statp->f_bfree;
760 
761 	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
762 	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
763 	if (M_IGEO(mp)->maxicount)
764 		statp->f_files = min_t(typeof(statp->f_files),
765 					statp->f_files,
766 					M_IGEO(mp)->maxicount);
767 
768 	/* If sb_icount overshot maxicount, report actual allocation */
769 	statp->f_files = max_t(typeof(statp->f_files),
770 					statp->f_files,
771 					sbp->sb_icount);
772 
773 	/* make sure statp->f_ffree does not underflow */
774 	ffree = statp->f_files - (icount - ifree);
775 	statp->f_ffree = max_t(int64_t, ffree, 0);
776 
777 
778 	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
779 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
780 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
781 		xfs_qm_statvfs(ip, statp);
782 
783 	if (XFS_IS_REALTIME_MOUNT(mp) &&
784 	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
785 		statp->f_blocks = sbp->sb_rblocks;
786 		statp->f_bavail = statp->f_bfree =
787 			sbp->sb_frextents * sbp->sb_rextsize;
788 	}
789 
790 	return 0;
791 }
792 
793 STATIC void
794 xfs_save_resvblks(struct xfs_mount *mp)
795 {
796 	uint64_t resblks = 0;
797 
798 	mp->m_resblks_save = mp->m_resblks;
799 	xfs_reserve_blocks(mp, &resblks, NULL);
800 }
801 
802 STATIC void
803 xfs_restore_resvblks(struct xfs_mount *mp)
804 {
805 	uint64_t resblks;
806 
807 	if (mp->m_resblks_save) {
808 		resblks = mp->m_resblks_save;
809 		mp->m_resblks_save = 0;
810 	} else
811 		resblks = xfs_default_resblks(mp);
812 
813 	xfs_reserve_blocks(mp, &resblks, NULL);
814 }
815 
816 /*
817  * Trigger writeback of all the dirty metadata in the file system.
818  *
819  * This ensures that the metadata is written to its location on disk rather
820  * than just existing in transactions in the log. This means after a quiesce
821  * there is no log replay required to write the inodes to disk - this is the
822  * primary difference between a sync and a quiesce.
823  *
824  * Note: xfs_log_quiesce() stops background log work - the callers must ensure
825  * it is started again when appropriate.
826  */
827 void
828 xfs_quiesce_attr(
829 	struct xfs_mount	*mp)
830 {
831 	int	error = 0;
832 
833 	/* wait for all modifications to complete */
834 	while (atomic_read(&mp->m_active_trans) > 0)
835 		delay(100);
836 
837 	/* force the log to unpin objects from the now complete transactions */
838 	xfs_log_force(mp, XFS_LOG_SYNC);
839 
840 	/* reclaim inodes to do any IO before the freeze completes */
841 	xfs_reclaim_inodes(mp, 0);
842 	xfs_reclaim_inodes(mp, SYNC_WAIT);
843 
844 	/* Push the superblock and write an unmount record */
845 	error = xfs_log_sbcount(mp);
846 	if (error)
847 		xfs_warn(mp, "xfs_quiesce_attr: failed to log sb changes. "
848 				"Frozen image may not be consistent.");
849 	/*
850 	 * Just warn here until the VFS can correctly support
851 	 * read-only remount without racing.
852 	 */
853 	WARN_ON(atomic_read(&mp->m_active_trans) != 0);
854 
855 	xfs_log_quiesce(mp);
856 }
857 
858 /*
859  * Second stage of a freeze. The data is already frozen so we only
860  * need to take care of the metadata. Once that's done sync the superblock
861  * to the log to dirty it in case of a crash while frozen. This ensures that we
862  * will recover the unlinked inode lists on the next mount.
863  */
864 STATIC int
865 xfs_fs_freeze(
866 	struct super_block	*sb)
867 {
868 	struct xfs_mount	*mp = XFS_M(sb);
869 
870 	xfs_stop_block_reaping(mp);
871 	xfs_save_resvblks(mp);
872 	xfs_quiesce_attr(mp);
873 	return xfs_sync_sb(mp, true);
874 }
875 
876 STATIC int
877 xfs_fs_unfreeze(
878 	struct super_block	*sb)
879 {
880 	struct xfs_mount	*mp = XFS_M(sb);
881 
882 	xfs_restore_resvblks(mp);
883 	xfs_log_work_queue(mp);
884 	xfs_start_block_reaping(mp);
885 	return 0;
886 }
887 
888 /*
889  * This function fills in xfs_mount_t fields based on mount args.
890  * Note: the superblock _has_ now been read in.
891  */
892 STATIC int
893 xfs_finish_flags(
894 	struct xfs_mount	*mp)
895 {
896 	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
897 
898 	/* Fail a mount where the logbuf is smaller than the log stripe */
899 	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
900 		if (mp->m_logbsize <= 0 &&
901 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
902 			mp->m_logbsize = mp->m_sb.sb_logsunit;
903 		} else if (mp->m_logbsize > 0 &&
904 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
905 			xfs_warn(mp,
906 		"logbuf size must be greater than or equal to log stripe size");
907 			return -EINVAL;
908 		}
909 	} else {
910 		/* Fail a mount if the logbuf is larger than 32K */
911 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
912 			xfs_warn(mp,
913 		"logbuf size for version 1 logs must be 16K or 32K");
914 			return -EINVAL;
915 		}
916 	}
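	/*
	 * Example with hypothetical geometry: a v2 log with a 256k stripe
	 * unit and no logbsize= option gets m_logbsize bumped to 256k here,
	 * while an explicit logbsize=64k on the same filesystem fails the
	 * mount with -EINVAL.
	 */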
917 
918 	/*
919 	 * V5 filesystems always use attr2 format for attributes.
920 	 */
921 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
922 	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
923 		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
924 			     "attr2 is always enabled for V5 filesystems.");
925 		return -EINVAL;
926 	}
927 
928 	/*
929 	 * A filesystem mkfs'ed with attr2 enables the attr2 mount option
930 	 * unless it is explicitly turned off with noattr2.
931 	 */
932 	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
933 	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
934 		mp->m_flags |= XFS_MOUNT_ATTR2;
935 
936 	/*
937 	 * prohibit r/w mounts of read-only filesystems
938 	 */
939 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
940 		xfs_warn(mp,
941 			"cannot mount a read-only filesystem as read-write");
942 		return -EROFS;
943 	}
944 
945 	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
946 	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
947 	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
948 		xfs_warn(mp,
949 		  "Super block does not support project and group quota together");
950 		return -EINVAL;
951 	}
952 
953 	return 0;
954 }
955 
956 static int
957 xfs_init_percpu_counters(
958 	struct xfs_mount	*mp)
959 {
960 	int		error;
961 
962 	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
963 	if (error)
964 		return -ENOMEM;
965 
966 	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
967 	if (error)
968 		goto free_icount;
969 
970 	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
971 	if (error)
972 		goto free_ifree;
973 
974 	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
975 	if (error)
976 		goto free_fdblocks;
977 
978 	return 0;
979 
980 free_fdblocks:
981 	percpu_counter_destroy(&mp->m_fdblocks);
982 free_ifree:
983 	percpu_counter_destroy(&mp->m_ifree);
984 free_icount:
985 	percpu_counter_destroy(&mp->m_icount);
986 	return -ENOMEM;
987 }
988 
989 void
990 xfs_reinit_percpu_counters(
991 	struct xfs_mount	*mp)
992 {
993 	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
994 	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
995 	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
996 }
997 
998 static void
999 xfs_destroy_percpu_counters(
1000 	struct xfs_mount	*mp)
1001 {
1002 	percpu_counter_destroy(&mp->m_icount);
1003 	percpu_counter_destroy(&mp->m_ifree);
1004 	percpu_counter_destroy(&mp->m_fdblocks);
1005 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
1006 	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1007 	percpu_counter_destroy(&mp->m_delalloc_blks);
1008 }
1009 
1010 static void
1011 xfs_fs_put_super(
1012 	struct super_block	*sb)
1013 {
1014 	struct xfs_mount	*mp = XFS_M(sb);
1015 
1016 	/* if ->fill_super failed, we have no mount to tear down */
1017 	if (!sb->s_fs_info)
1018 		return;
1019 
1020 	xfs_notice(mp, "Unmounting Filesystem");
1021 	xfs_filestream_unmount(mp);
1022 	xfs_unmountfs(mp);
1023 
1024 	xfs_freesb(mp);
1025 	free_percpu(mp->m_stats.xs_stats);
1026 	xfs_destroy_percpu_counters(mp);
1027 	xfs_destroy_mount_workqueues(mp);
1028 	xfs_close_devices(mp);
1029 
1030 	sb->s_fs_info = NULL;
1031 	xfs_mount_free(mp);
1032 }
1033 
1034 static long
1035 xfs_fs_nr_cached_objects(
1036 	struct super_block	*sb,
1037 	struct shrink_control	*sc)
1038 {
1039 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1040 	if (WARN_ON_ONCE(!sb->s_fs_info))
1041 		return 0;
1042 	return xfs_reclaim_inodes_count(XFS_M(sb));
1043 }
1044 
1045 static long
1046 xfs_fs_free_cached_objects(
1047 	struct super_block	*sb,
1048 	struct shrink_control	*sc)
1049 {
1050 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1051 }
1052 
1053 static const struct super_operations xfs_super_operations = {
1054 	.alloc_inode		= xfs_fs_alloc_inode,
1055 	.destroy_inode		= xfs_fs_destroy_inode,
1056 	.dirty_inode		= xfs_fs_dirty_inode,
1057 	.drop_inode		= xfs_fs_drop_inode,
1058 	.put_super		= xfs_fs_put_super,
1059 	.sync_fs		= xfs_fs_sync_fs,
1060 	.freeze_fs		= xfs_fs_freeze,
1061 	.unfreeze_fs		= xfs_fs_unfreeze,
1062 	.statfs			= xfs_fs_statfs,
1063 	.show_options		= xfs_fs_show_options,
1064 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1065 	.free_cached_objects	= xfs_fs_free_cached_objects,
1066 };
1067 
1068 static int
1069 suffix_kstrtoint(
1070 	const char	*s,
1071 	unsigned int	base,
1072 	int		*res)
1073 {
1074 	int		last, shift_left_factor = 0, _res = 0;
1075 	char		*value;
1076 	int		ret = 0;
1077 
1078 	value = kstrdup(s, GFP_KERNEL);
1079 	if (!value)
1080 		return -ENOMEM;
1081 
1082 	last = strlen(value) - 1;
1083 	if (value[last] == 'K' || value[last] == 'k') {
1084 		shift_left_factor = 10;
1085 		value[last] = '\0';
1086 	}
1087 	if (value[last] == 'M' || value[last] == 'm') {
1088 		shift_left_factor = 20;
1089 		value[last] = '\0';
1090 	}
1091 	if (value[last] == 'G' || value[last] == 'g') {
1092 		shift_left_factor = 30;
1093 		value[last] = '\0';
1094 	}
1095 
1096 	if (kstrtoint(value, base, &_res))
1097 		ret = -EINVAL;
1098 	kfree(value);
1099 	*res = _res << shift_left_factor;
1100 	return ret;
1101 }
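/*
 * Usage sketch: suffix_kstrtoint("64k", 10, &res) strips the "k", parses 64
 * and returns 0 with res == 64 << 10 == 65536.  Callers must check the
 * return value before trusting *res.
 */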
1102 
1103 /*
1104  * Set mount state from a mount option.
1105  *
1106  * NOTE: mp->m_super is NULL here!
1107  */
1108 static int
1109 xfs_fc_parse_param(
1110 	struct fs_context	*fc,
1111 	struct fs_parameter	*param)
1112 {
1113 	struct xfs_mount	*mp = fc->s_fs_info;
1114 	struct fs_parse_result	result;
1115 	int			size = 0;
1116 	int			opt;
1117 
1118 	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1119 	if (opt < 0)
1120 		return opt;
1121 
1122 	switch (opt) {
1123 	case Opt_logbufs:
1124 		mp->m_logbufs = result.uint_32;
1125 		return 0;
1126 	case Opt_logbsize:
1127 		if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
1128 			return -EINVAL;
1129 		return 0;
1130 	case Opt_logdev:
1131 		kfree(mp->m_logname);
1132 		mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1133 		if (!mp->m_logname)
1134 			return -ENOMEM;
1135 		return 0;
1136 	case Opt_rtdev:
1137 		kfree(mp->m_rtname);
1138 		mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1139 		if (!mp->m_rtname)
1140 			return -ENOMEM;
1141 		return 0;
1142 	case Opt_allocsize:
1143 		if (suffix_kstrtoint(param->string, 10, &size))
1144 			return -EINVAL;
1145 		mp->m_allocsize_log = ffs(size) - 1;
1146 		mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
1147 		return 0;
1148 	case Opt_grpid:
1149 	case Opt_bsdgroups:
1150 		mp->m_flags |= XFS_MOUNT_GRPID;
1151 		return 0;
1152 	case Opt_nogrpid:
1153 	case Opt_sysvgroups:
1154 		mp->m_flags &= ~XFS_MOUNT_GRPID;
1155 		return 0;
1156 	case Opt_wsync:
1157 		mp->m_flags |= XFS_MOUNT_WSYNC;
1158 		return 0;
1159 	case Opt_norecovery:
1160 		mp->m_flags |= XFS_MOUNT_NORECOVERY;
1161 		return 0;
1162 	case Opt_noalign:
1163 		mp->m_flags |= XFS_MOUNT_NOALIGN;
1164 		return 0;
1165 	case Opt_swalloc:
1166 		mp->m_flags |= XFS_MOUNT_SWALLOC;
1167 		return 0;
1168 	case Opt_sunit:
1169 		mp->m_dalign = result.uint_32;
1170 		return 0;
1171 	case Opt_swidth:
1172 		mp->m_swidth = result.uint_32;
1173 		return 0;
1174 	case Opt_inode32:
1175 		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1176 		return 0;
1177 	case Opt_inode64:
1178 		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1179 		return 0;
1180 	case Opt_nouuid:
1181 		mp->m_flags |= XFS_MOUNT_NOUUID;
1182 		return 0;
1183 	case Opt_ikeep:
1184 		mp->m_flags |= XFS_MOUNT_IKEEP;
1185 		return 0;
1186 	case Opt_noikeep:
1187 		mp->m_flags &= ~XFS_MOUNT_IKEEP;
1188 		return 0;
1189 	case Opt_largeio:
1190 		mp->m_flags |= XFS_MOUNT_LARGEIO;
1191 		return 0;
1192 	case Opt_nolargeio:
1193 		mp->m_flags &= ~XFS_MOUNT_LARGEIO;
1194 		return 0;
1195 	case Opt_attr2:
1196 		mp->m_flags |= XFS_MOUNT_ATTR2;
1197 		return 0;
1198 	case Opt_noattr2:
1199 		mp->m_flags &= ~XFS_MOUNT_ATTR2;
1200 		mp->m_flags |= XFS_MOUNT_NOATTR2;
1201 		return 0;
1202 	case Opt_filestreams:
1203 		mp->m_flags |= XFS_MOUNT_FILESTREAMS;
1204 		return 0;
1205 	case Opt_noquota:
1206 		mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1207 		mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1208 		mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
1209 		return 0;
1210 	case Opt_quota:
1211 	case Opt_uquota:
1212 	case Opt_usrquota:
1213 		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
1214 				 XFS_UQUOTA_ENFD);
1215 		return 0;
1216 	case Opt_qnoenforce:
1217 	case Opt_uqnoenforce:
1218 		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
1219 		mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1220 		return 0;
1221 	case Opt_pquota:
1222 	case Opt_prjquota:
1223 		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
1224 				 XFS_PQUOTA_ENFD);
1225 		return 0;
1226 	case Opt_pqnoenforce:
1227 		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
1228 		mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1229 		return 0;
1230 	case Opt_gquota:
1231 	case Opt_grpquota:
1232 		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
1233 				 XFS_GQUOTA_ENFD);
1234 		return 0;
1235 	case Opt_gqnoenforce:
1236 		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
1237 		mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1238 		return 0;
1239 	case Opt_discard:
1240 		mp->m_flags |= XFS_MOUNT_DISCARD;
1241 		return 0;
1242 	case Opt_nodiscard:
1243 		mp->m_flags &= ~XFS_MOUNT_DISCARD;
1244 		return 0;
1245 #ifdef CONFIG_FS_DAX
1246 	case Opt_dax:
1247 		mp->m_flags |= XFS_MOUNT_DAX;
1248 		return 0;
1249 #endif
1250 	default:
1251 		xfs_warn(mp, "unknown mount option [%s].", param->key);
1252 		return -EINVAL;
1253 	}
1254 
1255 	return 0;
1256 }
1257 
1258 static int
1259 xfs_fc_validate_params(
1260 	struct xfs_mount	*mp)
1261 {
1262 	/*
1263 	 * no recovery flag requires a read-only mount
1264 	 */
1265 	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
1266 	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1267 		xfs_warn(mp, "no-recovery mounts must be read-only.");
1268 		return -EINVAL;
1269 	}
1270 
1271 	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
1272 	    (mp->m_dalign || mp->m_swidth)) {
1273 		xfs_warn(mp,
1274 	"sunit and swidth options incompatible with the noalign option");
1275 		return -EINVAL;
1276 	}
1277 
1278 	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1279 		xfs_warn(mp, "quota support not available in this kernel.");
1280 		return -EINVAL;
1281 	}
1282 
1283 	if ((mp->m_dalign && !mp->m_swidth) ||
1284 	    (!mp->m_dalign && mp->m_swidth)) {
1285 		xfs_warn(mp, "sunit and swidth must be specified together");
1286 		return -EINVAL;
1287 	}
1288 
1289 	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1290 		xfs_warn(mp,
1291 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1292 			mp->m_swidth, mp->m_dalign);
1293 		return -EINVAL;
1294 	}
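	/*
	 * For example, sunit=128,swidth=512 passes (four stripe units per
	 * stripe width), while sunit=128,swidth=300 is rejected here.
	 */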
1295 
1296 	if (mp->m_logbufs != -1 &&
1297 	    mp->m_logbufs != 0 &&
1298 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1299 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1300 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1301 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1302 		return -EINVAL;
1303 	}
1304 
1305 	if (mp->m_logbsize != -1 &&
1306 	    mp->m_logbsize !=  0 &&
1307 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1308 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1309 	     !is_power_of_2(mp->m_logbsize))) {
1310 		xfs_warn(mp,
1311 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1312 			mp->m_logbsize);
1313 		return -EINVAL;
1314 	}
1315 
1316 	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
1317 	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1318 	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1319 		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1320 			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1321 		return -EINVAL;
1322 	}
1323 
1324 	return 0;
1325 }
1326 
1327 static int
1328 xfs_fc_fill_super(
1329 	struct super_block	*sb,
1330 	struct fs_context	*fc)
1331 {
1332 	struct xfs_mount	*mp = sb->s_fs_info;
1333 	struct inode		*root;
1334 	int			flags = 0, error;
1335 
1336 	mp->m_super = sb;
1337 
1338 	error = xfs_fc_validate_params(mp);
1339 	if (error)
1340 		goto out_free_names;
1341 
1342 	sb_min_blocksize(sb, BBSIZE);
1343 	sb->s_xattr = xfs_xattr_handlers;
1344 	sb->s_export_op = &xfs_export_operations;
1345 #ifdef CONFIG_XFS_QUOTA
1346 	sb->s_qcop = &xfs_quotactl_operations;
1347 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1348 #endif
1349 	sb->s_op = &xfs_super_operations;
1350 
1351 	/*
1352 	 * Delay mount work if the debug hook is set. This is debug
1353 	 * instrumentation to coordinate simulation of xfs mount failures with
1354 	 * VFS superblock operations.
1355 	 */
1356 	if (xfs_globals.mount_delay) {
1357 		xfs_notice(mp, "Delaying mount for %d seconds.",
1358 			xfs_globals.mount_delay);
1359 		msleep(xfs_globals.mount_delay * 1000);
1360 	}
1361 
1362 	if (fc->sb_flags & SB_SILENT)
1363 		flags |= XFS_MFSI_QUIET;
1364 
1365 	error = xfs_open_devices(mp);
1366 	if (error)
1367 		goto out_free_names;
1368 
1369 	error = xfs_init_mount_workqueues(mp);
1370 	if (error)
1371 		goto out_close_devices;
1372 
1373 	error = xfs_init_percpu_counters(mp);
1374 	if (error)
1375 		goto out_destroy_workqueues;
1376 
1377 	/* Allocate stats memory before we do operations that might use it */
1378 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1379 	if (!mp->m_stats.xs_stats) {
1380 		error = -ENOMEM;
1381 		goto out_destroy_counters;
1382 	}
1383 
1384 	error = xfs_readsb(mp, flags);
1385 	if (error)
1386 		goto out_free_stats;
1387 
1388 	error = xfs_finish_flags(mp);
1389 	if (error)
1390 		goto out_free_sb;
1391 
1392 	error = xfs_setup_devices(mp);
1393 	if (error)
1394 		goto out_free_sb;
1395 
1396 	/*
1397 	 * XFS block mappings use 54 bits to store the logical block offset.
1398 	 * This should suffice to handle the maximum file size that the VFS
1399 	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1400 	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1401 	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1402 	 * to check this assertion.
1403 	 *
1404 	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1405 	 * maximum pagecache offset in units of fs blocks.
1406 	 */
1407 	if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
1408 		xfs_warn(mp,
1409 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1410 			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1411 			 XFS_MAX_FILEOFF);
1412 		error = -EINVAL;
1413 		goto out_free_sb;
1414 	}
1415 
1416 	error = xfs_filestream_mount(mp);
1417 	if (error)
1418 		goto out_free_sb;
1419 
1420 	/*
1421 	 * we must configure the block size in the superblock before we run the
1422 	 * full mount process as the mount process can look up and cache inodes.
1423 	 */
1424 	sb->s_magic = XFS_SUPER_MAGIC;
1425 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1426 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1427 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1428 	sb->s_max_links = XFS_MAXLINK;
1429 	sb->s_time_gran = 1;
1430 	sb->s_time_min = S32_MIN;
1431 	sb->s_time_max = S32_MAX;
1432 	sb->s_iflags |= SB_I_CGROUPWB;
1433 
1434 	set_posix_acl_flag(sb);
1435 
1436 	/* version 5 superblocks support inode version counters. */
1437 	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1438 		sb->s_flags |= SB_I_VERSION;
1439 
1440 	if (mp->m_flags & XFS_MOUNT_DAX) {
1441 		bool rtdev_is_dax = false, datadev_is_dax;
1442 
1443 		xfs_warn(mp,
1444 		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1445 
1446 		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1447 			sb->s_blocksize);
1448 		if (mp->m_rtdev_targp)
1449 			rtdev_is_dax = bdev_dax_supported(
1450 				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1451 		if (!rtdev_is_dax && !datadev_is_dax) {
1452 			xfs_alert(mp,
1453 			"DAX unsupported by block device. Turning off DAX.");
1454 			mp->m_flags &= ~XFS_MOUNT_DAX;
1455 		}
1456 		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1457 			xfs_alert(mp,
1458 		"DAX and reflink cannot be used together!");
1459 			error = -EINVAL;
1460 			goto out_filestream_unmount;
1461 		}
1462 	}
1463 
1464 	if (mp->m_flags & XFS_MOUNT_DISCARD) {
1465 		struct request_queue *q = bdev_get_queue(sb->s_bdev);
1466 
1467 		if (!blk_queue_discard(q)) {
1468 			xfs_warn(mp, "mounting with \"discard\" option, but "
1469 					"the device does not support discard");
1470 			mp->m_flags &= ~XFS_MOUNT_DISCARD;
1471 		}
1472 	}
1473 
1474 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1475 		if (mp->m_sb.sb_rblocks) {
1476 			xfs_alert(mp,
1477 	"reflink not compatible with realtime device!");
1478 			error = -EINVAL;
1479 			goto out_filestream_unmount;
1480 		}
1481 
1482 		if (xfs_globals.always_cow) {
1483 			xfs_info(mp, "using DEBUG-only always_cow mode.");
1484 			mp->m_always_cow = true;
1485 		}
1486 	}
1487 
1488 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1489 		xfs_alert(mp,
1490 	"reverse mapping btree not compatible with realtime device!");
1491 		error = -EINVAL;
1492 		goto out_filestream_unmount;
1493 	}
1494 
1495 	error = xfs_mountfs(mp);
1496 	if (error)
1497 		goto out_filestream_unmount;
1498 
1499 	root = igrab(VFS_I(mp->m_rootip));
1500 	if (!root) {
1501 		error = -ENOENT;
1502 		goto out_unmount;
1503 	}
1504 	sb->s_root = d_make_root(root);
1505 	if (!sb->s_root) {
1506 		error = -ENOMEM;
1507 		goto out_unmount;
1508 	}
1509 
1510 	return 0;
1511 
1512  out_filestream_unmount:
1513 	xfs_filestream_unmount(mp);
1514  out_free_sb:
1515 	xfs_freesb(mp);
1516  out_free_stats:
1517 	free_percpu(mp->m_stats.xs_stats);
1518  out_destroy_counters:
1519 	xfs_destroy_percpu_counters(mp);
1520  out_destroy_workqueues:
1521 	xfs_destroy_mount_workqueues(mp);
1522  out_close_devices:
1523 	xfs_close_devices(mp);
1524  out_free_names:
1525 	sb->s_fs_info = NULL;
1526 	xfs_mount_free(mp);
1527 	return error;
1528 
1529  out_unmount:
1530 	xfs_filestream_unmount(mp);
1531 	xfs_unmountfs(mp);
1532 	goto out_free_sb;
1533 }
1534 
1535 static int
1536 xfs_fc_get_tree(
1537 	struct fs_context	*fc)
1538 {
1539 	return get_tree_bdev(fc, xfs_fc_fill_super);
1540 }
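/*
 * get_tree_bdev() opens the block device named in the mount request,
 * creates (or finds) the superblock for it and then calls
 * xfs_fc_fill_super() above to do the XFS-specific part of the mount.
 */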
1541 
1542 static int
1543 xfs_remount_rw(
1544 	struct xfs_mount	*mp)
1545 {
1546 	struct xfs_sb		*sbp = &mp->m_sb;
1547 	int error;
1548 
1549 	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1550 		xfs_warn(mp,
1551 			"ro->rw transition prohibited on norecovery mount");
1552 		return -EINVAL;
1553 	}
1554 
1555 	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1556 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1557 		xfs_warn(mp,
1558 	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1559 			(sbp->sb_features_ro_compat &
1560 				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1561 		return -EINVAL;
1562 	}
1563 
1564 	mp->m_flags &= ~XFS_MOUNT_RDONLY;
1565 
1566 	/*
1567 	 * If this is the first remount to writeable state we might have some
1568 	 * superblock changes to update.
1569 	 */
1570 	if (mp->m_update_sb) {
1571 		error = xfs_sync_sb(mp, false);
1572 		if (error) {
1573 			xfs_warn(mp, "failed to write sb changes");
1574 			return error;
1575 		}
1576 		mp->m_update_sb = false;
1577 	}
1578 
1579 	/*
1580 	 * Fill out the reserve pool if it is empty. Use the stashed value if
1581 	 * it is non-zero, otherwise go with the default.
1582 	 */
1583 	xfs_restore_resvblks(mp);
1584 	xfs_log_work_queue(mp);
1585 
1586 	/* Recover any CoW blocks that never got remapped. */
1587 	error = xfs_reflink_recover_cow(mp);
1588 	if (error) {
1589 		xfs_err(mp,
1590 			"Error %d recovering leftover CoW allocations.", error);
1591 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1592 		return error;
1593 	}
1594 	xfs_start_block_reaping(mp);
1595 
1596 	/* Create the per-AG metadata reservation pool. */
1597 	error = xfs_fs_reserve_ag_blocks(mp);
1598 	if (error && error != -ENOSPC)
1599 		return error;
1600 
1601 	return 0;
1602 }
1603 
1604 static int
1605 xfs_remount_ro(
1606 	struct xfs_mount	*mp)
1607 {
1608 	int error;
1609 
1610 	/*
1611 	 * Cancel background eofb scanning so it cannot race with the final
1612 	 * log force+buftarg wait and deadlock the remount.
1613 	 */
1614 	xfs_stop_block_reaping(mp);
1615 
1616 	/* Get rid of any leftover CoW reservations... */
1617 	error = xfs_icache_free_cowblocks(mp, NULL);
1618 	if (error) {
1619 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1620 		return error;
1621 	}
1622 
1623 	/* Free the per-AG metadata reservation pool. */
1624 	error = xfs_fs_unreserve_ag_blocks(mp);
1625 	if (error) {
1626 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1627 		return error;
1628 	}
1629 
1630 	/*
1631 	 * Before we sync the metadata, we need to free up the reserve block
1632 	 * pool so that the used block count in the superblock on disk is
1633 	 * correct at the end of the remount. Stash the current reserve pool
1634 	 * size so that if we get remounted rw, we can return it to the same
1635 	 * size.
1636 	 */
1637 	xfs_save_resvblks(mp);
1638 
1639 	xfs_quiesce_attr(mp);
1640 	mp->m_flags |= XFS_MOUNT_RDONLY;
1641 
1642 	return 0;
1643 }
1644 
1645 /*
1646  * Logically we would return an error here to prevent users from believing
1647  * they might have changed mount options using remount which can't be changed.
1648  *
1649  * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1650  * arguments in some cases so we can't blindly reject options, but have to
1651  * check for each specified option if it actually differs from the currently
1652  * set option and only reject it if that's the case.
1653  *
1654  * Until that is implemented we return success for every remount request, and
1655  * silently ignore all options that we can't actually change.
1656  */
1657 static int
1658 xfs_fc_reconfigure(
1659 	struct fs_context *fc)
1660 {
1661 	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1662 	struct xfs_mount	*new_mp = fc->s_fs_info;
1663 	xfs_sb_t		*sbp = &mp->m_sb;
1664 	int			flags = fc->sb_flags;
1665 	int			error;
1666 
1667 	error = xfs_fc_validate_params(new_mp);
1668 	if (error)
1669 		return error;
1670 
1671 	sync_filesystem(mp->m_super);
1672 
1673 	/* inode32 -> inode64 */
1674 	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1675 	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1676 		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1677 		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1678 	}
1679 
1680 	/* inode64 -> inode32 */
1681 	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1682 	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1683 		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1684 		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1685 	}
1686 
1687 	/* ro -> rw */
1688 	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
1689 		error = xfs_remount_rw(mp);
1690 		if (error)
1691 			return error;
1692 	}
1693 
1694 	/* rw -> ro */
1695 	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
1696 		error = xfs_remount_ro(mp);
1697 		if (error)
1698 			return error;
1699 	}
1700 
1701 	return 0;
1702 }
1703 
1704 static void xfs_fc_free(
1705 	struct fs_context	*fc)
1706 {
1707 	struct xfs_mount	*mp = fc->s_fs_info;
1708 
1709 	/*
1710 	 * mp is stored in the fs_context when it is initialized.
1711 	 * mp is transferred to the superblock on a successful mount,
1712 	 * but if an error occurs before the transfer we have to free
1713 	 * it here.
1714 	 */
1715 	if (mp)
1716 		xfs_mount_free(mp);
1717 }
1718 
1719 static const struct fs_context_operations xfs_context_ops = {
1720 	.parse_param = xfs_fc_parse_param,
1721 	.get_tree    = xfs_fc_get_tree,
1722 	.reconfigure = xfs_fc_reconfigure,
1723 	.free        = xfs_fc_free,
1724 };
1725 
1726 static int xfs_init_fs_context(
1727 	struct fs_context	*fc)
1728 {
1729 	struct xfs_mount	*mp;
1730 
1731 	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1732 	if (!mp)
1733 		return -ENOMEM;
1734 
1735 	spin_lock_init(&mp->m_sb_lock);
1736 	spin_lock_init(&mp->m_agirotor_lock);
1737 	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1738 	spin_lock_init(&mp->m_perag_lock);
1739 	mutex_init(&mp->m_growlock);
1740 	atomic_set(&mp->m_active_trans, 0);
1741 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1742 	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
1743 	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
1744 	mp->m_kobj.kobject.kset = xfs_kset;
1745 	/*
1746 	 * We don't create the finobt per-ag space reservation until after log
1747 	 * recovery, so we must set this to true so that an ifree transaction
1748 	 * started during log recovery will not depend on space reservations
1749 	 * for finobt expansion.
1750 	 */
1751 	mp->m_finobt_nores = true;
1752 
1753 	/*
1754 	 * These can be overridden by the mount option parsing.
1755 	 */
1756 	mp->m_logbufs = -1;
1757 	mp->m_logbsize = -1;
1758 	mp->m_allocsize_log = 16; /* 64k */
1759 
1760 	/*
1761 	 * Copy binary VFS mount flags we are interested in.
1762 	 */
1763 	if (fc->sb_flags & SB_RDONLY)
1764 		mp->m_flags |= XFS_MOUNT_RDONLY;
1765 	if (fc->sb_flags & SB_DIRSYNC)
1766 		mp->m_flags |= XFS_MOUNT_DIRSYNC;
1767 	if (fc->sb_flags & SB_SYNCHRONOUS)
1768 		mp->m_flags |= XFS_MOUNT_WSYNC;
1769 
1770 	fc->s_fs_info = mp;
1771 	fc->ops = &xfs_context_ops;
1772 
1773 	return 0;
1774 }
1775 
1776 static struct file_system_type xfs_fs_type = {
1777 	.owner			= THIS_MODULE,
1778 	.name			= "xfs",
1779 	.init_fs_context	= xfs_init_fs_context,
1780 	.parameters		= xfs_fs_parameters,
1781 	.kill_sb		= kill_block_super,
1782 	.fs_flags		= FS_REQUIRES_DEV,
1783 };
1784 MODULE_ALIAS_FS("xfs");
1785 
1786 STATIC int __init
1787 xfs_init_zones(void)
1788 {
1789 	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
1790 						sizeof(struct xlog_ticket),
1791 						0, 0, NULL);
1792 	if (!xfs_log_ticket_zone)
1793 		goto out;
1794 
1795 	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
1796 					sizeof(struct xfs_extent_free_item),
1797 					0, 0, NULL);
1798 	if (!xfs_bmap_free_item_zone)
1799 		goto out_destroy_log_ticket_zone;
1800 
1801 	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
1802 					       sizeof(struct xfs_btree_cur),
1803 					       0, 0, NULL);
1804 	if (!xfs_btree_cur_zone)
1805 		goto out_destroy_bmap_free_item_zone;
1806 
1807 	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
1808 					      sizeof(struct xfs_da_state),
1809 					      0, 0, NULL);
1810 	if (!xfs_da_state_zone)
1811 		goto out_destroy_btree_cur_zone;
1812 
1813 	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
1814 					   sizeof(struct xfs_ifork),
1815 					   0, 0, NULL);
1816 	if (!xfs_ifork_zone)
1817 		goto out_destroy_da_state_zone;
1818 
1819 	xfs_trans_zone = kmem_cache_create("xfs_trans",
1820 					   sizeof(struct xfs_trans),
1821 					   0, 0, NULL);
1822 	if (!xfs_trans_zone)
1823 		goto out_destroy_ifork_zone;
1824 
1825 
1826 	/*
1827 	 * The size of the zone allocated buf log item is the maximum
1828 	 * size possible under XFS.  This wastes a little bit of memory,
1829 	 * but it is much faster.
1830 	 */
1831 	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
1832 					      sizeof(struct xfs_buf_log_item),
1833 					      0, 0, NULL);
1834 	if (!xfs_buf_item_zone)
1835 		goto out_destroy_trans_zone;
1836 
1837 	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
1838 					(sizeof(struct xfs_efd_log_item) +
1839 					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
1840 					sizeof(struct xfs_extent)),
1841 					0, 0, NULL);
1842 	if (!xfs_efd_zone)
1843 		goto out_destroy_buf_item_zone;
1844 
1845 	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
1846 					 (sizeof(struct xfs_efi_log_item) +
1847 					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
1848 					 sizeof(struct xfs_extent)),
1849 					 0, 0, NULL);
1850 	if (!xfs_efi_zone)
1851 		goto out_destroy_efd_zone;
1852 
1853 	xfs_inode_zone = kmem_cache_create("xfs_inode",
1854 					   sizeof(struct xfs_inode), 0,
1855 					   (SLAB_HWCACHE_ALIGN |
1856 					    SLAB_RECLAIM_ACCOUNT |
1857 					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1858 					   xfs_fs_inode_init_once);
1859 	if (!xfs_inode_zone)
1860 		goto out_destroy_efi_zone;
1861 
1862 	xfs_ili_zone = kmem_cache_create("xfs_ili",
1863 					 sizeof(struct xfs_inode_log_item), 0,
1864 					 SLAB_MEM_SPREAD, NULL);
1865 	if (!xfs_ili_zone)
1866 		goto out_destroy_inode_zone;
1867 
1868 	xfs_icreate_zone = kmem_cache_create("xfs_icr",
1869 					     sizeof(struct xfs_icreate_item),
1870 					     0, 0, NULL);
1871 	if (!xfs_icreate_zone)
1872 		goto out_destroy_ili_zone;
1873 
1874 	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
1875 					 sizeof(struct xfs_rud_log_item),
1876 					 0, 0, NULL);
1877 	if (!xfs_rud_zone)
1878 		goto out_destroy_icreate_zone;
1879 
1880 	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
1881 			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
1882 			0, 0, NULL);
1883 	if (!xfs_rui_zone)
1884 		goto out_destroy_rud_zone;
1885 
1886 	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
1887 					 sizeof(struct xfs_cud_log_item),
1888 					 0, 0, NULL);
1889 	if (!xfs_cud_zone)
1890 		goto out_destroy_rui_zone;
1891 
1892 	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
1893 			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
1894 			0, 0, NULL);
1895 	if (!xfs_cui_zone)
1896 		goto out_destroy_cud_zone;
1897 
1898 	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
1899 					 sizeof(struct xfs_bud_log_item),
1900 					 0, 0, NULL);
1901 	if (!xfs_bud_zone)
1902 		goto out_destroy_cui_zone;
1903 
1904 	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
1905 			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
1906 			0, 0, NULL);
1907 	if (!xfs_bui_zone)
1908 		goto out_destroy_bud_zone;
1909 
1910 	return 0;
1911 
1912  out_destroy_bud_zone:
1913 	kmem_cache_destroy(xfs_bud_zone);
1914  out_destroy_cui_zone:
1915 	kmem_cache_destroy(xfs_cui_zone);
1916  out_destroy_cud_zone:
1917 	kmem_cache_destroy(xfs_cud_zone);
1918  out_destroy_rui_zone:
1919 	kmem_cache_destroy(xfs_rui_zone);
1920  out_destroy_rud_zone:
1921 	kmem_cache_destroy(xfs_rud_zone);
1922  out_destroy_icreate_zone:
1923 	kmem_cache_destroy(xfs_icreate_zone);
1924  out_destroy_ili_zone:
1925 	kmem_cache_destroy(xfs_ili_zone);
1926  out_destroy_inode_zone:
1927 	kmem_cache_destroy(xfs_inode_zone);
1928  out_destroy_efi_zone:
1929 	kmem_cache_destroy(xfs_efi_zone);
1930  out_destroy_efd_zone:
1931 	kmem_cache_destroy(xfs_efd_zone);
1932  out_destroy_buf_item_zone:
1933 	kmem_cache_destroy(xfs_buf_item_zone);
1934  out_destroy_trans_zone:
1935 	kmem_cache_destroy(xfs_trans_zone);
1936  out_destroy_ifork_zone:
1937 	kmem_cache_destroy(xfs_ifork_zone);
1938  out_destroy_da_state_zone:
1939 	kmem_cache_destroy(xfs_da_state_zone);
1940  out_destroy_btree_cur_zone:
1941 	kmem_cache_destroy(xfs_btree_cur_zone);
1942  out_destroy_bmap_free_item_zone:
1943 	kmem_cache_destroy(xfs_bmap_free_item_zone);
1944  out_destroy_log_ticket_zone:
1945 	kmem_cache_destroy(xfs_log_ticket_zone);
1946  out:
1947 	return -ENOMEM;
1948 }
1949 
1950 STATIC void
1951 xfs_destroy_zones(void)
1952 {
1953 	/*
1954 	 * Make sure all delayed rcu free are flushed before we
1955 	 * destroy caches.
1956 	 */
1957 	rcu_barrier();
1958 	kmem_cache_destroy(xfs_bui_zone);
1959 	kmem_cache_destroy(xfs_bud_zone);
1960 	kmem_cache_destroy(xfs_cui_zone);
1961 	kmem_cache_destroy(xfs_cud_zone);
1962 	kmem_cache_destroy(xfs_rui_zone);
1963 	kmem_cache_destroy(xfs_rud_zone);
1964 	kmem_cache_destroy(xfs_icreate_zone);
1965 	kmem_cache_destroy(xfs_ili_zone);
1966 	kmem_cache_destroy(xfs_inode_zone);
1967 	kmem_cache_destroy(xfs_efi_zone);
1968 	kmem_cache_destroy(xfs_efd_zone);
1969 	kmem_cache_destroy(xfs_buf_item_zone);
1970 	kmem_cache_destroy(xfs_trans_zone);
1971 	kmem_cache_destroy(xfs_ifork_zone);
1972 	kmem_cache_destroy(xfs_da_state_zone);
1973 	kmem_cache_destroy(xfs_btree_cur_zone);
1974 	kmem_cache_destroy(xfs_bmap_free_item_zone);
1975 	kmem_cache_destroy(xfs_log_ticket_zone);
1976 }
1977 
1978 STATIC int __init
1979 xfs_init_workqueues(void)
1980 {
1981 	/*
1982 	 * The allocation workqueue can be used in memory reclaim situations
1983 	 * (writepage path), and parallelism is only limited by the number of
1984 	 * AGs in all the filesystems mounted. Hence use the default large
1985 	 * max_active value for this workqueue.
1986 	 */
1987 	xfs_alloc_wq = alloc_workqueue("xfsalloc",
1988 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
1989 	if (!xfs_alloc_wq)
1990 		return -ENOMEM;
1991 
1992 	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
1993 	if (!xfs_discard_wq)
1994 		goto out_free_alloc_wq;
1995 
1996 	return 0;
1997 out_free_alloc_wq:
1998 	destroy_workqueue(xfs_alloc_wq);
1999 	return -ENOMEM;
2000 }
2001 
2002 STATIC void
2003 xfs_destroy_workqueues(void)
2004 {
2005 	destroy_workqueue(xfs_discard_wq);
2006 	destroy_workqueue(xfs_alloc_wq);
2007 }
2008 
2009 STATIC int __init
2010 init_xfs_fs(void)
2011 {
2012 	int			error;
2013 
2014 	xfs_check_ondisk_structs();
2015 
2016 	printk(KERN_INFO XFS_VERSION_STRING " with "
2017 			 XFS_BUILD_OPTIONS " enabled\n");
2018 
2019 	xfs_dir_startup();
2020 
2021 	error = xfs_init_zones();
2022 	if (error)
2023 		goto out;
2024 
2025 	error = xfs_init_workqueues();
2026 	if (error)
2027 		goto out_destroy_zones;
2028 
2029 	error = xfs_mru_cache_init();
2030 	if (error)
2031 		goto out_destroy_wq;
2032 
2033 	error = xfs_buf_init();
2034 	if (error)
2035 		goto out_mru_cache_uninit;
2036 
2037 	error = xfs_init_procfs();
2038 	if (error)
2039 		goto out_buf_terminate;
2040 
2041 	error = xfs_sysctl_register();
2042 	if (error)
2043 		goto out_cleanup_procfs;
2044 
2045 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2046 	if (!xfs_kset) {
2047 		error = -ENOMEM;
2048 		goto out_sysctl_unregister;
2049 	}
2050 
2051 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2052 
2053 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2054 	if (!xfsstats.xs_stats) {
2055 		error = -ENOMEM;
2056 		goto out_kset_unregister;
2057 	}
2058 
2059 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2060 			       "stats");
2061 	if (error)
2062 		goto out_free_stats;
2063 
2064 #ifdef DEBUG
2065 	xfs_dbg_kobj.kobject.kset = xfs_kset;
2066 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2067 	if (error)
2068 		goto out_remove_stats_kobj;
2069 #endif
2070 
2071 	error = xfs_qm_init();
2072 	if (error)
2073 		goto out_remove_dbg_kobj;
2074 
2075 	error = register_filesystem(&xfs_fs_type);
2076 	if (error)
2077 		goto out_qm_exit;
2078 	return 0;
2079 
2080  out_qm_exit:
2081 	xfs_qm_exit();
2082  out_remove_dbg_kobj:
2083 #ifdef DEBUG
2084 	xfs_sysfs_del(&xfs_dbg_kobj);
2085  out_remove_stats_kobj:
2086 #endif
2087 	xfs_sysfs_del(&xfsstats.xs_kobj);
2088  out_free_stats:
2089 	free_percpu(xfsstats.xs_stats);
2090  out_kset_unregister:
2091 	kset_unregister(xfs_kset);
2092  out_sysctl_unregister:
2093 	xfs_sysctl_unregister();
2094  out_cleanup_procfs:
2095 	xfs_cleanup_procfs();
2096  out_buf_terminate:
2097 	xfs_buf_terminate();
2098  out_mru_cache_uninit:
2099 	xfs_mru_cache_uninit();
2100  out_destroy_wq:
2101 	xfs_destroy_workqueues();
2102  out_destroy_zones:
2103 	xfs_destroy_zones();
2104  out:
2105 	return error;
2106 }
2107 
2108 STATIC void __exit
2109 exit_xfs_fs(void)
2110 {
2111 	xfs_qm_exit();
2112 	unregister_filesystem(&xfs_fs_type);
2113 #ifdef DEBUG
2114 	xfs_sysfs_del(&xfs_dbg_kobj);
2115 #endif
2116 	xfs_sysfs_del(&xfsstats.xs_kobj);
2117 	free_percpu(xfsstats.xs_stats);
2118 	kset_unregister(xfs_kset);
2119 	xfs_sysctl_unregister();
2120 	xfs_cleanup_procfs();
2121 	xfs_buf_terminate();
2122 	xfs_mru_cache_uninit();
2123 	xfs_destroy_workqueues();
2124 	xfs_destroy_zones();
2125 	xfs_uuid_table_free();
2126 }
2127 
2128 module_init(init_xfs_fs);
2129 module_exit(exit_xfs_fs);
2130 
2131 MODULE_AUTHOR("Silicon Graphics, Inc.");
2132 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2133 MODULE_LICENSE("GPL");
2134