// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_quota.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_ag.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);

STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

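			/*
			 * A dquot's id is also its index in the radix tree,
			 * so stepping past the last id we saw keeps the gang
			 * lookup moving forward across batches.
			 */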
			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

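	/*
	 * One or more dquots were skipped with -EAGAIN; give their holders
	 * a moment to drop them, then rescan from the start.
	 */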
	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
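	/*
	 * Leave the dquot alone if another task is already freeing it or if
	 * it still has active references; returning -EAGAIN makes the walk
	 * retry it later.
	 */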
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		xfs_dqflock(dqp);
	}

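	/*
	 * The dquot is now clean: it must be unpinned, and it can only still
	 * sit on the AIL if the log has already been shut down.
	 */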
	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Purge the dquot cache.
 */
static void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp)
{
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that the root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			xfs_irele(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			xfs_irele(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			xfs_irele(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * the dquot and returns it locked.  This can return ENOENT if the
	 * dquot didn't exist on disk and we didn't ask it to allocate; ESRCH
	 * if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

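/*
 * Decide whether this inode needs its dquots attached: quotas must be
 * enabled, the dquots must not be attached already, and the quota inodes
 * themselves never carry dquots.
 */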
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if that hasn't already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

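/*
 * State for the dquot LRU isolation walk: buffers of flushed dirty dquots
 * are queued on @buffers for delwri submission, while clean dquots that can
 * be freed are moved onto @dispose.
 */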
struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * If something else is freeing this dquot and hasn't yet removed it
	 * from the LRU, leave it for the freeing task to complete the freeing
	 * process rather than risk it being freed from under us here.
	 */
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_miss_unlock;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_miss_unlock;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_unlock:
	xfs_dqunlock(dqp);
out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

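	/*
	 * We dropped the LRU lock to flush the dquot, so the list walk is no
	 * longer valid; re-take the lock and tell the walker to restart via
	 * LRU_RETRY.
	 */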
out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

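	/*
	 * Only reclaim when the caller allows both filesystem recursion and
	 * blocking direct reclaim, since flushing dirty dquots issues I/O.
	 */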
	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

STATIC void
xfs_qm_set_defquota(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_dquot	*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));

	/*
	 * Timers and warnings have already been set, so let's just set the
	 * default limits for this quota type.
	 */
	defq->blk.hard = dqp->q_blk.hardlimit;
	defq->blk.soft = dqp->q_blk.softlimit;
	defq->ino.hard = dqp->q_ino.hardlimit;
	defq->ino.soft = dqp->q_ino.softlimit;
	defq->rtb.hard = dqp->q_rtb.hardlimit;
	defq->rtb.soft = dqp->q_rtb.softlimit;
	xfs_qm_dqdestroy(dqp);
}

/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot	*dqp;
	int			error;

	defq = xfs_get_defquota(qinf, type);

	defq->blk.time = XFS_QM_BTIMELIMIT;
	defq->ino.time = XFS_QM_ITIMELIMIT;
	defq->rtb.time = XFS_QM_RTBTIMELIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	/*
	 * The warnings and timers set the grace period given to a user or
	 * group before they can no longer write. If it is zero, a default
	 * is used.
	 */
	if (dqp->q_blk.timer)
		defq->blk.time = dqp->q_blk.timer;
	if (dqp->q_ino.timer)
		defq->ino.time = dqp->q_ino.timer;
	if (dqp->q_rtb.timer)
		defq->rtb.time = dqp->q_rtb.timer;

	xfs_qm_dqdestroy(dqp);
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if the quota inodes are set up, and if not, allocate them
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

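	/* One radix tree per quota type, each indexed by dquot id. */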
	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
	if (xfs_has_bigtime(mp)) {
		qinf->qi_expiry_min =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
		qinf->qi_expiry_max =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
	} else {
		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
	}
	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
			qinf->qi_expiry_max);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
				  mp->m_super->s_id);
	if (error)
		goto out_free_inos;

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	struct xfs_mount	*mp,
	struct xfs_inode	**ipp,
	unsigned int		flags)
{
	struct xfs_trans	*tp;
	int			error;
	bool			need_alloc = true;

	*ipp = NULL;
	/*
	 * On a superblock that doesn't have a separate pquotino, we share
	 * an inode between gquota and pquota. If the on-disk superblock has
	 * GQUOTA and the filesystem is now mounted with PQUOTA, just use
	 * sb_gquotino for sb_pquotino and vice-versa.
	 */
	if (!xfs_has_pquotino(mp) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO))
				return -EFSCORRUPTED;
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO))
				return -EFSCORRUPTED;
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

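	/*
	 * Space for the inode allocation is only reserved when we are
	 * actually creating a new quota inode rather than reusing one.
	 */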
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		xfs_ino_t	ino;

		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
		if (!error)
			error = xfs_init_new_inode(&nop_mnt_idmap, tp, NULL, ino,
					S_IFREG, 1, 0, 0, false, ipp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_has_quota(mp));

		xfs_add_quota(mp);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ipp);
	return error;
}


STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(struct xfs_dqblk);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset the type in case we are reusing the group quota file
		 * for project quotas or vice versa.
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
			if (xfs_has_bigtime(mp))
				ddq->d_type |= XFS_DQTYPE_BIGTIME;
		}

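		/*
		 * The dquot was modified directly in the buffer, so the CRC
		 * must be recomputed before the buffer is written out.
		 */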
		if (xfs_has_crc(mp)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we
		 * can repair the damage via xfs_qm_reset_dqcounts(). This
		 * process will leave a trace in the log indicating corruption
		 * has been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* go to the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

969c59d87c4SChristoph Hellwig /*
97028b9060bSDarrick J. Wong  * Iterate over all allocated dquot blocks in this quota inode, zeroing all
97128b9060bSDarrick J. Wong  * counters for every chunk of dquots that we find.
972c59d87c4SChristoph Hellwig  */
973c59d87c4SChristoph Hellwig STATIC int
xfs_qm_reset_dqcounts_buf(struct xfs_mount * mp,struct xfs_inode * qip,xfs_dqtype_t type,struct list_head * buffer_list)97428b9060bSDarrick J. Wong xfs_qm_reset_dqcounts_buf(
97543ff2122SChristoph Hellwig 	struct xfs_mount	*mp,
97643ff2122SChristoph Hellwig 	struct xfs_inode	*qip,
9771a7ed271SDarrick J. Wong 	xfs_dqtype_t		type,
97843ff2122SChristoph Hellwig 	struct list_head	*buffer_list)
979c59d87c4SChristoph Hellwig {
98043ff2122SChristoph Hellwig 	struct xfs_bmbt_irec	*map;
981c59d87c4SChristoph Hellwig 	int			i, nmaps;	/* number of map entries */
982c59d87c4SChristoph Hellwig 	int			error;		/* return value */
983c59d87c4SChristoph Hellwig 	xfs_fileoff_t		lblkno;
984c59d87c4SChristoph Hellwig 	xfs_filblks_t		maxlblkcnt;
985c59d87c4SChristoph Hellwig 	xfs_dqid_t		firstid;
986c59d87c4SChristoph Hellwig 	xfs_fsblock_t		rablkno;
987c59d87c4SChristoph Hellwig 	xfs_filblks_t		rablkcnt;
988c59d87c4SChristoph Hellwig 
989c59d87c4SChristoph Hellwig 	error = 0;
990c59d87c4SChristoph Hellwig 	/*
991c59d87c4SChristoph Hellwig 	 * This looks racy, but we can't keep an inode lock across a
992c59d87c4SChristoph Hellwig 	 * trans_reserve. But, this gets called during quotacheck, and that
993c59d87c4SChristoph Hellwig 	 * happens only at mount time which is single threaded.
994c59d87c4SChristoph Hellwig 	 */
9956e73a545SChristoph Hellwig 	if (qip->i_nblocks == 0)
996c59d87c4SChristoph Hellwig 		return 0;
997c59d87c4SChristoph Hellwig 
998707e0ddaSTetsuo Handa 	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
999c59d87c4SChristoph Hellwig 
1000c59d87c4SChristoph Hellwig 	lblkno = 0;
100132972383SDave Chinner 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1002c59d87c4SChristoph Hellwig 	do {
1003da51d32dSChristoph Hellwig 		uint		lock_mode;
1004da51d32dSChristoph Hellwig 
1005c59d87c4SChristoph Hellwig 		nmaps = XFS_DQITER_MAP_SIZE;
1006c59d87c4SChristoph Hellwig 		/*
1007c59d87c4SChristoph Hellwig 		 * We aren't changing the inode itself. Just changing
1008c59d87c4SChristoph Hellwig 		 * some of its data. No new blocks are added here, and
1009c59d87c4SChristoph Hellwig 		 * the inode is never added to the transaction.
1010c59d87c4SChristoph Hellwig 		 */
1011da51d32dSChristoph Hellwig 		lock_mode = xfs_ilock_data_map_shared(qip);
10125c8ed202SDave Chinner 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
10135c8ed202SDave Chinner 				       map, &nmaps, 0);
1014da51d32dSChristoph Hellwig 		xfs_iunlock(qip, lock_mode);
1015c59d87c4SChristoph Hellwig 		if (error)
1016c59d87c4SChristoph Hellwig 			break;
1017c59d87c4SChristoph Hellwig 
1018c59d87c4SChristoph Hellwig 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1019c59d87c4SChristoph Hellwig 		for (i = 0; i < nmaps; i++) {
1020c59d87c4SChristoph Hellwig 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1021c59d87c4SChristoph Hellwig 			ASSERT(map[i].br_blockcount);
1022c59d87c4SChristoph Hellwig 
1023c59d87c4SChristoph Hellwig 
1024c59d87c4SChristoph Hellwig 			lblkno += map[i].br_blockcount;
1025c59d87c4SChristoph Hellwig 
1026c59d87c4SChristoph Hellwig 			if (map[i].br_startblock == HOLESTARTBLOCK)
1027c59d87c4SChristoph Hellwig 				continue;
1028c59d87c4SChristoph Hellwig 
1029c59d87c4SChristoph Hellwig 			firstid = (xfs_dqid_t) map[i].br_startoff *
1030c59d87c4SChristoph Hellwig 				mp->m_quotainfo->qi_dqperchunk;
1031c59d87c4SChristoph Hellwig 			/*
1032c59d87c4SChristoph Hellwig 			 * Do a read-ahead on the next extent.
1033c59d87c4SChristoph Hellwig 			 */
1034c59d87c4SChristoph Hellwig 			if ((i+1 < nmaps) &&
1035c59d87c4SChristoph Hellwig 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1036c59d87c4SChristoph Hellwig 				rablkcnt =  map[i+1].br_blockcount;
1037c59d87c4SChristoph Hellwig 				rablkno = map[i+1].br_startblock;
1038c59d87c4SChristoph Hellwig 				while (rablkcnt--) {
1039c59d87c4SChristoph Hellwig 					xfs_buf_readahead(mp->m_ddev_targp,
1040c59d87c4SChristoph Hellwig 					       XFS_FSB_TO_DADDR(mp, rablkno),
1041c3f8fc73SDave Chinner 					       mp->m_quotainfo->qi_dqchunklen,
10425fd364feSDave Chinner 					       &xfs_dquot_buf_ops);
1043c59d87c4SChristoph Hellwig 					rablkno++;
1044c59d87c4SChristoph Hellwig 				}
1045c59d87c4SChristoph Hellwig 			}
1046c59d87c4SChristoph Hellwig 			/*
1047c59d87c4SChristoph Hellwig 			 * Iterate thru all the blks in the extent and
1048c59d87c4SChristoph Hellwig 			 * reset the counters of all the dquots inside them.
1049c59d87c4SChristoph Hellwig 			 */
105028b9060bSDarrick J. Wong 			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1051c59d87c4SChristoph Hellwig 						   map[i].br_startblock,
1052c59d87c4SChristoph Hellwig 						   map[i].br_blockcount,
10530dcc0728SDarrick J. Wong 						   type, buffer_list);
1054c59d87c4SChristoph Hellwig 			if (error)
105543ff2122SChristoph Hellwig 				goto out;
105643ff2122SChristoph Hellwig 		}
1057c59d87c4SChristoph Hellwig 	} while (nmaps > 0);
1058c59d87c4SChristoph Hellwig 
105943ff2122SChristoph Hellwig out:
1060c59d87c4SChristoph Hellwig 	kmem_free(map);
1061c59d87c4SChristoph Hellwig 	return error;
1062c59d87c4SChristoph Hellwig }
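/*
 * Editor's sketch (not part of the original source): the firstid
 * computation above maps a file offset inside the quota inode to the ID
 * of the first dquot stored in that block.  Assuming, hypothetically,
 * 4096-byte filesystem blocks and qi_dqperchunk == 30 ondisk dquot
 * records per block:
 *
 *	firstid = map[i].br_startoff * mp->m_quotainfo->qi_dqperchunk;
 *	// br_startoff == 2, qi_dqperchunk == 30  =>  firstid == 60
 *
 * so the block at file offset 2 holds the dquots with IDs 60..89.
 */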
1063c59d87c4SChristoph Hellwig 
1064c59d87c4SChristoph Hellwig /*
1065c59d87c4SChristoph Hellwig  * Called by dqusage_adjust while doing a quotacheck.
1066c59d87c4SChristoph Hellwig  *
1067c59d87c4SChristoph Hellwig  * Given the inode and a dquot id, this updates both the incore dquot and
1068c59d87c4SChristoph Hellwig  * the buffer copy. This is so that once the quotacheck is done, we can
1069c59d87c4SChristoph Hellwig  * just log all the buffers, as opposed to logging numerous updates to
1070c59d87c4SChristoph Hellwig  * individual dquots.
1071c59d87c4SChristoph Hellwig  */
1072c59d87c4SChristoph Hellwig STATIC int
1073c59d87c4SChristoph Hellwig xfs_qm_quotacheck_dqadjust(
1074c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip,
10751a7ed271SDarrick J. Wong 	xfs_dqtype_t		type,
1076c59d87c4SChristoph Hellwig 	xfs_qcnt_t		nblks,
1077c59d87c4SChristoph Hellwig 	xfs_qcnt_t		rtblks)
1078c59d87c4SChristoph Hellwig {
1079c59d87c4SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
1080c59d87c4SChristoph Hellwig 	struct xfs_dquot	*dqp;
10810fcef127SDarrick J. Wong 	xfs_dqid_t		id;
1082c59d87c4SChristoph Hellwig 	int			error;
1083c59d87c4SChristoph Hellwig 
10840fcef127SDarrick J. Wong 	id = xfs_qm_id_for_quotatype(ip, type);
108530ab2dcfSDarrick J. Wong 	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1086c59d87c4SChristoph Hellwig 	if (error) {
1087c59d87c4SChristoph Hellwig 		/*
1088c59d87c4SChristoph Hellwig 		 * Shouldn't be able to turn off quotas here.
1089c59d87c4SChristoph Hellwig 		 */
10902451337dSDave Chinner 		ASSERT(error != -ESRCH);
10912451337dSDave Chinner 		ASSERT(error != -ENOENT);
1092c59d87c4SChristoph Hellwig 		return error;
1093c59d87c4SChristoph Hellwig 	}
1094c59d87c4SChristoph Hellwig 
1095c59d87c4SChristoph Hellwig 	trace_xfs_dqadjust(dqp);
1096c59d87c4SChristoph Hellwig 
1097c59d87c4SChristoph Hellwig 	/*
1098c59d87c4SChristoph Hellwig 	 * Adjust the inode count and the block count to reflect this inode's
1099c59d87c4SChristoph Hellwig 	 * resource usage.
1100c59d87c4SChristoph Hellwig 	 */
1101be37d40cSDarrick J. Wong 	dqp->q_ino.count++;
1102784e80f5SDarrick J. Wong 	dqp->q_ino.reserved++;
1103c59d87c4SChristoph Hellwig 	if (nblks) {
1104be37d40cSDarrick J. Wong 		dqp->q_blk.count += nblks;
1105784e80f5SDarrick J. Wong 		dqp->q_blk.reserved += nblks;
1106c59d87c4SChristoph Hellwig 	}
1107c59d87c4SChristoph Hellwig 	if (rtblks) {
1108be37d40cSDarrick J. Wong 		dqp->q_rtb.count += rtblks;
1109784e80f5SDarrick J. Wong 		dqp->q_rtb.reserved += rtblks;
1110c59d87c4SChristoph Hellwig 	}
1111c59d87c4SChristoph Hellwig 
1112c59d87c4SChristoph Hellwig 	/*
1113c59d87c4SChristoph Hellwig 	 * Set default limits, adjust timers (since we changed usages)
1114c59d87c4SChristoph Hellwig 	 *
1115c59d87c4SChristoph Hellwig 	 * There are no timers for the default values set in the root dquot.
1116c59d87c4SChristoph Hellwig 	 */
1117c51df733SDarrick J. Wong 	if (dqp->q_id) {
1118c8c753e1SDarrick J. Wong 		xfs_qm_adjust_dqlimits(dqp);
1119c8c753e1SDarrick J. Wong 		xfs_qm_adjust_dqtimers(dqp);
1120c59d87c4SChristoph Hellwig 	}
1121c59d87c4SChristoph Hellwig 
1122985a78fdSDarrick J. Wong 	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1123c59d87c4SChristoph Hellwig 	xfs_qm_dqput(dqp);
1124c59d87c4SChristoph Hellwig 	return 0;
1125c59d87c4SChristoph Hellwig }
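/*
 * Editor's worked example (not in the original source): for an inode
 * owning 8 data-device blocks and 4 realtime blocks, each call above
 * performs, in effect:
 *
 *	dqp->q_ino.count++;		dqp->q_ino.reserved++;
 *	dqp->q_blk.count += 8;		dqp->q_blk.reserved += 8;
 *	dqp->q_rtb.count += 4;		dqp->q_rtb.reserved += 4;
 *
 * The reserved counters are bumped in lockstep with the real counters
 * because nothing holds transient reservations while quotacheck runs.
 */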
1126c59d87c4SChristoph Hellwig 
1127c59d87c4SChristoph Hellwig /*
1128c59d87c4SChristoph Hellwig  * Callback routine supplied to the inode walk. Given an inumber, find its
1129c59d87c4SChristoph Hellwig  * dquots and update them to account for resources taken by that inode.
1130c59d87c4SChristoph Hellwig  */
1131c59d87c4SChristoph Hellwig /* ARGSUSED */
1132c59d87c4SChristoph Hellwig STATIC int
1133c59d87c4SChristoph Hellwig xfs_qm_dqusage_adjust(
1134ebd126a6SDarrick J. Wong 	struct xfs_mount	*mp,
1135ebd126a6SDarrick J. Wong 	struct xfs_trans	*tp,
1136ebd126a6SDarrick J. Wong 	xfs_ino_t		ino,
1137ebd126a6SDarrick J. Wong 	void			*data)
1138c59d87c4SChristoph Hellwig {
1139ebd126a6SDarrick J. Wong 	struct xfs_inode	*ip;
11408bfadd8dSChristoph Hellwig 	xfs_qcnt_t		nblks;
11418bfadd8dSChristoph Hellwig 	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1142c59d87c4SChristoph Hellwig 	int			error;
1143c59d87c4SChristoph Hellwig 
1144149e53afSChristoph Hellwig 	ASSERT(XFS_IS_QUOTA_ON(mp));
1145c59d87c4SChristoph Hellwig 
1146c59d87c4SChristoph Hellwig 	/*
1147c59d87c4SChristoph Hellwig 	 * The root inode must have its resources accounted for; not so the
1148c59d87c4SChristoph Hellwig 	 * quota inodes.
1149c59d87c4SChristoph Hellwig 	 */
1150ebd126a6SDarrick J. Wong 	if (xfs_is_quota_inode(&mp->m_sb, ino))
1151ebd126a6SDarrick J. Wong 		return 0;
1152c59d87c4SChristoph Hellwig 
1153c59d87c4SChristoph Hellwig 	/*
11540fcef127SDarrick J. Wong 	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
11550fcef127SDarrick J. Wong 	 * at mount time and therefore nobody will be racing chown/chproj.
1156c59d87c4SChristoph Hellwig 	 */
1157ebd126a6SDarrick J. Wong 	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1158ebd126a6SDarrick J. Wong 	if (error == -EINVAL || error == -ENOENT)
1159ebd126a6SDarrick J. Wong 		return 0;
1160ebd126a6SDarrick J. Wong 	if (error)
1161c59d87c4SChristoph Hellwig 		return error;
1162c59d87c4SChristoph Hellwig 
1163*537c013bSDarrick J. Wong 	/*
1164*537c013bSDarrick J. Wong 	 * Reload the incore unlinked list to avoid failure in inodegc.
1165*537c013bSDarrick J. Wong 	 * Use an unlocked check here because unrecovered unlinked inodes
1166*537c013bSDarrick J. Wong 	 * should be somewhat rare.
1167*537c013bSDarrick J. Wong 	 */
1168*537c013bSDarrick J. Wong 	if (xfs_inode_unlinked_incomplete(ip)) {
116949813a21SDarrick J. Wong 		error = xfs_inode_reload_unlinked(ip);
1170*537c013bSDarrick J. Wong 		if (error) {
1171*537c013bSDarrick J. Wong 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
117249813a21SDarrick J. Wong 			goto error0;
1173*537c013bSDarrick J. Wong 		}
1174*537c013bSDarrick J. Wong 	}
117549813a21SDarrick J. Wong 
1176c59d87c4SChristoph Hellwig 	ASSERT(ip->i_delayed_blks == 0);
1177c59d87c4SChristoph Hellwig 
1178c59d87c4SChristoph Hellwig 	if (XFS_IS_REALTIME_INODE(ip)) {
1179732436efSDarrick J. Wong 		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
11808bfadd8dSChristoph Hellwig 
1181ebd126a6SDarrick J. Wong 		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1182c59d87c4SChristoph Hellwig 		if (error)
1183c59d87c4SChristoph Hellwig 			goto error0;
1184c59d87c4SChristoph Hellwig 
11858bfadd8dSChristoph Hellwig 		xfs_bmap_count_leaves(ifp, &rtblks);
11868bfadd8dSChristoph Hellwig 	}
11878bfadd8dSChristoph Hellwig 
11886e73a545SChristoph Hellwig 	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
118949813a21SDarrick J. Wong 	xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
1190c59d87c4SChristoph Hellwig 
1191c59d87c4SChristoph Hellwig 	/*
1192c59d87c4SChristoph Hellwig 	 * Add the (disk blocks and inode) resources occupied by this
1193c59d87c4SChristoph Hellwig 	 * inode to its dquots. We do this adjustment in the incore dquot,
1194c59d87c4SChristoph Hellwig 	 * and also copy the changes to its buffer.
1195c59d87c4SChristoph Hellwig 	 * We don't care about putting these changes in a transaction
1196c59d87c4SChristoph Hellwig 	 * envelope because if we crash in the middle of a 'quotacheck'
1197c59d87c4SChristoph Hellwig 	 * we have to start from the beginning anyway.
1198c59d87c4SChristoph Hellwig 	 * Once we're done, we'll log all the dquot bufs.
1199c59d87c4SChristoph Hellwig 	 *
1200c59d87c4SChristoph Hellwig 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1201c59d87c4SChristoph Hellwig 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1202c59d87c4SChristoph Hellwig 	 */
1203c59d87c4SChristoph Hellwig 	if (XFS_IS_UQUOTA_ON(mp)) {
12048cd4901dSDarrick J. Wong 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
12050fcef127SDarrick J. Wong 				rtblks);
1206c59d87c4SChristoph Hellwig 		if (error)
1207c59d87c4SChristoph Hellwig 			goto error0;
1208c59d87c4SChristoph Hellwig 	}
1209c59d87c4SChristoph Hellwig 
1210c59d87c4SChristoph Hellwig 	if (XFS_IS_GQUOTA_ON(mp)) {
12118cd4901dSDarrick J. Wong 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
12120fcef127SDarrick J. Wong 				rtblks);
1213c59d87c4SChristoph Hellwig 		if (error)
1214c59d87c4SChristoph Hellwig 			goto error0;
1215c59d87c4SChristoph Hellwig 	}
1216c59d87c4SChristoph Hellwig 
1217c59d87c4SChristoph Hellwig 	if (XFS_IS_PQUOTA_ON(mp)) {
12188cd4901dSDarrick J. Wong 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
12190fcef127SDarrick J. Wong 				rtblks);
1220c59d87c4SChristoph Hellwig 		if (error)
1221c59d87c4SChristoph Hellwig 			goto error0;
1222c59d87c4SChristoph Hellwig 	}
1223c59d87c4SChristoph Hellwig 
1224c59d87c4SChristoph Hellwig error0:
122544a8736bSDarrick J. Wong 	xfs_irele(ip);
1226c59d87c4SChristoph Hellwig 	return error;
1227c59d87c4SChristoph Hellwig }
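/*
 * Editor's note: i_nblocks counts data-device and realtime blocks
 * together, so the split above is simply
 *
 *	nblks = ip->i_nblocks - rtblks;
 *
 * e.g. a realtime file with i_nblocks == 100, of which 96 live in rt
 * extents, charges 4 blocks to q_blk and 96 to q_rtb.
 */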
1228c59d87c4SChristoph Hellwig 
1229b84a3a96SChristoph Hellwig STATIC int
1230b84a3a96SChristoph Hellwig xfs_qm_flush_one(
123143ff2122SChristoph Hellwig 	struct xfs_dquot	*dqp,
123243ff2122SChristoph Hellwig 	void			*data)
1233b84a3a96SChristoph Hellwig {
12347912e7feSBrian Foster 	struct xfs_mount	*mp = dqp->q_mount;
123543ff2122SChristoph Hellwig 	struct list_head	*buffer_list = data;
1236fe7257fdSChristoph Hellwig 	struct xfs_buf		*bp = NULL;
1237b84a3a96SChristoph Hellwig 	int			error = 0;
1238b84a3a96SChristoph Hellwig 
1239b84a3a96SChristoph Hellwig 	xfs_dqlock(dqp);
1240985a78fdSDarrick J. Wong 	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1241b84a3a96SChristoph Hellwig 		goto out_unlock;
1242b84a3a96SChristoph Hellwig 	if (!XFS_DQ_IS_DIRTY(dqp))
1243b84a3a96SChristoph Hellwig 		goto out_unlock;
1244b84a3a96SChristoph Hellwig 
12457912e7feSBrian Foster 	/*
12467912e7feSBrian Foster 	 * The only way the dquot is already flush locked by the time quotacheck
12477912e7feSBrian Foster 	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
12487912e7feSBrian Foster 	 * it for the final time. Quotacheck collects all dquot bufs in the
12497912e7feSBrian Foster 	 * local delwri queue before dquots are dirtied, so reclaim can't have
12507912e7feSBrian Foster 	 * possibly queued it for I/O. The only way out is to push the buffer to
12517912e7feSBrian Foster 	 * cycle the flush lock.
12527912e7feSBrian Foster 	 */
12537912e7feSBrian Foster 	if (!xfs_dqflock_nowait(dqp)) {
12547912e7feSBrian Foster 		/* buf is pinned in-core by delwri list */
125585c73bf7SDave Chinner 		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
125685c73bf7SDave Chinner 				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
125785c73bf7SDave Chinner 		if (error)
12587912e7feSBrian Foster 			goto out_unlock;
125985c73bf7SDave Chinner 
1260f0c2d7d2SDarrick J. Wong 		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1261f0c2d7d2SDarrick J. Wong 			error = -EAGAIN;
1262f0c2d7d2SDarrick J. Wong 			xfs_buf_relse(bp);
1263f0c2d7d2SDarrick J. Wong 			goto out_unlock;
1264f0c2d7d2SDarrick J. Wong 		}
12657912e7feSBrian Foster 		xfs_buf_unlock(bp);
12667912e7feSBrian Foster 
12677912e7feSBrian Foster 		xfs_buf_delwri_pushbuf(bp, buffer_list);
12687912e7feSBrian Foster 		xfs_buf_rele(bp);
12697912e7feSBrian Foster 
12707912e7feSBrian Foster 		error = -EAGAIN;
12717912e7feSBrian Foster 		goto out_unlock;
12727912e7feSBrian Foster 	}
12737912e7feSBrian Foster 
1274fe7257fdSChristoph Hellwig 	error = xfs_qm_dqflush(dqp, &bp);
1275fe7257fdSChristoph Hellwig 	if (error)
1276fe7257fdSChristoph Hellwig 		goto out_unlock;
1277b84a3a96SChristoph Hellwig 
127843ff2122SChristoph Hellwig 	xfs_buf_delwri_queue(bp, buffer_list);
1279fe7257fdSChristoph Hellwig 	xfs_buf_relse(bp);
1280b84a3a96SChristoph Hellwig out_unlock:
1281b84a3a96SChristoph Hellwig 	xfs_dqunlock(dqp);
1282b84a3a96SChristoph Hellwig 	return error;
1283b84a3a96SChristoph Hellwig }
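/*
 * Editor's note: the -EAGAIN returns above feed back into
 * xfs_qm_dquot_walk(); a sketch of the retry pattern (the real loop
 * lives earlier in this file) is:
 *
 *	error = execute(batch[i], data);
 *	if (error == -EAGAIN) {
 *		skipped++;
 *		continue;
 *	}
 *	...
 *	if (skipped) {
 *		delay(1);
 *		goto restart;
 *	}
 *
 * so a flush-locked dquot is revisited after its buffer has cycled
 * through the delwri queue.
 */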
1284b84a3a96SChristoph Hellwig 
1285c59d87c4SChristoph Hellwig /*
1286c59d87c4SChristoph Hellwig  * Walk through all the filesystem inodes and construct a consistent view
1287c59d87c4SChristoph Hellwig  * of the disk quota world. If the quotacheck fails, disable quotas.
1288c59d87c4SChristoph Hellwig  */
1289eb866bbfSJie Liu STATIC int
1290c59d87c4SChristoph Hellwig xfs_qm_quotacheck(
1291c59d87c4SChristoph Hellwig 	xfs_mount_t	*mp)
1292c59d87c4SChristoph Hellwig {
1293ebd126a6SDarrick J. Wong 	int			error, error2;
1294c59d87c4SChristoph Hellwig 	uint			flags;
129543ff2122SChristoph Hellwig 	LIST_HEAD		(buffer_list);
1296113a5683SChandra Seetharaman 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1297113a5683SChandra Seetharaman 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
129892f8ff73SChandra Seetharaman 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1299c59d87c4SChristoph Hellwig 
1300c59d87c4SChristoph Hellwig 	flags = 0;
1301c59d87c4SChristoph Hellwig 
130292f8ff73SChandra Seetharaman 	ASSERT(uip || gip || pip);
1303149e53afSChristoph Hellwig 	ASSERT(XFS_IS_QUOTA_ON(mp));
1304c59d87c4SChristoph Hellwig 
1305c59d87c4SChristoph Hellwig 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1306c59d87c4SChristoph Hellwig 
1307c59d87c4SChristoph Hellwig 	/*
1308c59d87c4SChristoph Hellwig 	 * First we go through all the dquots on disk, USR and GRP/PRJ, and
1309c59d87c4SChristoph Hellwig 	 * reset their counters to zero. We need a clean slate.
1310c59d87c4SChristoph Hellwig 	 * We don't log our changes until later.
1311c59d87c4SChristoph Hellwig 	 */
1312c59d87c4SChristoph Hellwig 	if (uip) {
13138cd4901dSDarrick J. Wong 		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
131443ff2122SChristoph Hellwig 					 &buffer_list);
1315c59d87c4SChristoph Hellwig 		if (error)
1316c59d87c4SChristoph Hellwig 			goto error_return;
1317c59d87c4SChristoph Hellwig 		flags |= XFS_UQUOTA_CHKD;
1318c59d87c4SChristoph Hellwig 	}
1319c59d87c4SChristoph Hellwig 
1320c59d87c4SChristoph Hellwig 	if (gip) {
13218cd4901dSDarrick J. Wong 		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
132243ff2122SChristoph Hellwig 					 &buffer_list);
1323c59d87c4SChristoph Hellwig 		if (error)
1324c59d87c4SChristoph Hellwig 			goto error_return;
132592f8ff73SChandra Seetharaman 		flags |= XFS_GQUOTA_CHKD;
132692f8ff73SChandra Seetharaman 	}
132792f8ff73SChandra Seetharaman 
132892f8ff73SChandra Seetharaman 	if (pip) {
13298cd4901dSDarrick J. Wong 		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
133092f8ff73SChandra Seetharaman 					 &buffer_list);
133192f8ff73SChandra Seetharaman 		if (error)
133292f8ff73SChandra Seetharaman 			goto error_return;
133392f8ff73SChandra Seetharaman 		flags |= XFS_PQUOTA_CHKD;
1334c59d87c4SChristoph Hellwig 	}
1335c59d87c4SChristoph Hellwig 
133649813a21SDarrick J. Wong 	xfs_set_quotacheck_running(mp);
133713d59a2aSDarrick J. Wong 	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
133813d59a2aSDarrick J. Wong 			NULL);
133949813a21SDarrick J. Wong 	xfs_clear_quotacheck_running(mp);
13400c7273e4SDave Chinner 
134186d40f1eSDarrick J. Wong 	/*
13420c7273e4SDave Chinner 	 * On error, the inode walk may have partially populated the dquot
13430c7273e4SDave Chinner 	 * caches.  We must purge them before disabling quota and tearing down
13440c7273e4SDave Chinner 	 * the quotainfo, or else the dquots will leak.
134586d40f1eSDarrick J. Wong 	 */
13460c7273e4SDave Chinner 	if (error)
13470c7273e4SDave Chinner 		goto error_purge;
1348c59d87c4SChristoph Hellwig 
1349c59d87c4SChristoph Hellwig 	/*
1350b84a3a96SChristoph Hellwig 	 * We've made all the changes that we need to make incore.  Flush them
1351b84a3a96SChristoph Hellwig 	 * down to disk buffers if everything was updated successfully.
1352c59d87c4SChristoph Hellwig 	 */
135343ff2122SChristoph Hellwig 	if (XFS_IS_UQUOTA_ON(mp)) {
13548cd4901dSDarrick J. Wong 		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
135543ff2122SChristoph Hellwig 					  &buffer_list);
135643ff2122SChristoph Hellwig 	}
1357b84a3a96SChristoph Hellwig 	if (XFS_IS_GQUOTA_ON(mp)) {
13588cd4901dSDarrick J. Wong 		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
135943ff2122SChristoph Hellwig 					   &buffer_list);
1360c59d87c4SChristoph Hellwig 		if (!error)
1361b84a3a96SChristoph Hellwig 			error = error2;
1362b84a3a96SChristoph Hellwig 	}
1363b84a3a96SChristoph Hellwig 	if (XFS_IS_PQUOTA_ON(mp)) {
13648cd4901dSDarrick J. Wong 		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
136543ff2122SChristoph Hellwig 					   &buffer_list);
1366b84a3a96SChristoph Hellwig 		if (!error)
1367b84a3a96SChristoph Hellwig 			error = error2;
1368b84a3a96SChristoph Hellwig 	}
1369c59d87c4SChristoph Hellwig 
137043ff2122SChristoph Hellwig 	error2 = xfs_buf_delwri_submit(&buffer_list);
137143ff2122SChristoph Hellwig 	if (!error)
137243ff2122SChristoph Hellwig 		error = error2;
137343ff2122SChristoph Hellwig 
1374c59d87c4SChristoph Hellwig 	/*
1375c59d87c4SChristoph Hellwig 	 * We can get this error if we couldn't do a dquot allocation inside
1376c59d87c4SChristoph Hellwig 	 * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the
1377c59d87c4SChristoph Hellwig 	 * dirty dquots that might be cached, we just want to get rid of them
1378c59d87c4SChristoph Hellwig 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1379c59d87c4SChristoph Hellwig 	 * at this point (because we intentionally didn't in dqget_noattach).
1380c59d87c4SChristoph Hellwig 	 */
13810c7273e4SDave Chinner 	if (error)
13820c7273e4SDave Chinner 		goto error_purge;
1383c59d87c4SChristoph Hellwig 
1384c59d87c4SChristoph Hellwig 	/*
1385c59d87c4SChristoph Hellwig 	 * If one type of quota is off, then it will lose its
1386c59d87c4SChristoph Hellwig 	 * quotachecked status, since we won't be doing accounting for
1387c59d87c4SChristoph Hellwig 	 * that type anymore.
1388c59d87c4SChristoph Hellwig 	 */
13894177af3aSChandra Seetharaman 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1390c59d87c4SChristoph Hellwig 	mp->m_qflags |= flags;
1391c59d87c4SChristoph Hellwig 
1392c59d87c4SChristoph Hellwig error_return:
139320e8a063SBrian Foster 	xfs_buf_delwri_cancel(&buffer_list);
139443ff2122SChristoph Hellwig 
1395c59d87c4SChristoph Hellwig 	if (error) {
1396c59d87c4SChristoph Hellwig 		xfs_warn(mp,
1397c59d87c4SChristoph Hellwig 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1398c59d87c4SChristoph Hellwig 			error);
1399c59d87c4SChristoph Hellwig 		/*
1400c59d87c4SChristoph Hellwig 		 * We must turn off quotas.
1401c59d87c4SChristoph Hellwig 		 */
1402c59d87c4SChristoph Hellwig 		ASSERT(mp->m_quotainfo != NULL);
1403c59d87c4SChristoph Hellwig 		xfs_qm_destroy_quotainfo(mp);
1404c59d87c4SChristoph Hellwig 		if (xfs_mount_reset_sbqflags(mp)) {
1405c59d87c4SChristoph Hellwig 			xfs_warn(mp,
1406c59d87c4SChristoph Hellwig 				"Quotacheck: Failed to reset quota flags.");
1407c59d87c4SChristoph Hellwig 		}
1408c59d87c4SChristoph Hellwig 	} else
1409c59d87c4SChristoph Hellwig 		xfs_notice(mp, "Quotacheck: Done.");
1410d99831ffSEric Sandeen 	return error;
14110c7273e4SDave Chinner 
14120c7273e4SDave Chinner error_purge:
14130c7273e4SDave Chinner 	/*
14140c7273e4SDave Chinner 	 * On error, we may have inodes queued for inactivation. This may try
14150c7273e4SDave Chinner 	 * to attach dquots to the inode before running cleanup operations on
14160c7273e4SDave Chinner 	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
14170c7273e4SDave Chinner 	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
14180c7273e4SDave Chinner 	 * pending inodegc operations before we purge the dquots from memory,
14190c7273e4SDave Chinner 	 * ensuring that background inactivation is idle whilst we turn off
14200c7273e4SDave Chinner 	 * quotas.
14210c7273e4SDave Chinner 	 */
14220c7273e4SDave Chinner 	xfs_inodegc_flush(mp);
14230c7273e4SDave Chinner 	xfs_qm_dqpurge_all(mp);
14240c7273e4SDave Chinner 	goto error_return;
14250c7273e4SDave Chinner 
1426c59d87c4SChristoph Hellwig }
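/*
 * Editor's summary of the quotacheck sequence implemented above:
 *
 *	1. xfs_qm_reset_dqcounts_buf()	zeroes every ondisk dquot and
 *	   queues the buffers on a local delwri list;
 *	2. xfs_iwalk_threaded()		runs xfs_qm_dqusage_adjust() so
 *	   each inode charges its usage to the incore dquots;
 *	3. xfs_qm_dquot_walk()		runs xfs_qm_flush_one() to write
 *	   the dirty incore dquots back into those buffers;
 *	4. xfs_buf_delwri_submit()	commits the whole check with one
 *	   pass of buffer I/O.
 */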
1427c59d87c4SChristoph Hellwig 
1428c59d87c4SChristoph Hellwig /*
1429eb866bbfSJie Liu  * This is called from xfs_mountfs to start quotas and initialize all
1430eb866bbfSJie Liu  * necessary data structures like quotainfo.  This is also responsible for
1431eb866bbfSJie Liu  * running a quotacheck as necessary.  We are guaranteed that the superblock
1432eb866bbfSJie Liu  * is consistently read in at this point.
1433eb866bbfSJie Liu  *
1434eb866bbfSJie Liu  * If we fail here, the mount will continue with quota turned off. We don't
1435eb866bbfSJie Liu  * need to indicate success or failure at all.
1436eb866bbfSJie Liu  */
1437eb866bbfSJie Liu void
1438eb866bbfSJie Liu xfs_qm_mount_quotas(
1439eb866bbfSJie Liu 	struct xfs_mount	*mp)
1440eb866bbfSJie Liu {
1441eb866bbfSJie Liu 	int			error = 0;
1442eb866bbfSJie Liu 	uint			sbf;
1443eb866bbfSJie Liu 
1444eb866bbfSJie Liu 	/*
1445eb866bbfSJie Liu 	 * If quotas on realtime volumes are not supported, we disable
1446eb866bbfSJie Liu 	 * quotas immediately.
1447eb866bbfSJie Liu 	 */
1448eb866bbfSJie Liu 	if (mp->m_sb.sb_rextents) {
1449eb866bbfSJie Liu 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1450eb866bbfSJie Liu 		mp->m_qflags = 0;
1451eb866bbfSJie Liu 		goto write_changes;
1452eb866bbfSJie Liu 	}
1453eb866bbfSJie Liu 
1454149e53afSChristoph Hellwig 	ASSERT(XFS_IS_QUOTA_ON(mp));
1455eb866bbfSJie Liu 
1456eb866bbfSJie Liu 	/*
1457eb866bbfSJie Liu 	 * Allocate the quotainfo structure inside the mount struct, and
1458eb866bbfSJie Liu 	 * create quotainode(s), and change/rev superblock if necessary.
1459eb866bbfSJie Liu 	 */
1460eb866bbfSJie Liu 	error = xfs_qm_init_quotainfo(mp);
1461eb866bbfSJie Liu 	if (error) {
1462eb866bbfSJie Liu 		/*
1463eb866bbfSJie Liu 		 * We must turn off quotas.
1464eb866bbfSJie Liu 		 */
1465eb866bbfSJie Liu 		ASSERT(mp->m_quotainfo == NULL);
1466eb866bbfSJie Liu 		mp->m_qflags = 0;
1467eb866bbfSJie Liu 		goto write_changes;
1468eb866bbfSJie Liu 	}
1469eb866bbfSJie Liu 	/*
1470eb866bbfSJie Liu 	 * If any of the quotas are not consistent, do a quotacheck.
1471eb866bbfSJie Liu 	 */
1472eb866bbfSJie Liu 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1473eb866bbfSJie Liu 		error = xfs_qm_quotacheck(mp);
1474eb866bbfSJie Liu 		if (error) {
1475eb866bbfSJie Liu 			/* Quotacheck failed and disabled quotas. */
1476eb866bbfSJie Liu 			return;
1477eb866bbfSJie Liu 		}
1478eb866bbfSJie Liu 	}
1479eb866bbfSJie Liu 	/*
1480eb866bbfSJie Liu 	 * If one type of quota is off, then it will lose its
1481eb866bbfSJie Liu 	 * quotachecked status, since we won't be doing accounting for
1482eb866bbfSJie Liu 	 * that type anymore.
1483eb866bbfSJie Liu 	 */
1484eb866bbfSJie Liu 	if (!XFS_IS_UQUOTA_ON(mp))
1485eb866bbfSJie Liu 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1486eb866bbfSJie Liu 	if (!XFS_IS_GQUOTA_ON(mp))
1487eb866bbfSJie Liu 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1488eb866bbfSJie Liu 	if (!XFS_IS_PQUOTA_ON(mp))
1489eb866bbfSJie Liu 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1490eb866bbfSJie Liu 
1491eb866bbfSJie Liu  write_changes:
1492eb866bbfSJie Liu 	/*
1493eb866bbfSJie Liu 	 * We actually don't have to acquire the m_sb_lock at all.
1494eb866bbfSJie Liu 	 * This can only be called from mount, and that's single-threaded. XXX
1495eb866bbfSJie Liu 	 */
1496eb866bbfSJie Liu 	spin_lock(&mp->m_sb_lock);
1497eb866bbfSJie Liu 	sbf = mp->m_sb.sb_qflags;
1498eb866bbfSJie Liu 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1499eb866bbfSJie Liu 	spin_unlock(&mp->m_sb_lock);
1500eb866bbfSJie Liu 
1501eb866bbfSJie Liu 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
150261e63ecbSDave Chinner 		if (xfs_sync_sb(mp, false)) {
1503eb866bbfSJie Liu 			/*
1504eb866bbfSJie Liu 			 * We could only have been turning quotas off.
1505eb866bbfSJie Liu 			 * We aren't in very good shape, actually, because
1506eb866bbfSJie Liu 			 * the incore structures are convinced that quotas are
1507eb866bbfSJie Liu 			 * off, but the on-disk superblock doesn't know that!
1508eb866bbfSJie Liu 			 */
1509149e53afSChristoph Hellwig 			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1510eb866bbfSJie Liu 			xfs_alert(mp, "%s: Superblock update failed!",
1511eb866bbfSJie Liu 				__func__);
1512eb866bbfSJie Liu 		}
1513eb866bbfSJie Liu 	}
1514eb866bbfSJie Liu 
1515eb866bbfSJie Liu 	if (error) {
1516eb866bbfSJie Liu 		xfs_warn(mp, "Failed to initialize disk quotas.");
1517eb866bbfSJie Liu 		return;
1518eb866bbfSJie Liu 	}
1519eb866bbfSJie Liu }
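/*
 * Editor's example of the CHKD trimming above: a mount with usrquota
 * but no grpquota keeps XFS_UQUOTA_CHKD set while clearing
 * XFS_GQUOTA_CHKD, i.e. in effect:
 *
 *	mp->m_qflags &= ~XFS_GQUOTA_CHKD;
 *
 * so a later mount that re-enables group accounting trips the
 * XFS_QM_NEED_QUOTACHECK() test and triggers a fresh quotacheck.
 */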
1520eb866bbfSJie Liu 
1521eb866bbfSJie Liu /*
1522c59d87c4SChristoph Hellwig  * This is called after the superblock has been read in and we're ready to
1523c59d87c4SChristoph Hellwig  * iget the quota inodes.
1524c59d87c4SChristoph Hellwig  */
1525c59d87c4SChristoph Hellwig STATIC int
1526c59d87c4SChristoph Hellwig xfs_qm_init_quotainos(
1527c59d87c4SChristoph Hellwig 	xfs_mount_t	*mp)
1528c59d87c4SChristoph Hellwig {
1529113a5683SChandra Seetharaman 	struct xfs_inode	*uip = NULL;
1530113a5683SChandra Seetharaman 	struct xfs_inode	*gip = NULL;
153192f8ff73SChandra Seetharaman 	struct xfs_inode	*pip = NULL;
1532c59d87c4SChristoph Hellwig 	int			error;
1533113a5683SChandra Seetharaman 	uint			flags = 0;
1534c59d87c4SChristoph Hellwig 
1535c59d87c4SChristoph Hellwig 	ASSERT(mp->m_quotainfo);
1536c59d87c4SChristoph Hellwig 
1537c59d87c4SChristoph Hellwig 	/*
1538c59d87c4SChristoph Hellwig 	 * Get the uquota and gquota inodes
1539c59d87c4SChristoph Hellwig 	 */
154038c26bfdSDave Chinner 	if (xfs_has_quota(mp)) {
1541c59d87c4SChristoph Hellwig 		if (XFS_IS_UQUOTA_ON(mp) &&
1542c59d87c4SChristoph Hellwig 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1543c59d87c4SChristoph Hellwig 			ASSERT(mp->m_sb.sb_uquotino > 0);
1544113a5683SChandra Seetharaman 			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1545113a5683SChandra Seetharaman 					     0, 0, &uip);
1546113a5683SChandra Seetharaman 			if (error)
1547b474c7aeSEric Sandeen 				return error;
1548c59d87c4SChristoph Hellwig 		}
154992f8ff73SChandra Seetharaman 		if (XFS_IS_GQUOTA_ON(mp) &&
1550c59d87c4SChristoph Hellwig 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1551c59d87c4SChristoph Hellwig 			ASSERT(mp->m_sb.sb_gquotino > 0);
1552113a5683SChandra Seetharaman 			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1553113a5683SChandra Seetharaman 					     0, 0, &gip);
1554113a5683SChandra Seetharaman 			if (error)
1555113a5683SChandra Seetharaman 				goto error_rele;
1556c59d87c4SChristoph Hellwig 		}
155792f8ff73SChandra Seetharaman 		if (XFS_IS_PQUOTA_ON(mp) &&
1558d892d586SChandra Seetharaman 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1559d892d586SChandra Seetharaman 			ASSERT(mp->m_sb.sb_pquotino > 0);
1560d892d586SChandra Seetharaman 			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
156192f8ff73SChandra Seetharaman 					     0, 0, &pip);
156292f8ff73SChandra Seetharaman 			if (error)
156392f8ff73SChandra Seetharaman 				goto error_rele;
156492f8ff73SChandra Seetharaman 		}
1565c59d87c4SChristoph Hellwig 	} else {
1566c59d87c4SChristoph Hellwig 		flags |= XFS_QMOPT_SBVERSION;
1567c59d87c4SChristoph Hellwig 	}
1568c59d87c4SChristoph Hellwig 
1569c59d87c4SChristoph Hellwig 	/*
157092f8ff73SChandra Seetharaman 	 * Create the three inodes, if they don't exist already. The changes
1571c59d87c4SChristoph Hellwig 	 * made above will get added to a transaction and logged in one of
1572c59d87c4SChristoph Hellwig 	 * the qino_alloc calls below.  If the device is readonly,
1573c59d87c4SChristoph Hellwig 	 * temporarily switch to read-write to do this.
1574c59d87c4SChristoph Hellwig 	 */
1575c59d87c4SChristoph Hellwig 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1576113a5683SChandra Seetharaman 		error = xfs_qm_qino_alloc(mp, &uip,
1577113a5683SChandra Seetharaman 					      flags | XFS_QMOPT_UQUOTA);
1578113a5683SChandra Seetharaman 		if (error)
1579113a5683SChandra Seetharaman 			goto error_rele;
1580c59d87c4SChristoph Hellwig 
1581c59d87c4SChristoph Hellwig 		flags &= ~XFS_QMOPT_SBVERSION;
1582c59d87c4SChristoph Hellwig 	}
158392f8ff73SChandra Seetharaman 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1584c59d87c4SChristoph Hellwig 		error = xfs_qm_qino_alloc(mp, &gip,
158592f8ff73SChandra Seetharaman 					  flags | XFS_QMOPT_GQUOTA);
158692f8ff73SChandra Seetharaman 		if (error)
158792f8ff73SChandra Seetharaman 			goto error_rele;
158892f8ff73SChandra Seetharaman 
158992f8ff73SChandra Seetharaman 		flags &= ~XFS_QMOPT_SBVERSION;
159092f8ff73SChandra Seetharaman 	}
159192f8ff73SChandra Seetharaman 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
159292f8ff73SChandra Seetharaman 		error = xfs_qm_qino_alloc(mp, &pip,
159392f8ff73SChandra Seetharaman 					  flags | XFS_QMOPT_PQUOTA);
1594113a5683SChandra Seetharaman 		if (error)
1595113a5683SChandra Seetharaman 			goto error_rele;
1596c59d87c4SChristoph Hellwig 	}
1597c59d87c4SChristoph Hellwig 
1598c59d87c4SChristoph Hellwig 	mp->m_quotainfo->qi_uquotaip = uip;
1599c59d87c4SChristoph Hellwig 	mp->m_quotainfo->qi_gquotaip = gip;
160092f8ff73SChandra Seetharaman 	mp->m_quotainfo->qi_pquotaip = pip;
1601c59d87c4SChristoph Hellwig 
1602c59d87c4SChristoph Hellwig 	return 0;
1603113a5683SChandra Seetharaman 
1604113a5683SChandra Seetharaman error_rele:
1605113a5683SChandra Seetharaman 	if (uip)
160644a8736bSDarrick J. Wong 		xfs_irele(uip);
1607113a5683SChandra Seetharaman 	if (gip)
160844a8736bSDarrick J. Wong 		xfs_irele(gip);
160992f8ff73SChandra Seetharaman 	if (pip)
161044a8736bSDarrick J. Wong 		xfs_irele(pip);
1611b474c7aeSEric Sandeen 	return error;
1612c59d87c4SChristoph Hellwig }
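/*
 * Editor's note: error_rele above is the standard kernel goto-unwind
 * idiom.  A minimal sketch of the shape, with hypothetical names:
 *
 *	error = get_resource(&a);
 *	if (error)
 *		return error;		// nothing to undo yet
 *	error = get_resource(&b);
 *	if (error)
 *		goto out_release_a;	// undo already-held state
 *	return 0;
 * out_release_a:
 *	release(a);
 *	return error;
 *
 * Because uip/gip/pip start out NULL and are tested before release, a
 * single label can unwind whichever quota inodes were actually grabbed.
 */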
1613c59d87c4SChristoph Hellwig 
161492b2e5b3SChristoph Hellwig STATIC void
16153a3882ffSAliaksei Karaliou xfs_qm_destroy_quotainos(
1616c072fbefSPavel Reichl 	struct xfs_quotainfo	*qi)
16173a3882ffSAliaksei Karaliou {
16183a3882ffSAliaksei Karaliou 	if (qi->qi_uquotaip) {
161944a8736bSDarrick J. Wong 		xfs_irele(qi->qi_uquotaip);
16203a3882ffSAliaksei Karaliou 		qi->qi_uquotaip = NULL; /* paranoia */
16213a3882ffSAliaksei Karaliou 	}
16223a3882ffSAliaksei Karaliou 	if (qi->qi_gquotaip) {
162344a8736bSDarrick J. Wong 		xfs_irele(qi->qi_gquotaip);
16243a3882ffSAliaksei Karaliou 		qi->qi_gquotaip = NULL;
16253a3882ffSAliaksei Karaliou 	}
16263a3882ffSAliaksei Karaliou 	if (qi->qi_pquotaip) {
162744a8736bSDarrick J. Wong 		xfs_irele(qi->qi_pquotaip);
16283a3882ffSAliaksei Karaliou 		qi->qi_pquotaip = NULL;
16293a3882ffSAliaksei Karaliou 	}
16303a3882ffSAliaksei Karaliou }
16313a3882ffSAliaksei Karaliou 
16323a3882ffSAliaksei Karaliou STATIC void
163592b2e5b3SChristoph Hellwig xfs_qm_dqfree_one(
163492b2e5b3SChristoph Hellwig 	struct xfs_dquot	*dqp)
1635c59d87c4SChristoph Hellwig {
1636c59d87c4SChristoph Hellwig 	struct xfs_mount	*mp = dqp->q_mount;
163792b2e5b3SChristoph Hellwig 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
163892b2e5b3SChristoph Hellwig 
16399f920f11SChristoph Hellwig 	mutex_lock(&qi->qi_tree_lock);
164051dbb1beSDarrick J. Wong 	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
164192b2e5b3SChristoph Hellwig 
164292b2e5b3SChristoph Hellwig 	qi->qi_dquots--;
1643b84a3a96SChristoph Hellwig 	mutex_unlock(&qi->qi_tree_lock);
164492b2e5b3SChristoph Hellwig 
164592b2e5b3SChristoph Hellwig 	xfs_qm_dqdestroy(dqp);
164692b2e5b3SChristoph Hellwig }
164792b2e5b3SChristoph Hellwig 
1648c59d87c4SChristoph Hellwig /* --------------- utility functions for vnodeops ---------------- */
1649c59d87c4SChristoph Hellwig 
1650c59d87c4SChristoph Hellwig 
1651c59d87c4SChristoph Hellwig /*
1652c59d87c4SChristoph Hellwig  * Given an inode and a uid, gid and prid, make sure that we have
1653c59d87c4SChristoph Hellwig  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1654c59d87c4SChristoph Hellwig  * quotas by creating this file.
1655c59d87c4SChristoph Hellwig  * This also attaches dquot(s) to the given inode after locking it,
1656c59d87c4SChristoph Hellwig  * and returns the dquots corresponding to the uid and/or gid.
1657c59d87c4SChristoph Hellwig  *
1658c59d87c4SChristoph Hellwig  * in	: inode (unlocked)
1659c59d87c4SChristoph Hellwig  * out	: udquot, gdquot with references taken and unlocked
1660c59d87c4SChristoph Hellwig  */
1661c59d87c4SChristoph Hellwig int
1662c59d87c4SChristoph Hellwig xfs_qm_vop_dqalloc(
1663c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip,
166454295159SChristoph Hellwig 	kuid_t			uid,
166554295159SChristoph Hellwig 	kgid_t			gid,
1666c59d87c4SChristoph Hellwig 	prid_t			prid,
1667c59d87c4SChristoph Hellwig 	uint			flags,
1668c59d87c4SChristoph Hellwig 	struct xfs_dquot	**O_udqpp,
166992f8ff73SChandra Seetharaman 	struct xfs_dquot	**O_gdqpp,
167092f8ff73SChandra Seetharaman 	struct xfs_dquot	**O_pdqpp)
1671c59d87c4SChristoph Hellwig {
1672c59d87c4SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
167354295159SChristoph Hellwig 	struct inode		*inode = VFS_I(ip);
1674ba8adad5SChristoph Hellwig 	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1675113a5683SChandra Seetharaman 	struct xfs_dquot	*uq = NULL;
1676113a5683SChandra Seetharaman 	struct xfs_dquot	*gq = NULL;
167792f8ff73SChandra Seetharaman 	struct xfs_dquot	*pq = NULL;
1678c59d87c4SChristoph Hellwig 	int			error;
1679c59d87c4SChristoph Hellwig 	uint			lockflags;
1680c59d87c4SChristoph Hellwig 
1681149e53afSChristoph Hellwig 	if (!XFS_IS_QUOTA_ON(mp))
1682c59d87c4SChristoph Hellwig 		return 0;
1683c59d87c4SChristoph Hellwig 
1684c59d87c4SChristoph Hellwig 	lockflags = XFS_ILOCK_EXCL;
1685c59d87c4SChristoph Hellwig 	xfs_ilock(ip, lockflags);
1686c59d87c4SChristoph Hellwig 
1687c59d87c4SChristoph Hellwig 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
168854295159SChristoph Hellwig 		gid = inode->i_gid;
1689c59d87c4SChristoph Hellwig 
1690c59d87c4SChristoph Hellwig 	/*
1691c59d87c4SChristoph Hellwig 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1692c59d87c4SChristoph Hellwig 	 * if necessary. The dquot(s) will not be locked.
1693c59d87c4SChristoph Hellwig 	 */
1694c59d87c4SChristoph Hellwig 	if (XFS_NOT_DQATTACHED(mp, ip)) {
16954882c19dSDarrick J. Wong 		error = xfs_qm_dqattach_locked(ip, true);
1696c59d87c4SChristoph Hellwig 		if (error) {
1697c59d87c4SChristoph Hellwig 			xfs_iunlock(ip, lockflags);
1698c59d87c4SChristoph Hellwig 			return error;
1699c59d87c4SChristoph Hellwig 		}
1700c59d87c4SChristoph Hellwig 	}
1701c59d87c4SChristoph Hellwig 
1702c59d87c4SChristoph Hellwig 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
170397611f93SKaixu Xia 		ASSERT(O_udqpp);
170454295159SChristoph Hellwig 		if (!uid_eq(inode->i_uid, uid)) {
1705c59d87c4SChristoph Hellwig 			/*
1706c59d87c4SChristoph Hellwig 			 * What we need is the dquot that has this uid, and
1707c59d87c4SChristoph Hellwig 			 * if we send the inode to dqget, the uid of the inode
1708c59d87c4SChristoph Hellwig 			 * takes priority over what's sent in the uid argument.
1709c59d87c4SChristoph Hellwig 			 * We must unlock inode here before calling dqget if
1710c59d87c4SChristoph Hellwig 			 * we're not sending the inode, because otherwise
1711c59d87c4SChristoph Hellwig 			 * we'll deadlock by doing trans_reserve while
1712c59d87c4SChristoph Hellwig 			 * holding ilock.
1713c59d87c4SChristoph Hellwig 			 */
1714c59d87c4SChristoph Hellwig 			xfs_iunlock(ip, lockflags);
1715ba8adad5SChristoph Hellwig 			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
17168cd4901dSDarrick J. Wong 					XFS_DQTYPE_USER, true, &uq);
1717113a5683SChandra Seetharaman 			if (error) {
17182451337dSDave Chinner 				ASSERT(error != -ENOENT);
1719c59d87c4SChristoph Hellwig 				return error;
1720c59d87c4SChristoph Hellwig 			}
1721c59d87c4SChristoph Hellwig 			/*
1722c59d87c4SChristoph Hellwig 			 * Get the ilock in the right order.
1723c59d87c4SChristoph Hellwig 			 */
1724c59d87c4SChristoph Hellwig 			xfs_dqunlock(uq);
1725c59d87c4SChristoph Hellwig 			lockflags = XFS_ILOCK_SHARED;
1726c59d87c4SChristoph Hellwig 			xfs_ilock(ip, lockflags);
1727c59d87c4SChristoph Hellwig 		} else {
1728c59d87c4SChristoph Hellwig 			/*
1729c59d87c4SChristoph Hellwig 			 * Take an extra reference, because we'll return
1730c59d87c4SChristoph Hellwig 			 * this to caller
1731c59d87c4SChristoph Hellwig 			 */
1732c59d87c4SChristoph Hellwig 			ASSERT(ip->i_udquot);
173378e55892SChristoph Hellwig 			uq = xfs_qm_dqhold(ip->i_udquot);
1734c59d87c4SChristoph Hellwig 		}
1735c59d87c4SChristoph Hellwig 	}
1736c59d87c4SChristoph Hellwig 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
173797611f93SKaixu Xia 		ASSERT(O_gdqpp);
173854295159SChristoph Hellwig 		if (!gid_eq(inode->i_gid, gid)) {
1739c59d87c4SChristoph Hellwig 			xfs_iunlock(ip, lockflags);
1740ba8adad5SChristoph Hellwig 			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
17418cd4901dSDarrick J. Wong 					XFS_DQTYPE_GROUP, true, &gq);
1742113a5683SChandra Seetharaman 			if (error) {
17432451337dSDave Chinner 				ASSERT(error != -ENOENT);
1744113a5683SChandra Seetharaman 				goto error_rele;
1745c59d87c4SChristoph Hellwig 			}
1746c59d87c4SChristoph Hellwig 			xfs_dqunlock(gq);
1747c59d87c4SChristoph Hellwig 			lockflags = XFS_ILOCK_SHARED;
1748c59d87c4SChristoph Hellwig 			xfs_ilock(ip, lockflags);
1749c59d87c4SChristoph Hellwig 		} else {
1750c59d87c4SChristoph Hellwig 			ASSERT(ip->i_gdquot);
175178e55892SChristoph Hellwig 			gq = xfs_qm_dqhold(ip->i_gdquot);
1752c59d87c4SChristoph Hellwig 		}
175392f8ff73SChandra Seetharaman 	}
175492f8ff73SChandra Seetharaman 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
175597611f93SKaixu Xia 		ASSERT(O_pdqpp);
1756ceaf603cSChristoph Hellwig 		if (ip->i_projid != prid) {
1757c59d87c4SChristoph Hellwig 			xfs_iunlock(ip, lockflags);
17585aff6750SKaixu Xia 			error = xfs_qm_dqget(mp, prid,
17598cd4901dSDarrick J. Wong 					XFS_DQTYPE_PROJ, true, &pq);
1760113a5683SChandra Seetharaman 			if (error) {
17612451337dSDave Chinner 				ASSERT(error != -ENOENT);
1762113a5683SChandra Seetharaman 				goto error_rele;
1763c59d87c4SChristoph Hellwig 			}
176492f8ff73SChandra Seetharaman 			xfs_dqunlock(pq);
1765c59d87c4SChristoph Hellwig 			lockflags = XFS_ILOCK_SHARED;
1766c59d87c4SChristoph Hellwig 			xfs_ilock(ip, lockflags);
1767c59d87c4SChristoph Hellwig 		} else {
176892f8ff73SChandra Seetharaman 			ASSERT(ip->i_pdquot);
176992f8ff73SChandra Seetharaman 			pq = xfs_qm_dqhold(ip->i_pdquot);
1770c59d87c4SChristoph Hellwig 		}
1771c59d87c4SChristoph Hellwig 	}
1772c59d87c4SChristoph Hellwig 	trace_xfs_dquot_dqalloc(ip);
1773c59d87c4SChristoph Hellwig 
1774c59d87c4SChristoph Hellwig 	xfs_iunlock(ip, lockflags);
1775c59d87c4SChristoph Hellwig 	if (O_udqpp)
1776c59d87c4SChristoph Hellwig 		*O_udqpp = uq;
1777d2a5e3c6SMarkus Elfring 	else
1778c59d87c4SChristoph Hellwig 		xfs_qm_dqrele(uq);
1779c59d87c4SChristoph Hellwig 	if (O_gdqpp)
1780c59d87c4SChristoph Hellwig 		*O_gdqpp = gq;
1781d2a5e3c6SMarkus Elfring 	else
1782c59d87c4SChristoph Hellwig 		xfs_qm_dqrele(gq);
178392f8ff73SChandra Seetharaman 	if (O_pdqpp)
178492f8ff73SChandra Seetharaman 		*O_pdqpp = pq;
1785d2a5e3c6SMarkus Elfring 	else
178692f8ff73SChandra Seetharaman 		xfs_qm_dqrele(pq);
1787c59d87c4SChristoph Hellwig 	return 0;
1788113a5683SChandra Seetharaman 
1789113a5683SChandra Seetharaman error_rele:
179092f8ff73SChandra Seetharaman 	xfs_qm_dqrele(gq);
1791113a5683SChandra Seetharaman 	xfs_qm_dqrele(uq);
1792113a5683SChandra Seetharaman 	return error;
1793c59d87c4SChristoph Hellwig }
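/*
 * Hypothetical usage sketch (editor's addition; the local names are
 * illustrative): a create-style caller allocates the dquots before its
 * transaction and attaches them to the new inode afterwards:
 *
 *	error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
 *				   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *				   &udqp, &gdqp, &pdqp);
 *	if (error)
 *		return error;
 *	// ...reserve a transaction, allocate the inode ip...
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 *	// ...commit, then drop the references taken above...
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 *	xfs_qm_dqrele(pdqp);
 */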
1794c59d87c4SChristoph Hellwig 
1795c59d87c4SChristoph Hellwig /*
1796c59d87c4SChristoph Hellwig  * Actually transfer ownership, and do dquot modifications.
1797c59d87c4SChristoph Hellwig  * These were already reserved.
1798c59d87c4SChristoph Hellwig  */
1799aefe69a4SPavel Reichl struct xfs_dquot *
1800c59d87c4SChristoph Hellwig xfs_qm_vop_chown(
1801aefe69a4SPavel Reichl 	struct xfs_trans	*tp,
1802aefe69a4SPavel Reichl 	struct xfs_inode	*ip,
1803aefe69a4SPavel Reichl 	struct xfs_dquot	**IO_olddq,
1804aefe69a4SPavel Reichl 	struct xfs_dquot	*newdq)
1805c59d87c4SChristoph Hellwig {
1806aefe69a4SPavel Reichl 	struct xfs_dquot	*prevdq;
1807c59d87c4SChristoph Hellwig 	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1808c59d87c4SChristoph Hellwig 				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1809c59d87c4SChristoph Hellwig 
1810c59d87c4SChristoph Hellwig 
1811c59d87c4SChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1812149e53afSChristoph Hellwig 	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
1813c59d87c4SChristoph Hellwig 
1814c59d87c4SChristoph Hellwig 	/* old dquot */
1815c59d87c4SChristoph Hellwig 	prevdq = *IO_olddq;
1816c59d87c4SChristoph Hellwig 	ASSERT(prevdq);
1817c59d87c4SChristoph Hellwig 	ASSERT(prevdq != newdq);
1818c59d87c4SChristoph Hellwig 
18196e73a545SChristoph Hellwig 	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
1820c59d87c4SChristoph Hellwig 	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1821c59d87c4SChristoph Hellwig 
1822c59d87c4SChristoph Hellwig 	/* the sparkling new dquot */
18236e73a545SChristoph Hellwig 	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
1824c59d87c4SChristoph Hellwig 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1825c59d87c4SChristoph Hellwig 
1826c59d87c4SChristoph Hellwig 	/*
18271aecf373SDarrick J. Wong 	 * Back when we made quota reservations for the chown, we reserved the
18281aecf373SDarrick J. Wong 	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
18291aecf373SDarrick J. Wong 	 * switched the dquots, decrease the new dquot's block reservation
18301aecf373SDarrick J. Wong 	 * (having already bumped up the real counter) so that we don't have
18311aecf373SDarrick J. Wong 	 * any reservation to give back when we commit.
18321aecf373SDarrick J. Wong 	 */
18331aecf373SDarrick J. Wong 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
18341aecf373SDarrick J. Wong 			-ip->i_delayed_blks);
18351aecf373SDarrick J. Wong 
18361aecf373SDarrick J. Wong 	/*
18371aecf373SDarrick J. Wong 	 * Give the incore reservation for delalloc blocks back to the old
18381aecf373SDarrick J. Wong 	 * dquot.  We don't normally handle delalloc quota reservations
18391aecf373SDarrick J. Wong 	 * transactionally, so just lock the dquot and subtract from the
18401aecf373SDarrick J. Wong 	 * reservation.  Dirty the transaction because it's too late to turn
18411aecf373SDarrick J. Wong 	 * back now.
18421aecf373SDarrick J. Wong 	 */
18431aecf373SDarrick J. Wong 	tp->t_flags |= XFS_TRANS_DIRTY;
18441aecf373SDarrick J. Wong 	xfs_dqlock(prevdq);
18451aecf373SDarrick J. Wong 	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
18461aecf373SDarrick J. Wong 	prevdq->q_blk.reserved -= ip->i_delayed_blks;
18471aecf373SDarrick J. Wong 	xfs_dqunlock(prevdq);
18481aecf373SDarrick J. Wong 
18491aecf373SDarrick J. Wong 	/*
185078e55892SChristoph Hellwig 	 * Take an extra reference, because the inode is going to keep
185178e55892SChristoph Hellwig 	 * this dquot pointer even after the trans_commit.
1852c59d87c4SChristoph Hellwig 	 */
185378e55892SChristoph Hellwig 	*IO_olddq = xfs_qm_dqhold(newdq);
1854c59d87c4SChristoph Hellwig 
1855c59d87c4SChristoph Hellwig 	return prevdq;
1856c59d87c4SChristoph Hellwig }
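/*
 * Editor's worked example: with i_nblocks == 10 and i_delayed_blks == 3,
 * the chown path reserved 13 blocks against the new dquot.  Above, the
 * 10 ondisk blocks move old -> new as real usage; the transaction's
 * reservation is trimmed by 3 so commit hands nothing back, leaving
 * those 3 blocks reserved incore on the new dquot for the outstanding
 * delalloc extent; and the matching 3-block reservation is stripped
 * from the old dquot under its lock.
 */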
1857c59d87c4SChristoph Hellwig 
1858c59d87c4SChristoph Hellwig int
1859c59d87c4SChristoph Hellwig xfs_qm_vop_rename_dqattach(
1860c59d87c4SChristoph Hellwig 	struct xfs_inode	**i_tab)
1861c59d87c4SChristoph Hellwig {
1862c59d87c4SChristoph Hellwig 	struct xfs_mount	*mp = i_tab[0]->i_mount;
1863c59d87c4SChristoph Hellwig 	int			i;
1864c59d87c4SChristoph Hellwig 
1865149e53afSChristoph Hellwig 	if (!XFS_IS_QUOTA_ON(mp))
1866c59d87c4SChristoph Hellwig 		return 0;
1867c59d87c4SChristoph Hellwig 
1868c59d87c4SChristoph Hellwig 	for (i = 0; (i < 4 && i_tab[i]); i++) {
1869c59d87c4SChristoph Hellwig 		struct xfs_inode	*ip = i_tab[i];
1870c59d87c4SChristoph Hellwig 		int			error;
1871c59d87c4SChristoph Hellwig 
1872c59d87c4SChristoph Hellwig 		/*
1873c59d87c4SChristoph Hellwig 		 * Watch out for duplicate entries in the table.
1874c59d87c4SChristoph Hellwig 		 */
1875c59d87c4SChristoph Hellwig 		if (i == 0 || ip != i_tab[i-1]) {
1876c59d87c4SChristoph Hellwig 			if (XFS_NOT_DQATTACHED(mp, ip)) {
1877c14cfccaSDarrick J. Wong 				error = xfs_qm_dqattach(ip);
1878c59d87c4SChristoph Hellwig 				if (error)
1879c59d87c4SChristoph Hellwig 					return error;
1880c59d87c4SChristoph Hellwig 			}
1881c59d87c4SChristoph Hellwig 		}
1882c59d87c4SChristoph Hellwig 	}
1883c59d87c4SChristoph Hellwig 	return 0;
1884c59d87c4SChristoph Hellwig }
1885c59d87c4SChristoph Hellwig 
1886c59d87c4SChristoph Hellwig void
1887c59d87c4SChristoph Hellwig xfs_qm_vop_create_dqattach(
1888c59d87c4SChristoph Hellwig 	struct xfs_trans	*tp,
1889c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip,
1890c59d87c4SChristoph Hellwig 	struct xfs_dquot	*udqp,
189192f8ff73SChandra Seetharaman 	struct xfs_dquot	*gdqp,
189292f8ff73SChandra Seetharaman 	struct xfs_dquot	*pdqp)
1893c59d87c4SChristoph Hellwig {
1894c59d87c4SChristoph Hellwig 	struct xfs_mount	*mp = tp->t_mountp;
1895c59d87c4SChristoph Hellwig 
1896149e53afSChristoph Hellwig 	if (!XFS_IS_QUOTA_ON(mp))
1897c59d87c4SChristoph Hellwig 		return;
1898c59d87c4SChristoph Hellwig 
1899c59d87c4SChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1900c59d87c4SChristoph Hellwig 
190137eb9706SJie Liu 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1902c59d87c4SChristoph Hellwig 		ASSERT(ip->i_udquot == NULL);
1903c51df733SDarrick J. Wong 		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
190478e55892SChristoph Hellwig 
190578e55892SChristoph Hellwig 		ip->i_udquot = xfs_qm_dqhold(udqp);
1906c59d87c4SChristoph Hellwig 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1907c59d87c4SChristoph Hellwig 	}
190837eb9706SJie Liu 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1909c59d87c4SChristoph Hellwig 		ASSERT(ip->i_gdquot == NULL);
1910c51df733SDarrick J. Wong 		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
191154295159SChristoph Hellwig 
191278e55892SChristoph Hellwig 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1913c59d87c4SChristoph Hellwig 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1914c59d87c4SChristoph Hellwig 	}
191537eb9706SJie Liu 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
191692f8ff73SChandra Seetharaman 		ASSERT(ip->i_pdquot == NULL);
1917ceaf603cSChristoph Hellwig 		ASSERT(ip->i_projid == pdqp->q_id);
191892f8ff73SChandra Seetharaman 
191992f8ff73SChandra Seetharaman 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
192092f8ff73SChandra Seetharaman 		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
192192f8ff73SChandra Seetharaman 	}
1922c59d87c4SChristoph Hellwig }
1923c59d87c4SChristoph Hellwig 
1924108523b8SDarrick J. Wong /* Decide if this inode's dquot is near an enforcement boundary. */
1925108523b8SDarrick J. Wong bool
1926108523b8SDarrick J. Wong xfs_inode_near_dquot_enforcement(
1927108523b8SDarrick J. Wong 	struct xfs_inode	*ip,
1928108523b8SDarrick J. Wong 	xfs_dqtype_t		type)
1929108523b8SDarrick J. Wong {
1930108523b8SDarrick J. Wong 	struct xfs_dquot	*dqp;
1931108523b8SDarrick J. Wong 	int64_t			freesp;
1932108523b8SDarrick J. Wong 
1933108523b8SDarrick J. Wong 	/* We only care for quotas that are enabled and enforced. */
1934108523b8SDarrick J. Wong 	dqp = xfs_inode_dquot(ip, type);
1935108523b8SDarrick J. Wong 	if (!dqp || !xfs_dquot_is_enforced(dqp))
1936108523b8SDarrick J. Wong 		return false;
1937108523b8SDarrick J. Wong 
1938108523b8SDarrick J. Wong 	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1939108523b8SDarrick J. Wong 	    xfs_dquot_res_over_limits(&dqp->q_rtb))
1940108523b8SDarrick J. Wong 		return true;
1941108523b8SDarrick J. Wong 
1942108523b8SDarrick J. Wong 	/* For space on the data device, check the various thresholds. */
1943108523b8SDarrick J. Wong 	if (!dqp->q_prealloc_hi_wmark)
1944108523b8SDarrick J. Wong 		return false;
1945108523b8SDarrick J. Wong 
1946108523b8SDarrick J. Wong 	if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1947108523b8SDarrick J. Wong 		return false;
1948108523b8SDarrick J. Wong 
1949108523b8SDarrick J. Wong 	if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1950108523b8SDarrick J. Wong 		return true;
1951108523b8SDarrick J. Wong 
1952108523b8SDarrick J. Wong 	freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1953108523b8SDarrick J. Wong 	if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
1954108523b8SDarrick J. Wong 		return true;
1955108523b8SDarrick J. Wong 
1956108523b8SDarrick J. Wong 	return false;
1957108523b8SDarrick J. Wong }
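/*
 * Editor's example of the last threshold: with a hypothetical
 * q_prealloc_hi_wmark == 1000 blocks and a 5% band of
 * q_low_space[XFS_QLOWSP_5_PCNT] == 50, a dquot with
 * q_blk.reserved == 960 sees freesp == 40 < 50 and reports "near
 * enforcement", while q_blk.reserved == 900 (freesp == 100) does not.
 */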
1958