xref: /openbmc/linux/fs/xfs/scrub/quota.c (revision c0891ac1)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Convert a scrub type code to a dquot type, or return 0 if error. */
static inline xfs_dqtype_t
xchk_quota_to_dqtype(
	struct xfs_scrub	*sc)
{
	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_UQUOTA:
		return XFS_DQTYPE_USER;
	case XFS_SCRUB_TYPE_GQUOTA:
		return XFS_DQTYPE_GROUP;
	case XFS_SCRUB_TYPE_PQUOTA:
		return XFS_DQTYPE_PROJ;
	default:
		return 0;
	}
}

/* Set us up to scrub a quota. */
int
xchk_setup_quota(
	struct xfs_scrub	*sc)
{
	xfs_dqtype_t		dqtype;
	int			error;

	if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
		return -ENOENT;

	dqtype = xchk_quota_to_dqtype(sc);
	if (dqtype == 0)
		return -EINVAL;
	sc->flags |= XCHK_HAS_QUOTAOFFLOCK;
	mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
	if (!xfs_this_quota_on(sc->mp, dqtype))
		return -ENOENT;
	error = xchk_setup_fs(sc);
	if (error)
		return error;
	sc->ip = xfs_quota_inode(sc->mp, dqtype);
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
	sc->ilock_flags = XFS_ILOCK_EXCL;
	return 0;
}

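/*
 * Illustrative sketch, not part of the original file: xchk_setup_quota()
 * sets XCHK_HAS_QUOTAOFFLOCK *before* taking qi_quotaofflock so that the
 * common scrub teardown path knows to drop the mutex even when setup
 * bails out early (for example when this quota type is not enabled).
 * The names below are hypothetical stand-ins; only the shape of the
 * "record the flag, then lock, let teardown clean up" pattern is shown.
 */
struct example_scrub_ctx {
	unsigned int	flags;		/* state visible to teardown */
	int		lock_held;	/* stands in for a real mutex */
};

#define EXAMPLE_HAS_LOCK	(1U << 0)

static int
example_setup_with_lock(
	struct example_scrub_ctx	*ctx,
	int				resource_enabled)
{
	ctx->flags |= EXAMPLE_HAS_LOCK;	/* remember for teardown */
	ctx->lock_held = 1;		/* "mutex_lock(...)" */
	if (!resource_enabled)
		return -1;		/* early exit; teardown still unlocks */
	return 0;
}

static void
example_teardown(
	struct example_scrub_ctx	*ctx)
{
	if (ctx->flags & EXAMPLE_HAS_LOCK) {
		ctx->lock_held = 0;	/* "mutex_unlock(...)" */
		ctx->flags &= ~EXAMPLE_HAS_LOCK;
	}
}
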
/* Quotas. */

struct xchk_quota_info {
	struct xfs_scrub	*sc;
	xfs_dqid_t		last_id;
};

/* Scrub the fields in an individual quota item. */
STATIC int
xchk_quota_item(
	struct xfs_dquot	*dq,
	xfs_dqtype_t		dqtype,
	void			*priv)
{
	struct xchk_quota_info	*sqi = priv;
	struct xfs_scrub	*sc = sqi->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_fileoff_t		offset;
	xfs_ino_t		fs_icount;
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return -ECANCELED;

	/*
	 * Except for the root dquot, each dquot we see must have a higher
	 * id than the one before it.
	 */
	offset = dq->q_id / qi->qi_dqperchunk;
	if (dq->q_id && dq->q_id <= sqi->last_id)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	sqi->last_id = dq->q_id;

	/*
	 * Warn if the hard limits are larger than the fs.
	 * Administrators can do this, though in production this seems
	 * suspect, which is why we flag it for review.
	 *
	 * Complain about corruption if the soft limit is greater than
	 * the hard limit.
	 */
	if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_blk.softlimit > dq->q_blk.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_ino.softlimit > dq->q_ino.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the resource counts. */
	fs_icount = percpu_counter_sum(&mp->m_icount);

	/*
	 * Check that usage doesn't exceed physical limits.  However, on
	 * a reflink filesystem we're allowed to exceed physical space
	 * if there are no quota limits.
	 */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
	} else {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
	}
	if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/*
	 * We can violate the hard limits if the admin suddenly sets a
	 * lower limit than the actual usage.  However, we flag it for
	 * admin review.
	 */
	if (dq->q_id == 0)
		goto out;

	if (dq->q_blk.hardlimit != 0 &&
	    dq->q_blk.count > dq->q_blk.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit != 0 &&
	    dq->q_ino.count > dq->q_ino.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit != 0 &&
	    dq->q_rtb.count > dq->q_rtb.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

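/*
 * Illustrative sketch, not part of the original file: two of the checks
 * above in standalone form.  A dquot id maps to a quota-file block as
 * id / dqperchunk (the same math as "dq->q_id / qi->qi_dqperchunk"), and
 * a soft limit above its hard limit is flagged as corruption.  The block
 * size and on-disk dquot record size below are hypothetical stand-ins;
 * the real qi_dqperchunk is derived at mount time.
 */
static inline unsigned long long
example_dqid_to_fileoff(
	unsigned int		dqid,		/* quota id to locate */
	unsigned long long	blocksize,	/* hypothetical fs block size */
	unsigned long long	dqrecsize)	/* hypothetical dquot record size */
{
	unsigned long long	dqperchunk = blocksize / dqrecsize;

	return dqid / dqperchunk;
}

/* Returns nonzero if a (soft, hard) limit pair is self-consistent. */
static inline int
example_limits_sane(
	unsigned long long	softlimit,
	unsigned long long	hardlimit)
{
	return softlimit <= hardlimit;
}
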
/* Check the quota's data fork. */
STATIC int
xchk_quota_data_fork(
	struct xfs_scrub	*sc)
{
	struct xfs_bmbt_irec	irec = { 0 };
	struct xfs_iext_cursor	icur;
	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		max_dqid_off;
	int			error = 0;

	/* Invoke the fork scrubber. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Check for data fork problems that apply only to quota files. */
	max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
	ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error))
			break;
		/*
		 * delalloc extents or blocks mapped above the highest
		 * quota id shouldn't happen.
		 */
		if (isnullstartblock(irec.br_startblock) ||
		    irec.br_startoff > max_dqid_off ||
		    irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);
			break;
		}
	}

	return error;
}

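/*
 * Illustrative sketch, not part of the original file: the data fork walk
 * above rejects delalloc extents and any mapping that reaches past the
 * block holding the highest possible quota id (~0U).  A standalone
 * version of that bounds test might look like this; dqperchunk is a
 * hypothetical dquots-per-block value standing in for qi_dqperchunk.
 */
static inline int
example_extent_in_dqid_range(
	unsigned long long	startoff,	/* first block of the mapping */
	unsigned long long	blockcount,	/* blocks in the mapping */
	unsigned long long	dqperchunk)	/* dquots per fs block */
{
	unsigned long long	max_dqid_off = 0xffffffffULL / dqperchunk;

	return startoff <= max_dqid_off &&
	       startoff + blockcount - 1 <= max_dqid_off;
}
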
/* Scrub all of a quota type's items. */
int
xchk_quota(
	struct xfs_scrub	*sc)
{
	struct xchk_quota_info	sqi;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_dqtype_t		dqtype;
	int			error = 0;

	dqtype = xchk_quota_to_dqtype(sc);

	/* Look for problem extents. */
	error = xchk_quota_data_fork(sc);
	if (error)
		goto out;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/*
	 * Check all the quota items.  Now that we've checked the quota inode
	 * data fork, we have to drop ILOCK_EXCL to use the regular dquot
	 * functions.
	 */
	xfs_iunlock(sc->ip, sc->ilock_flags);
	sc->ilock_flags = 0;
	sqi.sc = sc;
	sqi.last_id = 0;
	error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
	sc->ilock_flags = XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	if (error == -ECANCELED)
		error = 0;
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
			sqi.last_id * qi->qi_dqperchunk, &error))
		goto out;

out:
	return error;
}
249