// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_bmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Convert a scrub type code to a DQ flag, or return 0 if error. */
static inline xfs_dqtype_t
xchk_quota_to_dqtype(
	struct xfs_scrub	*sc)
{
	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_UQUOTA:
		return XFS_DQTYPE_USER;
	case XFS_SCRUB_TYPE_GQUOTA:
		return XFS_DQTYPE_GROUP;
	case XFS_SCRUB_TYPE_PQUOTA:
		return XFS_DQTYPE_PROJ;
	default:
		return 0;
	}
}

/* Set us up to scrub a quota. */
int
xchk_setup_quota(
	struct xfs_scrub	*sc)
{
	xfs_dqtype_t		dqtype;
	int			error;

	if (!XFS_IS_QUOTA_ON(sc->mp))
		return -ENOENT;

	dqtype = xchk_quota_to_dqtype(sc);
	if (dqtype == 0)
		return -EINVAL;

	if (!xfs_this_quota_on(sc->mp, dqtype))
		return -ENOENT;

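	/*
	 * If this scrub needs to wait out chains of deferred (intent) items,
	 * enable the drain fsgate before setting anything else up.
	 */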
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	error = xchk_setup_fs(sc);
	if (error)
		return error;
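	/*
	 * Use the quota inode for this quota type as the scrub target and
	 * take ILOCK_EXCL; sc->ilock_flags records what we hold so the
	 * unlock paths (and teardown) know what to release.
	 */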
	sc->ip = xfs_quota_inode(sc->mp, dqtype);
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
	sc->ilock_flags = XFS_ILOCK_EXCL;
	return 0;
}

/* Quotas. */

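/*
 * Shared state for the dquot walk: last_id remembers the id of the previous
 * dquot we checked so we can verify that ids always increase.
 */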
struct xchk_quota_info {
	struct xfs_scrub	*sc;
	xfs_dqid_t		last_id;
};

/* Scrub the fields in an individual quota item. */
STATIC int
xchk_quota_item(
	struct xfs_dquot	*dq,
	xfs_dqtype_t		dqtype,
	void			*priv)
{
	struct xchk_quota_info	*sqi = priv;
	struct xfs_scrub	*sc = sqi->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_fileoff_t		offset;
	xfs_ino_t		fs_icount;
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * Except for the root dquot, each dquot we're given must have a
	 * strictly higher id than the previous one we saw.
	 */
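	/* File offset of the data fork block that holds this dquot. */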
	offset = dq->q_id / qi->qi_dqperchunk;
	if (dq->q_id && dq->q_id <= sqi->last_id)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	sqi->last_id = dq->q_id;

	/*
	 * Warn if the hard limits are larger than the fs.
	 * Administrators can do this, though in production this seems
	 * suspect, which is why we flag it for review.
	 *
	 * Complain about corruption if the soft limit is greater than
	 * the hard limit.
	 */
	if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_blk.softlimit > dq->q_blk.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_ino.softlimit > dq->q_ino.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the resource counts. */
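	/* Sum the per-cpu counter to get the fs-wide inode count. */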
	fs_icount = percpu_counter_sum(&mp->m_icount);

	/*
	 * Check that usage doesn't exceed physical limits.  However, on a
	 * reflink filesystem an owner is charged for every mapping of a
	 * shared block, so the block count can legitimately exceed the
	 * physical space; only warn in that case.
	 */
	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
	} else {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
	}
	if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/*
	 * We can violate the hard limits if the admin suddenly sets a
	 * lower limit than the actual usage.  However, we flag it for
	 * admin review.
	 */
	if (dq->q_id == 0)
		goto out;

	if (dq->q_blk.hardlimit != 0 &&
	    dq->q_blk.count > dq->q_blk.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit != 0 &&
	    dq->q_ino.count > dq->q_ino.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit != 0 &&
	    dq->q_rtb.count > dq->q_rtb.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

/* Check the quota's data fork. */
STATIC int
xchk_quota_data_fork(
	struct xfs_scrub	*sc)
{
	struct xfs_bmbt_irec	irec = { 0 };
	struct xfs_iext_cursor	icur;
	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		max_dqid_off;
	int			error = 0;

	/* Invoke the fork scrubber. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Check for data fork problems that apply only to quota files. */
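	/*
	 * Compute the highest data fork offset that could hold a dquot with
	 * the maximum 32-bit quota id.
	 */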
	max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
	ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error))
			break;

		/*
		 * delalloc/unwritten extents or blocks mapped above the highest
		 * quota id shouldn't happen.
		 */
		if (!xfs_bmap_is_written_extent(&irec) ||
		    irec.br_startoff > max_dqid_off ||
		    irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);
			break;
		}
	}

	return error;
}

/* Scrub all of a quota type's items. */
int
xchk_quota(
	struct xfs_scrub	*sc)
{
	struct xchk_quota_info	sqi;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_dqtype_t		dqtype;
	int			error = 0;

	dqtype = xchk_quota_to_dqtype(sc);

	/* Look for problem extents. */
	error = xchk_quota_data_fork(sc);
	if (error)
		goto out;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/*
	 * Check all the quota items.  Now that we've checked the quota inode
	 * data fork we have to drop ILOCK_EXCL to use the regular dquot
	 * functions.
	 */
	xfs_iunlock(sc->ip, sc->ilock_flags);
	sc->ilock_flags = 0;
	sqi.sc = sc;
	sqi.last_id = 0;
	error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
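	/*
	 * Retake the ILOCK we dropped above so that we return (and tear down)
	 * with the quota inode locked, as xchk_setup_quota left it.
	 */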
	sc->ilock_flags = XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	if (error == -ECANCELED)
		error = 0;
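	/*
	 * Process any remaining error as a data fork error, reported at an
	 * offset derived from the last dquot id we checked.
	 */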
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
			sqi.last_id * qi->qi_dqperchunk, &error))
		goto out;

out:
	return error;
}