// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_bmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Convert a scrub type code to a DQ flag, or return 0 if error. */
static inline xfs_dqtype_t
xchk_quota_to_dqtype(
	struct xfs_scrub	*sc)
{
	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_UQUOTA:
		return XFS_DQTYPE_USER;
	case XFS_SCRUB_TYPE_GQUOTA:
		return XFS_DQTYPE_GROUP;
	case XFS_SCRUB_TYPE_PQUOTA:
		return XFS_DQTYPE_PROJ;
	default:
		return 0;
	}
}

/* Set us up to scrub a quota. */
int
xchk_setup_quota(
	struct xfs_scrub	*sc)
{
	xfs_dqtype_t		dqtype;
	int			error;

	if (!XFS_IS_QUOTA_ON(sc->mp))
		return -ENOENT;

	dqtype = xchk_quota_to_dqtype(sc);
	if (dqtype == 0)
		return -EINVAL;

	if (!xfs_this_quota_on(sc->mp, dqtype))
		return -ENOENT;

	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	error = xchk_setup_fs(sc);
	if (error)
		return error;

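	/* Install the in-core quota inode as the scrub target. */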
	error = xchk_install_live_inode(sc, xfs_quota_inode(sc->mp, dqtype));
	if (error)
		return error;

	xchk_ilock(sc, XFS_ILOCK_EXCL);
	return 0;
}

/* Quotas. */

struct xchk_quota_info {
	struct xfs_scrub	*sc;
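	/* id of the last dquot we checked, to make sure ids always increase */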
	xfs_dqid_t		last_id;
};

/* Scrub the fields in an individual quota item. */
STATIC int
xchk_quota_item(
	struct xfs_dquot	*dq,
	xfs_dqtype_t		dqtype,
	void			*priv)
{
	struct xchk_quota_info	*sqi = priv;
	struct xfs_scrub	*sc = sqi->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_fileoff_t		offset;
	xfs_ino_t		fs_icount;
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * Except for the root dquot, the actual dquot we got must either have
	 * the same or higher id as we saw before.
	 */
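	/* File offset of the block holding this dquot, for error reporting. */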
	offset = dq->q_id / qi->qi_dqperchunk;
	if (dq->q_id && dq->q_id <= sqi->last_id)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	sqi->last_id = dq->q_id;

	/*
	 * Warn if the hard limits are larger than the fs.
	 * Administrators can do this, though in production this seems
	 * suspect, which is why we flag it for review.
	 *
	 * Complain about corruption if the soft limit is greater than
	 * the hard limit.
	 */
	if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_blk.softlimit > dq->q_blk.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_ino.softlimit > dq->q_ino.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the resource counts. */
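	/* Sum the percpu counters for an accurate allocated-inode count. */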
	fs_icount = percpu_counter_sum(&mp->m_icount);

	/*
	 * Check that usage doesn't exceed physical limits. However, on
	 * a reflink filesystem we're allowed to exceed physical space
	 * if there are no quota limits.
	 */
	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
	} else {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
	}
	if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/*
	 * We can violate the hard limits if the admin suddenly sets a
	 * lower limit than the actual usage. However, we flag it for
	 * admin review.
	 */
	if (dq->q_id == 0)
		goto out;

	if (dq->q_blk.hardlimit != 0 &&
	    dq->q_blk.count > dq->q_blk.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit != 0 &&
	    dq->q_ino.count > dq->q_ino.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit != 0 &&
	    dq->q_rtb.count > dq->q_rtb.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

/* Check the quota's data fork. */
STATIC int
xchk_quota_data_fork(
	struct xfs_scrub	*sc)
{
	struct xfs_bmbt_irec	irec = { 0 };
	struct xfs_iext_cursor	icur;
	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		max_dqid_off;
	int			error = 0;

	/* Invoke the fork scrubber. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Check for data fork problems that apply only to quota files. */
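	/* Offset of the block holding the largest possible dquot id. */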
	max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
	ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error))
			break;

		/*
		 * delalloc/unwritten extents or blocks mapped above the highest
		 * quota id shouldn't happen.
		 */
		if (!xfs_bmap_is_written_extent(&irec) ||
		    irec.br_startoff > max_dqid_off ||
		    irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);
			break;
		}
	}

	return error;
}

/* Scrub all of a quota type's items. */
int
xchk_quota(
	struct xfs_scrub	*sc)
{
	struct xchk_quota_info	sqi;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_dqtype_t		dqtype;
	int			error = 0;

	dqtype = xchk_quota_to_dqtype(sc);

	/* Look for problem extents. */
	error = xchk_quota_data_fork(sc);
	if (error)
		goto out;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/*
	 * Check all the quota items. Now that we've checked the quota inode
	 * data fork we have to drop ILOCK_EXCL to use the regular dquot
	 * functions.
	 */
	xchk_iunlock(sc, sc->ilock_flags);
	sqi.sc = sc;
	sqi.last_id = 0;
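	/* Walk every dquot of this type, checking each item in turn. */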
	error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
	xchk_ilock(sc, XFS_ILOCK_EXCL);
	if (error == -ECANCELED)
		error = 0;
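	/* Attribute iteration errors to the last dquot's file offset. */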
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
			sqi.last_id * qi->qi_dqperchunk, &error))
		goto out;

out:
	return error;
}