// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_error.h"

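/* Return how many on-disk dquot records fit into nbblks basic blocks. */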
int
xfs_calc_dquots_per_chunk(
	unsigned int		nbblks)	/* basic block units */
{
	ASSERT(nbblks > 0);
	return BBTOB(nbblks) / sizeof(struct xfs_dqblk);
}

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * The xfs_dqblk structure /contains/ the xfs_disk_dquot structure;
 * we verify them separately because at some points we have only the
 * smaller xfs_disk_dquot structure available.
 */

xfs_failaddr_t
xfs_dquot_verify(
	struct xfs_mount	*mp,
	struct xfs_disk_dquot	*ddq,
	xfs_dqid_t		id)	/* used only during quotacheck */
{
	__u8			ddq_type;

	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks got
	 *    used for user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 *
	 * 2. We don't replay the quota buffers when there's a quotaoff
	 *    logitem, but the allocation itself will be replayed, so we'll
	 *    end up with an uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
		return __this_address;
	if (ddq->d_version != XFS_DQUOT_VERSION)
		return __this_address;

	if (ddq->d_type & ~XFS_DQTYPE_ANY)
		return __this_address;
	ddq_type = ddq->d_type & XFS_DQTYPE_REC_MASK;
	if (ddq_type != XFS_DQTYPE_USER &&
	    ddq_type != XFS_DQTYPE_PROJ &&
	    ddq_type != XFS_DQTYPE_GROUP)
		return __this_address;

	if ((ddq->d_type & XFS_DQTYPE_BIGTIME) &&
	    !xfs_has_bigtime(mp))
		return __this_address;

	if ((ddq->d_type & XFS_DQTYPE_BIGTIME) && !ddq->d_id)
		return __this_address;

	if (id != -1 && id != be32_to_cpu(ddq->d_id))
		return __this_address;

	if (!ddq->d_id)
		return NULL;

	if (ddq->d_blk_softlimit &&
	    be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
	    !ddq->d_btimer)
		return __this_address;

	if (ddq->d_ino_softlimit &&
	    be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
	    !ddq->d_itimer)
		return __this_address;

	if (ddq->d_rtb_softlimit &&
	    be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
	    !ddq->d_rtbtimer)
		return __this_address;

	return NULL;
}

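/*
 * Verify a full on-disk dquot block: check that the metadata UUID matches
 * the superblock on CRC-enabled filesystems, then validate the embedded
 * xfs_disk_dquot record.
 */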
xfs_failaddr_t
xfs_dqblk_verify(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id)	/* used only during quotacheck */
{
	if (xfs_has_crc(mp) &&
	    !uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;

	return xfs_dquot_verify(mp, &dqb->dd_diskdq, id);
}

/*
 * Reset an on-disk dquot block to a sane, empty state for the given id and
 * quota type, recomputing the CRC on CRC-enabled filesystems.
 */
void
xfs_dqblk_repair(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	memset(dqb, 0, sizeof(struct xfs_dqblk));

	dqb->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	dqb->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	dqb->dd_diskdq.d_type = type;
	dqb->dd_diskdq.d_id = cpu_to_be32(id);

	if (xfs_has_crc(mp)) {
		uuid_copy(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}
}

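/*
 * Verify the CRC of every dquot record in the buffer. Returns false on the
 * first bad CRC, reporting a verifier error unless this is a readahead.
 */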
STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_has_crc(mp))
		return true;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF)) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSBADCRC, __func__,
					d, sizeof(*d), __this_address);
			return false;
		}
	}
	return true;
}

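/*
 * Verify every dquot record in the buffer, returning the failure address of
 * the first record that fails verification, or NULL if they are all fine.
 */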
STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*dqb = bp->b_addr;
	xfs_failaddr_t		fa;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just that
	 * they should be increasing monotonically within the buffer. If the
	 * first id is corrupt, then it will fail on the second dquot in the
	 * buffer so corruptions could point to the wrong dquot in this case.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;

		ddq = &dqb[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		fa = xfs_dqblk_verify(mp, &dqb[i], id + i);
		if (fa) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, &dqb[i],
					sizeof(struct xfs_dqblk), fa);
			return fa;
		}
	}

	return NULL;
}

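/* ->verify_struct hook: structural checks only, CRCs are verified on read. */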
static xfs_failaddr_t
xfs_dquot_buf_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	return xfs_dquot_buf_verify(mp, bp, false);
}

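/*
 * Full read verifier: check the CRCs first, then the dquot records. CRC
 * failures are reported by xfs_dquot_buf_verify_crc() itself.
 */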
static void
xfs_dquot_buf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, false))
		return;
	xfs_dquot_buf_verify(mp, bp, false);
}

/*
 * readahead errors are silent and simply leave the buffer as !done so a real
 * read will then be run with the xfs_dquot_buf_ops verifier. See
 * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
 * reporting the failure.
 */
static void
xfs_dquot_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, true) ||
	    xfs_dquot_buf_verify(mp, bp, true) != NULL) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
	}
}

/*
 * we don't calculate the CRC here as that is done when the dquot is flushed to
 * the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	xfs_dquot_buf_verify(mp, bp, false);
}

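/* Buffer ops used for regular dquot buffer reads and writes. */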
const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.name = "xfs_dquot",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
	.verify_struct = xfs_dquot_buf_verify_struct,
};

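/* Readahead variant: read failures are silent, see the comment above. */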
const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
	.name = "xfs_dquot_ra",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_readahead_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};

/* Convert an on-disk timer value into an incore timer value. */
time64_t
xfs_dquot_from_disk_ts(
	struct xfs_disk_dquot	*ddq,
	__be32			dtimer)
{
	uint32_t		t = be32_to_cpu(dtimer);

	if (t != 0 && (ddq->d_type & XFS_DQTYPE_BIGTIME))
		return xfs_dq_bigtime_to_unix(t);

	return t;
}

/* Convert an incore timer value into an on-disk timer value. */
__be32
xfs_dquot_to_disk_ts(
	struct xfs_dquot	*dqp,
	time64_t		timer)
{
	uint32_t		t = timer;

	if (timer != 0 && (dqp->q_type & XFS_DQTYPE_BIGTIME))
		t = xfs_dq_unix_to_bigtime(timer);

	return cpu_to_be32(t);
}
326