// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"

#include <linux/iversion.h>

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence, for readahead, the
 * buffer may be invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO
 * because all we want to do is say readahead failed; there is no one to report
 * the error to, so this distinguishes it from a non-readahead verifier
 * failure. Changes to this readahead error behaviour also need to be
 * reflected in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_agnumber_t	agno;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;
		xfs_agino_t	unlinked_ino;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
		di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
			xfs_dinode_good_version(&mp->m_sb, dip->di_version) &&
			xfs_verify_agino_or_null(mp, agno, unlinked_ino);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, dip, sizeof(*dip),
					NULL);
			return;
		}
	}
}

static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

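/*
 * Inode buffer verifier ops.  The regular and readahead variants differ only
 * in the read verifier: the readahead flavour makes xfs_inode_buf_verify()
 * downgrade a corrupt buffer to -EIO and clear XBF_DONE so that a later real
 * read retries from disk and reports the corruption properly.
 */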
const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

/*
 * Map an inode to the buffer containing the on-disk version of the inode.
 * On success, *bpp points to that buffer and, if dipp is non-NULL, *dipp
 * points to the on-disk inode within it.
 *
 * If a non-zero error is returned, the contents of *bpp and *dipp are
 * undefined.
 */
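/*
 * Illustrative caller sketch (hypothetical, not a function in this file;
 * assumes a mount "mp", a transaction "tp" and an incore inode "ip" whose
 * ip->i_imap has already been filled in):
 *
 *	struct xfs_dinode	*dip;
 *	struct xfs_buf		*bp;
 *	int			error;
 *
 *	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0);
 *	if (error)
 *		return error;
 *	... read or modify *dip, then xfs_trans_brelse(tp, bp) when done ...
 */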
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags)
{
	struct xfs_buf		*bp;
	int			error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		ASSERT(error != -EAGAIN || (buf_flags & XBF_TRYLOCK));
		return error;
	}

	*bpp = bp;
	if (dipp)
		*dipp = xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}

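/*
 * Initialise the incore inode from the on-disk inode.  The dinode is
 * verified first; for unused inodes (zero mode) only the flush iteration
 * count, generation and mode are copied.  Otherwise the core fields are
 * pulled in, v1 inodes are upgraded to the v2 link count/project ID layout,
 * v3 fields are copied when the superblock supports them, and finally the
 * data fork (and attr fork, if present) is set up, with a COW fork added
 * for reflinked inodes.
 */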
int
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct xfs_icdinode	*to = &ip->i_d;
	struct inode		*inode = VFS_I(ip);
	int			error;
	xfs_failaddr_t		fa;

	ASSERT(ip->i_cowfp == NULL);
	ASSERT(ip->i_afp == NULL);

	fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from,
				sizeof(*from), fa);
		return -EFSCORRUPTED;
	}

	/*
	 * First get the permanent information that is needed to allocate an
	 * inode. If the inode is unused, mode is zero and we shouldn't mess
	 * with the uninitialized part of it.
	 */
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);
	if (!inode->i_mode)
		return 0;

	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 * They will also be unconditionally written back to disk as v2 inodes.
	 */
	if (unlikely(from->di_version == 1)) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		to->di_projid = 0;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
					be16_to_cpu(from->di_projid_lo);
	}

	i_uid_write(inode, be32_to_cpu(from->di_uid));
	i_gid_write(inode, be32_to_cpu(from->di_gid));

	/*
	 * Time is signed, so we need to convert it to a signed 32 bit value
	 * before storing it in the inode timestamp, which may be 64 bit.
	 * Otherwise a time before the epoch is converted to a time long after
	 * the epoch on 64 bit systems.
	 */
	inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
	inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
	inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
	inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
	inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
	inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);

	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_forkoff = from->di_forkoff;
	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
	to->di_flags	= be16_to_cpu(from->di_flags);

	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		inode_set_iversion_queried(inode,
					   be64_to_cpu(from->di_changecount));
		to->di_crtime.tv_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.tv_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
	}

	error = xfs_iformat_data_fork(ip, from);
	if (error)
		return error;
	if (from->di_forkoff) {
		error = xfs_iformat_attr_fork(ip, from);
		if (error)
			goto out_destroy_data_fork;
	}
	if (xfs_is_reflink_inode(ip))
		xfs_ifork_init_cow(ip);
	return 0;

out_destroy_data_fork:
	xfs_idestroy_fork(&ip->i_df);
	return error;
}

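/*
 * Copy the incore inode back out into on-disk (dinode) format.  V3 inodes
 * additionally record the change count, creation time, flags2, CoW extent
 * size hint, inode number, LSN and metadata UUID, and zero di_flushiter;
 * v2 inodes keep the legacy flush iteration counter instead.
 */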
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	to->di_onlink = 0;

	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = cpu_to_be32(i_uid_read(inode));
	to->di_gid = cpu_to_be32(i_gid_read(inode));
	to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
	to->di_projid_hi = cpu_to_be16(from->di_projid >> 16);

	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
	to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp));
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = xfs_ifork_format(ip->i_afp);
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);

	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		to->di_version = 3;
		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.tv_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.tv_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

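/*
 * Convert a logged inode core (struct xfs_log_dinode, which is kept in CPU
 * byte order in the log) back into on-disk dinode format; used when inode
 * log items are replayed during recovery.
 */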
void
xfs_log_dinode_to_disk(
	struct xfs_log_dinode	*from,
	struct xfs_dinode	*to)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = 0;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));

	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

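/*
 * Check that the given fork of an on-disk inode has a format, extent count
 * and (for local forks) size that are consistent with each other and with
 * the space available in the inode.
 */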
static xfs_failaddr_t
xfs_dinode_verify_fork(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp,
	int			whichfork)
{
	uint32_t		di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);

	switch (XFS_DFORK_FORMAT(dip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		/*
		 * no local regular files yet
		 */
		if (whichfork == XFS_DATA_FORK) {
			if (S_ISREG(be16_to_cpu(dip->di_mode)))
				return __this_address;
			if (be64_to_cpu(dip->di_size) >
					XFS_DFORK_SIZE(dip, mp, whichfork))
				return __this_address;
		}
		if (di_nextents)
			return __this_address;
		break;
	case XFS_DINODE_FMT_EXTENTS:
		if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
			return __this_address;
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_ATTR_FORK) {
			if (di_nextents > MAXAEXTNUM)
				return __this_address;
		} else if (di_nextents > MAXEXTNUM) {
			return __this_address;
		}
		break;
	default:
		return __this_address;
	}
	return NULL;
}

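/*
 * Sanity-check di_forkoff: device-format inodes use a fixed fork offset,
 * and every other format must keep the fork offset within the inode's
 * literal area.
 */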
static xfs_failaddr_t
xfs_dinode_verify_forkoff(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp)
{
	if (!dip->di_forkoff)
		return NULL;

	switch (dip->di_format) {
	case XFS_DINODE_FMT_DEV:
		if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
			return __this_address;
		break;
	case XFS_DINODE_FMT_LOCAL:	/* fall through ... */
	case XFS_DINODE_FMT_EXTENTS:	/* fall through ... */
	case XFS_DINODE_FMT_BTREE:
		if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
			return __this_address;
		break;
	default:
		return __this_address;
	}
	return NULL;
}

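/*
 * Verify an on-disk inode: magic number, v3 CRC/inode number/UUID, size,
 * mode vs. fork format consistency, fork offset, extent counts, flag
 * combinations and the extent size hints.  Returns the address of the
 * failing check, or NULL if the inode looks sane.
 */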
xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_failaddr_t		fa;
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;
	uint64_t		di_size;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;

	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		if (!xfs_sb_version_has_v3inode(&mp->m_sb))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}

	/* don't allow invalid i_size */
	di_size = be64_to_cpu(dip->di_size);
	if (di_size & (1ULL << 63))
		return __this_address;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
		return __this_address;

	/* Fork checks carried over from xfs_iformat_fork */
	if (mode &&
	    be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
			be64_to_cpu(dip->di_nblocks))
		return __this_address;

	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
		return __this_address;

	flags = be16_to_cpu(dip->di_flags);

	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		return __this_address;

	/* check for illegal values of forkoff */
	fa = xfs_dinode_verify_forkoff(dip, mp);
	if (fa)
		return fa;

	/* Do we have appropriate data fork formats for the mode? */
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (dip->di_format != XFS_DINODE_FMT_DEV)
			return __this_address;
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
		if (fa)
			return fa;
		break;
	case 0:
		/* Uninitialized inode ok. */
		break;
	default:
		return __this_address;
	}

	if (dip->di_forkoff) {
		fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
		if (fa)
			return fa;
	} else {
		/*
		 * If there is no fork offset, this may be a freshly-made inode
		 * in a new disk cluster, in which case di_aformat is zeroed.
		 * Otherwise, such an inode must be in EXTENTS format; this goes
		 * for freed inodes as well.
		 */
		switch (dip->di_aformat) {
		case 0:
		case XFS_DINODE_FMT_EXTENTS:
			break;
		default:
			return __this_address;
		}
		if (dip->di_anextents)
			return __this_address;
	}

	/* extent size hint validation */
	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		return fa;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;

	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	     !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	/* only regular files get reflink */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
		return __this_address;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;

	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return __this_address;

	/* COW extent size hint validation */
	fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
			mode, flags, flags2);
	if (fa)
		return fa;

	return NULL;
}

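/*
 * Compute and stamp the inode CRC.  Only v3 inodes have a CRC field, so this
 * is a no-op for older inode versions.
 */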
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Validate di_extsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_extsize().
 * These functions must be kept in sync with each other.
 */
xfs_failaddr_t
xfs_inode_validate_extsize(
	struct xfs_mount		*mp,
	uint32_t			extsize,
	uint16_t			mode,
	uint16_t			flags)
{
	bool				rt_flag;
	bool				hint_flag;
	bool				inherit_flag;
	uint32_t			extsize_bytes;
	uint32_t			blocksize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
	inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
	extsize_bytes = XFS_FSB_TO_B(mp, extsize);

	if (rt_flag)
		blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
	else
		blocksize_bytes = mp->m_sb.sb_blocksize;

	if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && !S_ISREG(mode))
		return __this_address;

	if (inherit_flag && !S_ISDIR(mode))
		return __this_address;

	if ((hint_flag || inherit_flag) && extsize == 0)
		return __this_address;

	/* free inodes get flags set to zero but extsize remains */
	if (mode && !(hint_flag || inherit_flag) && extsize != 0)
		return __this_address;

	if (extsize_bytes % blocksize_bytes)
		return __this_address;

	if (extsize > MAXEXTLEN)
		return __this_address;

	if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}
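/*
 * Worked example of the extsize checks above (illustrative numbers): on a
 * filesystem with 4 KiB blocks and no realtime device, a regular file with
 * XFS_DIFLAG_EXTSIZE set and di_extsize = 16 blocks (64 KiB) passes, since
 * 64 KiB is a whole number of blocks, 16 does not exceed MAXEXTLEN and
 * (assuming a normally sized AG) 16 <= sb_agblocks / 2.  The same inode with
 * the flag set but di_extsize = 0, or with a nonzero di_extsize and neither
 * hint flag set, fails the corresponding checks above.
 */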

/*
 * Validate di_cowextsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
 * These functions must be kept in sync with each other.
 */
xfs_failaddr_t
xfs_inode_validate_cowextsize(
	struct xfs_mount		*mp,
	uint32_t			cowextsize,
	uint16_t			mode,
	uint16_t			flags,
	uint64_t			flags2)
{
	bool				rt_flag;
	bool				hint_flag;
	uint32_t			cowextsize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
	cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);

	if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && cowextsize == 0)
		return __this_address;

	/* free inodes get flags set to zero but cowextsize remains */
	if (mode && !hint_flag && cowextsize != 0)
		return __this_address;

	if (hint_flag && rt_flag)
		return __this_address;

	if (cowextsize_bytes % mp->m_sb.sb_blocksize)
		return __this_address;

	if (cowextsize > MAXEXTLEN)
		return __this_address;

	if (cowextsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}