xref: /openbmc/linux/fs/xfs/libxfs/xfs_inode_buf.c (revision 3932b9ca)
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dinode.h"

/*
 * Check that none of the inodes in the buffer have a zero
 * di_next_unlinked field.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

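	/* number of inodes in this cluster buffer: cluster size / inode size */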
	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
#endif

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed and so has
 * not had the inode cores stamped into it. Hence, for readahead, the buffer
 * may be invalid.
 *
 * If the readahead buffer is invalid, we don't want to mark it with an error,
 * but we do want to clear the DONE status of the buffer so that a followup
 * read will re-read it from disk. This ensures that we don't get unnecessary
 * warnings during log recovery and don't get unnecessary panics on debug
 * kernels.
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (struct xfs_dinode *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			    XFS_DINODE_GOOD_VERSION(dip->di_version);
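		/* XFS_TEST_ERROR is also an error injection hook on debug kernels */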
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				return;
			}

			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			xfs_verifier_error(bp);
#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
		}
	}
	xfs_inobp_check(mp, bp);
}

static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

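/*
 * The readahead ops differ only in the read verifier: on failure the buffer
 * is quietly marked not-done instead of being flagged as corrupt.
 */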
const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
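/*
 * Typical call pattern (sketch based on xfs_iread() below; error handling
 * trimmed):
 *
 *	struct xfs_dinode	*dip;
 *	struct xfs_buf		*bp;
 *
 *	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
 *	if (error)
 *		return error;
 *	...read or modify *dip...
 *	xfs_trans_brelse(tp, bp);
 */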
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;

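	/*
	 * Inode buffers are read unmapped; callers reach individual inodes
	 * through xfs_buf_offset() rather than a contiguous mapping.
	 */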
	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == -EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == -EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return -EINVAL;

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}

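/*
 * Translate the on-disk inode core (big-endian) into the CPU-endian in-core
 * copy held in the xfs_icdinode.
 */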
void
xfs_dinode_from_disk(
	xfs_icdinode_t		*to,
	xfs_dinode_t		*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat	= from->di_aformat;
	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
	to->di_flags	= be16_to_cpu(from->di_flags);
	to->di_gen	= be32_to_cpu(from->di_gen);

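	/* the fields below only exist in the larger v3 (v5 superblock) inode core */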
	if (to->di_version == 3) {
		to->di_changecount = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_ino = be64_to_cpu(from->di_ino);
		to->di_lsn = be64_to_cpu(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
	}
}

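/*
 * Inverse of xfs_dinode_from_disk(): format the in-core inode back into its
 * big-endian on-disk layout.  di_flushiter is only meaningful for pre-v3
 * inodes; v3 inodes always write it as zero.
 */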
void
xfs_dinode_to_disk(
	xfs_dinode_t		*to,
	xfs_icdinode_t		*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

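/*
 * Sanity check an individual inode core: the magic number for all versions;
 * for v3 inodes also the CRC, the inode number stamped in the core and the
 * filesystem UUID.
 */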
static bool
xfs_dinode_verify(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return false;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return true;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF))
		return false;
	if (be64_to_cpu(dip->di_ino) != ip->i_ino)
		return false;
	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid))
		return false;
	return true;
}

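/*
 * Compute and stamp the inode core CRC.  This is a no-op for pre-v3 inodes,
 * which carry no CRC field.
 */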
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	__uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		ip->i_d.di_magic = XFS_DINODE_MAGIC;
		ip->i_d.di_gen = prandom_u32();
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			ip->i_d.di_version = 3;
			ip->i_d.di_ino = ip->i_ino;
			uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid);
		} else
			ip->i_d.di_version = 2;
		return 0;
	}

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	if (!xfs_dinode_verify(mp, ip, dip)) {
		xfs_alert(mp, "%s: validation failed for inode %lld",
				__func__, ip->i_ino);

		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
		error = -EFSCORRUPTED;
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error) {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat_fork() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or that it relies on being
		 * correct.
		 */
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		if (dip->di_version == 3) {
			ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
			uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
		}

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
	}

	/*
	 * Automatically convert version 1 inode formats in memory to version 2
	 * inode format. If the inode is modified, it will get logged and
	 * rewritten as a version 2 inode. We can do this because we set the
	 * superblock feature bit for v2 inodes unconditionally during mount
	 * and it means the rest of the code can assume the inode version is 2
	 * or higher.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		xfs_set_projid(ip, 0);
	}

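	/* a freshly read in-core inode has no delayed allocation blocks yet */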
	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it.  Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}