xref: /openbmc/linux/fs/xfs/libxfs/xfs_inode_buf.c (revision 0da85d1e)
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
#endif
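
/*
 * Note: on non-DEBUG builds xfs_inobp_check() is expected to compile away
 * entirely (the header normally provides an empty stub), so the call in
 * xfs_inode_buf_verify() below costs nothing on production kernels.
 */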

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. For readahead, therefore, the
 * buffer may be invalid.
 *
 * If the readahead buffer is invalid, we don't want to mark it with an error,
 * but we do want to clear the DONE status of the buffer so that a followup read
 * will re-read it from disk. This ensures that we don't get unnecessary
 * warnings during log recovery or unnecessary panics on debug kernels.
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer.
	 */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (struct xfs_dinode *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			    XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				return;
			}

			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			xfs_verifier_error(bp);
#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
		}
	}
	xfs_inobp_check(mp, bp);
}


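/*
 * The read, readahead and write verifiers below are thin wrappers around
 * xfs_inode_buf_verify(); only the readahead variant requests the relaxed
 * "clear XBF_DONE and let a later read retry" behaviour described above.
 */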
static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
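
/*
 * How these ops tables get used: xfs_imap_to_bp() below hands
 * &xfs_inode_buf_ops to xfs_trans_read_buf(), so the read verifier runs as
 * soon as the buffer comes off disk:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
 *				   (int)imap->im_len, buf_flags, &bp,
 *				   &xfs_inode_buf_ops);
 *
 * Readahead paths are expected to pass &xfs_inode_buf_ra_ops instead (e.g.
 * via xfs_buf_readahead()), so that a not-yet-replayed inode cluster merely
 * clears XBF_DONE instead of raising a corruption error.  The snippet above
 * simply mirrors the call made below; it is illustrative, not additional API.
 */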


/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
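
/*
 * Note that on success the caller is left holding the locked buffer and is
 * responsible for releasing it, typically with xfs_trans_brelse() as
 * xfs_iread() does below.
 */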
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == -EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == -EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return -EINVAL;

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}

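/*
 * Convert the big-endian on-disk inode core into the CPU-native in-core
 * form.  Fields that only exist in version 3 inodes are copied only when
 * the source inode is itself version 3.
 */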
void
xfs_dinode_from_disk(
	xfs_icdinode_t		*to,
	xfs_dinode_t		*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat	= from->di_aformat;
	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
	to->di_flags	= be16_to_cpu(from->di_flags);
	to->di_gen	= be32_to_cpu(from->di_gen);

	if (to->di_version == 3) {
		to->di_changecount = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_ino = be64_to_cpu(from->di_ino);
		to->di_lsn = be64_to_cpu(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
	}
}

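/*
 * Format the in-core inode back into its big-endian on-disk form; the
 * inverse of xfs_dinode_from_disk().  Note that di_flushiter is only
 * meaningful for pre-version-3 inodes and is zeroed when writing a v3 inode.
 */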
void
xfs_dinode_to_disk(
	xfs_dinode_t		*to,
	xfs_icdinode_t		*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

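/*
 * Sanity-check a single on-disk inode: the magic number is checked for all
 * versions, while version 3 inodes additionally have their CRC, stamped
 * inode number and filesystem UUID verified.  Returns false on any mismatch.
 */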
static bool
xfs_dinode_verify(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return false;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return true;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF))
		return false;
	if (be64_to_cpu(dip->di_ino) != ip->i_ino)
		return false;
	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid))
		return false;
	return true;
}

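/*
 * Recompute the CRC of a version 3 on-disk inode.  The checksum covers the
 * whole sb_inodesize region, with the CRC field at XFS_DINODE_CRC_OFF
 * treated as zero by the checksum helpers.
 */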
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	__uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}
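
/*
 * The read and write sides of the checksum pair up: xfs_dinode_verify()
 * above checks the CRC as inodes come off disk, so any path that modifies a
 * v3 on-disk inode is expected to call xfs_dinode_calc_crc() again before
 * the buffer is written back (the inode flush code is one such caller), or
 * the next read would report the inode as corrupt.
 */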

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		ip->i_d.di_magic = XFS_DINODE_MAGIC;
		ip->i_d.di_gen = prandom_u32();
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			ip->i_d.di_version = 3;
			ip->i_d.di_ino = ip->i_ino;
			uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid);
		} else
			ip->i_d.di_version = 2;
		return 0;
	}

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	if (!xfs_dinode_verify(mp, ip, dip)) {
		xfs_alert(mp, "%s: validation failed for inode %lld",
				__func__, ip->i_ino);

		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
		error = -EFSCORRUPTED;
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error) {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat_fork() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or that it relies on being
		 * correct.
		 */
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		if (dip->di_version == 3) {
			ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
			uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
		}

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
	}

	/*
	 * Automatically convert version 1 inode formats in memory to version 2
	 * inode format. If the inode is modified, it will get logged and
	 * rewritten as a version 2 inode. We can do this because we set the
	 * superblock feature bit for v2 inodes unconditionally during mount
	 * and it means the rest of the code can assume the inode version is 2
	 * or higher.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		xfs_set_projid(ip, 0);
	}

	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it.  Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}
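
/*
 * For orientation, the normal inode read path through this file looks
 * roughly like this (illustrative flow only, locking and error handling
 * elided):
 *
 *	xfs_iread(mp, tp, ip, iget_flags)
 *	    xfs_imap()			map inode number to disk location
 *	    xfs_imap_to_bp()		read cluster buffer, runs the
 *					xfs_inode_buf_ops read verifier
 *	    xfs_dinode_verify()		per-inode magic/CRC/uuid checks
 *	    xfs_dinode_from_disk()	endian conversion into ip->i_d
 *	    xfs_iformat_fork()		fork data (implemented elsewhere)
 *	    xfs_trans_brelse()		release the cluster buffer
 */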
477