xref: /openbmc/linux/fs/xfs/xfs_inode_item.c (revision 55e43d6abd078ed6d219902ce8cb4d68e3c993ba)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_error.h"

#include <linux/iversion.h>

struct kmem_cache	*xfs_ili_cache;		/* inode log item */

static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_inode_log_item, ili_item);
}

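/*
 * Sort inode items by inode number during precommit processing so that all
 * the inode cluster buffers in a transaction are always locked in the same
 * order. See the comment above xfs_inode_item_precommit() for details.
 */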
static uint64_t
xfs_inode_item_sort(
	struct xfs_log_item	*lip)
{
	return INODE_ITEM(lip)->ili_inode->i_ino;
}

#ifdef DEBUG_EXPENSIVE
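/*
 * In expensive debug builds, check that the inode we are about to commit to
 * the log still passes the on-disk inode verifier. Format the incore inode
 * into a temporary on-disk inode buffer and run xfs_dinode_verify() on it;
 * if verification fails, report the corruption and shut the filesystem down
 * rather than committing garbage to the log.
 */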
static void
xfs_inode_item_precommit_check(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dinode	*dip;
	xfs_failaddr_t		fa;

	dip = kzalloc(mp->m_sb.sb_inodesize, GFP_KERNEL | GFP_NOFS);
	if (!dip) {
		ASSERT(dip != NULL);
		return;
	}

	xfs_inode_to_disk(ip, dip, 0);
	xfs_dinode_calc_crc(mp, dip);
	fa = xfs_dinode_verify(mp, ip->i_ino, dip);
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
				sizeof(*dip), fa);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		ASSERT(fa == NULL);
	}
	kfree(dip);
}
#else
# define xfs_inode_item_precommit_check(ip)	((void)0)
#endif

/*
 * Prior to finally logging the inode, we have to ensure that all the
 * per-modification inode state changes are applied. This includes VFS inode
 * state updates, format conversions, verifier state synchronisation and
 * ensuring the inode buffer remains in memory whilst the inode is dirty.
 *
 * We have to be careful when we grab the inode cluster buffer due to lock
 * ordering constraints. The unlinked inode modifications (xfs_iunlink_item)
 * require AGI -> inode cluster buffer lock order. The inode cluster buffer is
 * not locked until ->precommit, so it happens after everything else has been
 * modified.
 *
 * Further, we have AGI -> AGF lock ordering, and with O_TMPFILE handling we
 * have AGI -> AGF -> iunlink item -> inode cluster buffer lock order. Hence we
 * cannot safely lock the inode cluster buffer in xfs_trans_log_inode() because
 * it can be called on an inode (e.g. via bumplink/droplink) before we take the
 * AGF lock modifying directory blocks.
 *
 * Rather than force a complete rework of all the transactions to call
 * xfs_trans_log_inode() once and once only at the end of every transaction, we
 * move the pinning of the inode cluster buffer to a ->precommit operation. This
 * matches how the xfs_iunlink_item locks the inode cluster buffer, and it
 * ensures that the inode cluster buffer locking is always done last in a
 * transaction. i.e. we ensure the lock order is always AGI -> AGF -> inode
 * cluster buffer.
 *
 * If we return the inode number as the precommit sort key, then we also
 * guarantee that the inode cluster buffer locking order is the same for all
 * the inodes and unlink items in the transaction.
 */
static int
xfs_inode_item_precommit(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct inode		*inode = VFS_I(ip);
	unsigned int		flags = iip->ili_dirty_flags;

	/*
	 * Don't bother with i_lock for the I_DIRTY_TIME check here, as races
	 * don't matter - we either will need an extra transaction in 24 hours
	 * to log the timestamps, or will clear already cleared fields in the
	 * worst case.
	 */
	if (inode->i_state & I_DIRTY_TIME) {
		spin_lock(&inode->i_lock);
		inode->i_state &= ~I_DIRTY_TIME;
		spin_unlock(&inode->i_lock);
	}

	/*
	 * If we're updating the inode core or the timestamps and it's possible
	 * to upgrade this inode to bigtime format, do so now.
	 */
	if ((flags & (XFS_ILOG_CORE | XFS_ILOG_TIMESTAMP)) &&
	    xfs_has_bigtime(ip->i_mount) &&
	    !xfs_inode_has_bigtime(ip)) {
		ip->i_diflags2 |= XFS_DIFLAG2_BIGTIME;
		flags |= XFS_ILOG_CORE;
	}

	/*
	 * Inode verifiers do not check that the extent size hint is an integer
	 * multiple of the rt extent size on a directory with both rtinherit
	 * and extszinherit flags set.  If we're logging a directory that is
	 * misconfigured in this way, clear the hint.
	 */
	if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
	    (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
	    (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
				   XFS_DIFLAG_EXTSZINHERIT);
		ip->i_extsize = 0;
		flags |= XFS_ILOG_CORE;
	}

	/*
	 * Record the specific change for fdatasync optimisation. This allows
	 * fdatasync to skip log forces for inodes that are only timestamp
	 * dirty. Once we've processed the XFS_ILOG_IVERSION flag, convert it
	 * to XFS_ILOG_CORE so that the actual on-disk dirty tracking
	 * (ili_fields) correctly tracks that the version has changed.
	 */
	spin_lock(&iip->ili_lock);
	iip->ili_fsync_fields |= (flags & ~XFS_ILOG_IVERSION);
	if (flags & XFS_ILOG_IVERSION)
		flags = ((flags & ~XFS_ILOG_IVERSION) | XFS_ILOG_CORE);

	if (!iip->ili_item.li_buf) {
		struct xfs_buf	*bp;
		int		error;

		/*
		 * We hold the ILOCK here, so this inode is not going to be
		 * flushed while we are here. Further, because there is no
		 * buffer attached to the item, we know that there is no IO in
		 * progress, so nothing will clear the ili_fields while we read
		 * in the buffer. Hence we can safely drop the spin lock and
		 * read the buffer knowing that the state will not change from
		 * here.
		 */
		spin_unlock(&iip->ili_lock);
		error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &bp);
		if (error)
			return error;

		/*
		 * We need an explicit buffer reference for the log item but
		 * don't want the buffer to remain attached to the transaction.
		 * Hold the buffer but release the transaction reference once
		 * we've attached the inode log item to the buffer log item
		 * list.
		 */
		xfs_buf_hold(bp);
		spin_lock(&iip->ili_lock);
		iip->ili_item.li_buf = bp;
		bp->b_flags |= _XBF_INODES;
		list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list);
		xfs_trans_brelse(tp, bp);
	}

	/*
	 * Always OR in the bits from the ili_last_fields field.  This is to
	 * coordinate with the xfs_iflush() and xfs_buf_inode_iodone() routines
	 * in the eventual clearing of the ili_fields bits.  See the big comment
	 * in xfs_iflush() for an explanation of this coordination mechanism.
	 */
	iip->ili_fields |= (flags | iip->ili_last_fields);
	spin_unlock(&iip->ili_lock);

	xfs_inode_item_precommit_check(ip);

	/*
	 * We are done with the log item transaction dirty state, so clear it so
	 * that it doesn't pollute future transactions.
	 */
	iip->ili_dirty_flags = 0;
	return 0;
}

/*
 * The logged size of an inode fork is always the current size of the inode
 * fork. This means that when an inode fork is relogged, the size of the logged
 * region is determined by the current state, not the combination of the
 * previously logged state + the current state. This is different relogging
 * behaviour to most other log items, which retain the size of the previously
 * logged changes when smaller regions are relogged.
 *
 * Hence, for operations that remove data from the inode fork (e.g. shortform
 * dir/attr remove, extent format extent removal, etc.), the size of the
 * relogged inode gets -smaller- rather than staying the same as the previously
 * logged size, and this can result in the committing transaction reducing the
 * amount of space being consumed by the CIL.
 */
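/*
 * Account for the log iovecs and space needed to log the data fork in its
 * current format.
 */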
STATIC void
xfs_inode_item_data_fork_size(
	struct xfs_inode_log_item *iip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode	*ip = iip->ili_inode;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			/* worst case, doesn't subtract delalloc extents */
			*nbytes += xfs_inode_data_fork_size(ip);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			*nbytes += ip->i_df.if_broot_bytes;
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			*nbytes += xlog_calc_iovec_len(ip->i_df.if_bytes);
			*nvecs += 1;
		}
		break;

	case XFS_DINODE_FMT_DEV:
		break;
	default:
		ASSERT(0);
		break;
	}
}

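/*
 * Account for the log iovecs and space needed to log the attribute fork in
 * its current format.
 */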
STATIC void
xfs_inode_item_attr_fork_size(
	struct xfs_inode_log_item *iip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode	*ip = iip->ili_inode;

	switch (ip->i_af.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_af.if_nextents > 0 &&
		    ip->i_af.if_bytes > 0) {
			/* worst case, doesn't subtract unused space */
			*nbytes += xfs_inode_attr_fork_size(ip);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_af.if_broot_bytes > 0) {
			*nbytes += ip->i_af.if_broot_bytes;
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_af.if_bytes > 0) {
			*nbytes += xlog_calc_iovec_len(ip->i_af.if_bytes);
			*nvecs += 1;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * This returns the number of iovecs and the number of bytes needed to log
 * the given inode item.
 *
 * We need one iovec for the inode log format structure, one for the
 * inode core, and possibly one for the inode data/extents/b-tree root
 * and one for the inode attribute data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	*nvecs += 2;
	*nbytes += sizeof(struct xfs_inode_log_format) +
		   xfs_log_dinode_size(ip->i_mount);

	xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
	if (xfs_inode_has_attr_fork(ip))
		xfs_inode_item_attr_fork_size(iip, nvecs, nbytes);
}

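/*
 * Write the data fork of the inode into the log vector in its current format,
 * clearing the ili_fields flags that do not apply to that format.
 */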
STATIC void
xfs_inode_item_format_data_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(&ip->i_df) > 0);

			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ASSERT(data_bytes <= ip->i_df.if_bytes);

			ilf->ilf_dsize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			ASSERT(ip->i_df.if_broot != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IBROOT,
					ip->i_df.if_broot,
					ip->i_df.if_broot_bytes);
			ilf->ilf_dsize = ip->i_df.if_broot_bytes;
			ilf->ilf_size++;
		} else {
			ASSERT(!(iip->ili_fields &
				 XFS_ILOG_DBROOT));
			iip->ili_fields &= ~XFS_ILOG_DBROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | XFS_ILOG_DEV);
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			ASSERT(ip->i_df.if_u1.if_data != NULL);
			ASSERT(ip->i_disk_size > 0);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
					ip->i_df.if_u1.if_data,
					ip->i_df.if_bytes);
			ilf->ilf_dsize = (unsigned)ip->i_df.if_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DDATA;
		}
		break;
	case XFS_DINODE_FMT_DEV:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEXT);
		if (iip->ili_fields & XFS_ILOG_DEV)
			ilf->ilf_u.ilfu_rdev = sysv_encode_dev(VFS_I(ip)->i_rdev);
		break;
	default:
		ASSERT(0);
		break;
	}
}

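/*
 * As above, but write the attribute fork into the log vector and trim the
 * attribute fork flags in ili_fields accordingly.
 */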
STATIC void
xfs_inode_item_format_attr_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_af.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_af.if_nextents > 0 &&
		    ip->i_af.if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(&ip->i_af) ==
				ip->i_af.if_nextents);

			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ilf->ilf_asize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_AEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);

		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_af.if_broot_bytes > 0) {
			ASSERT(ip->i_af.if_broot != NULL);

			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_BROOT,
					ip->i_af.if_broot,
					ip->i_af.if_broot_bytes);
			ilf->ilf_asize = ip->i_af.if_broot_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ABROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_af.if_bytes > 0) {
			ASSERT(ip->i_af.if_u1.if_data != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
					ip->i_af.if_u1.if_data,
					ip->i_af.if_bytes);
			ilf->ilf_asize = (unsigned)ip->i_af.if_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ADATA;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * Convert an incore timestamp to a log timestamp.  Note that the log format
 * specifies host endian format!
 */
static inline xfs_log_timestamp_t
xfs_inode_to_log_dinode_ts(
	struct xfs_inode		*ip,
	const struct timespec64		tv)
{
	struct xfs_log_legacy_timestamp	*lits;
	xfs_log_timestamp_t		its;

	if (xfs_inode_has_bigtime(ip))
		return xfs_inode_encode_bigtime(tv);

	lits = (struct xfs_log_legacy_timestamp *)&its;
	lits->t_sec = tv.tv_sec;
	lits->t_nsec = tv.tv_nsec;

	return its;
}

/*
 * The legacy DMAPI fields are only present in the on-disk and in-log inodes,
 * but not in the in-memory one.  But we are guaranteed to have an inode buffer
 * in memory when logging an inode, so we can just copy it from the on-disk
 * inode to the in-log inode here so that recovery of a filesystem with these
 * fields set to non-zero values doesn't lose them.  For all other cases we
 * zero the fields.
 */
static void
xfs_copy_dm_fields_to_log_dinode(
	struct xfs_inode	*ip,
	struct xfs_log_dinode	*to)
{
	struct xfs_dinode	*dip;

	dip = xfs_buf_offset(ip->i_itemp->ili_item.li_buf,
			     ip->i_imap.im_boffset);

	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS)) {
		to->di_dmevmask = be32_to_cpu(dip->di_dmevmask);
		to->di_dmstate = be16_to_cpu(dip->di_dmstate);
	} else {
		to->di_dmevmask = 0;
		to->di_dmstate = 0;
	}
}

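/*
 * Copy the data and attr fork extent counts into the log dinode, using the
 * large on-disk counter fields if the inode has large extent counts enabled
 * and the original fields otherwise.
 */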
static inline void
xfs_inode_to_log_dinode_iext_counters(
	struct xfs_inode	*ip,
	struct xfs_log_dinode	*to)
{
	if (xfs_inode_has_large_extent_counts(ip)) {
		to->di_big_nextents = xfs_ifork_nextents(&ip->i_df);
		to->di_big_anextents = xfs_ifork_nextents(&ip->i_af);
		to->di_nrext64_pad = 0;
	} else {
		to->di_nextents = xfs_ifork_nextents(&ip->i_df);
		to->di_anextents = xfs_ifork_nextents(&ip->i_af);
	}
}

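/*
 * Translate the incore inode state, including the current VFS inode
 * timestamps and counters, into the log dinode that is written into the
 * inode core log iovec.
 */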
static void
xfs_inode_to_log_dinode(
	struct xfs_inode	*ip,
	struct xfs_log_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct inode		*inode = VFS_I(ip);

	to->di_magic = XFS_DINODE_MAGIC;
	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = i_uid_read(inode);
	to->di_gid = i_gid_read(inode);
	to->di_projid_lo = ip->i_projid & 0xffff;
	to->di_projid_hi = ip->i_projid >> 16;

	memset(to->di_pad3, 0, sizeof(to->di_pad3));
	to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode->i_atime);
	to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode->i_mtime);
	to->di_ctime = xfs_inode_to_log_dinode_ts(ip, inode_get_ctime(inode));
	to->di_nlink = inode->i_nlink;
	to->di_gen = inode->i_generation;
	to->di_mode = inode->i_mode;

	to->di_size = ip->i_disk_size;
	to->di_nblocks = ip->i_nblocks;
	to->di_extsize = ip->i_extsize;
	to->di_forkoff = ip->i_forkoff;
	to->di_aformat = xfs_ifork_format(&ip->i_af);
	to->di_flags = ip->i_diflags;

	xfs_copy_dm_fields_to_log_dinode(ip, to);

	/* log a dummy value to ensure log structure is fully initialised */
	to->di_next_unlinked = NULLAGINO;

	if (xfs_has_v3inodes(ip->i_mount)) {
		to->di_version = 3;
		to->di_changecount = inode_peek_iversion(inode);
		to->di_crtime = xfs_inode_to_log_dinode_ts(ip, ip->i_crtime);
		to->di_flags2 = ip->i_diflags2;
		to->di_cowextsize = ip->i_cowextsize;
		to->di_ino = ip->i_ino;
		to->di_lsn = lsn;
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_v3_pad = 0;

		/* dummy value for initialisation */
		to->di_crc = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = ip->i_flushiter;
		memset(to->di_v2_pad, 0, sizeof(to->di_v2_pad));
	}

	xfs_inode_to_log_dinode_iext_counters(ip, to);
}

/*
 * Format the inode core. Current timestamp data is only in the VFS inode
 * fields, so we need to grab them from there. Hence rather than just copying
 * the XFS inode core structure, format the fields directly into the iovec.
 */
static void
xfs_inode_item_format_core(
	struct xfs_inode	*ip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_log_dinode	*dic;

	dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
	xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
	xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_mount));
}

/*
 * This is called to fill in the vector of log iovecs for the given inode
 * log item.  It fills the first item with an inode log format structure,
 * the second with the on-disk inode structure, and a possible third and/or
 * fourth with the inode data/extents/b-tree root and inode attributes
 * data/extents/b-tree root.
 *
 * Note: Always use the 64 bit inode log format structure so we don't
 * leave an uninitialised hole in the format item on 64 bit systems. Log
 * recovery on 32 bit systems handles this just fine, so there's no reason
 * for not using and initialising the properly padded structure all the time.
 */
STATIC void
xfs_inode_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_inode_log_format *ilf;

	ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);
	ilf->ilf_type = XFS_LI_INODE;
	ilf->ilf_ino = ip->i_ino;
	ilf->ilf_blkno = ip->i_imap.im_blkno;
	ilf->ilf_len = ip->i_imap.im_len;
	ilf->ilf_boffset = ip->i_imap.im_boffset;
	ilf->ilf_fields = XFS_ILOG_CORE;
	ilf->ilf_size = 2; /* format + core */

	/*
	 * make sure we don't leak uninitialised data into the log in the case
	 * when we don't log every field in the inode.
	 */
	ilf->ilf_dsize = 0;
	ilf->ilf_asize = 0;
	ilf->ilf_pad = 0;
	memset(&ilf->ilf_u, 0, sizeof(ilf->ilf_u));

	xlog_finish_iovec(lv, vecp, sizeof(*ilf));

	xfs_inode_item_format_core(ip, lv, &vecp);
	xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
	if (xfs_inode_has_attr_fork(ip)) {
		xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp);
	} else {
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
	}

	/* update the format with the exact fields we actually logged */
	ilf->ilf_fields |= (iip->ili_fields & ~XFS_ILOG_TIMESTAMP);
}

/*
 * This is called to pin the inode associated with the inode log
 * item in memory so it cannot be written out.
 */
STATIC void
xfs_inode_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(lip->li_buf);

	trace_xfs_inode_pin(ip, _RET_IP_);
	atomic_inc(&ip->i_pincount);
}


/*
 * This is called to unpin the inode associated with the inode log
 * item which was previously pinned with a call to xfs_inode_item_pin().
 *
 * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
 *
 * Note that unpin can race with inode cluster buffer freeing marking the buffer
 * stale. In that case, flush completions are run from the buffer unpin call,
 * which may happen before the inode is unpinned. If we lose the race, there
 * will be no buffer attached to the log item, but the inode will be marked
 * XFS_ISTALE.
 */
STATIC void
xfs_inode_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	trace_xfs_inode_unpin(ip, _RET_IP_);
	ASSERT(lip->li_buf || xfs_iflags_test(ip, XFS_ISTALE));
	ASSERT(atomic_read(&ip->i_pincount) > 0);
	if (atomic_dec_and_test(&ip->i_pincount))
		wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
}

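/*
 * The AIL push handler for inode items. Try to lock and flush the inode
 * cluster buffer and queue it for delwri submission, returning the
 * appropriate XFS_ITEM_* state if the inode or buffer is pinned, the inode
 * is already being flushed, or the buffer cannot be locked or written back.
 */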
STATIC uint
xfs_inode_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
		__releases(&lip->li_ailp->ail_lock)
		__acquires(&lip->li_ailp->ail_lock)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp = lip->li_buf;
	uint			rval = XFS_ITEM_SUCCESS;
	int			error;

	if (!bp || (ip->i_flags & XFS_ISTALE)) {
		/*
		 * Inode item/buffer is being aborted due to cluster
		 * buffer deletion. Trigger a log force to have that operation
		 * completed and items removed from the AIL before the next push
		 * attempt.
		 */
		return XFS_ITEM_PINNED;
	}

	if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;

	if (xfs_iflags_test(ip, XFS_IFLUSHING))
		return XFS_ITEM_FLUSHING;

	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	spin_unlock(&lip->li_ailp->ail_lock);

	/*
	 * We need to hold a reference for flushing the cluster buffer as it may
	 * fail the buffer without IO submission. In which case, we better get a
	 * reference for that completion because otherwise we don't get a
	 * reference for IO until we queue the buffer for delwri submission.
	 */
	xfs_buf_hold(bp);
	error = xfs_iflush_cluster(bp);
	if (!error) {
		if (!xfs_buf_delwri_queue(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;
		xfs_buf_relse(bp);
	} else {
		/*
		 * Release the buffer if we were unable to flush anything. On
		 * any other error, the buffer has already been released.
		 */
		if (error == -EAGAIN)
			xfs_buf_relse(bp);
		rval = XFS_ITEM_LOCKED;
	}

	spin_lock(&lip->li_ailp->ail_lock);
	return rval;
}

/*
 * Unlock the inode associated with the inode log item.
 */
STATIC void
xfs_inode_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	unsigned short		lock_flags;

	ASSERT(ip->i_itemp != NULL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	lock_flags = iip->ili_lock_flags;
	iip->ili_lock_flags = 0;
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
}

/*
 * This is called to find out where the oldest active copy of the inode log
 * item in the on disk log resides now that the last log write of it completed
 * at the given lsn.  Since we always re-log all dirty data in an inode, the
 * latest copy in the on disk log is the only one that matters.  Therefore,
 * simply return the given lsn.
 *
 * If the inode has been marked stale because the cluster is being freed, we
 * don't want to (re-)insert this inode into the AIL. There is a race condition
 * where the cluster buffer may be unpinned before the inode is inserted into
 * the AIL during transaction committed processing. If the buffer is unpinned
 * before the inode item has been committed and inserted, then it is possible
 * for the buffer to be written and IO completes before the inode is inserted
 * into the AIL. In that case, we'd be inserting a clean, stale inode into the
 * AIL which will never get removed. It will, however, get reclaimed which
 * triggers an assert in xfs_inode_free() complaining about freeing an inode
 * still in the AIL.
 *
 * To avoid this, just unpin the inode directly and return an LSN of -1 so the
 * transaction committed code knows that it does not need to do any further
 * processing on the item.
 */
STATIC xfs_lsn_t
xfs_inode_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_inode_item_unpin(lip, 0);
		return -1;
	}
	return lsn;
}

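/*
 * Record the CIL sequence this inode item was committed in, then unlock the
 * inode via the normal release path.
 */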
STATIC void
xfs_inode_item_committing(
	struct xfs_log_item	*lip,
	xfs_csn_t		seq)
{
	INODE_ITEM(lip)->ili_commit_seq = seq;
	return xfs_inode_item_release(lip);
}

static const struct xfs_item_ops xfs_inode_item_ops = {
	.iop_sort	= xfs_inode_item_sort,
	.iop_precommit	= xfs_inode_item_precommit,
	.iop_size	= xfs_inode_item_size,
	.iop_format	= xfs_inode_item_format,
	.iop_pin	= xfs_inode_item_pin,
	.iop_unpin	= xfs_inode_item_unpin,
	.iop_release	= xfs_inode_item_release,
	.iop_committed	= xfs_inode_item_committed,
	.iop_push	= xfs_inode_item_push,
	.iop_committing	= xfs_inode_item_committing,
};


/*
 * Initialize the inode log item for a newly allocated (in-core) inode.
 */
void
xfs_inode_item_init(
	struct xfs_inode	*ip,
	struct xfs_mount	*mp)
{
	struct xfs_inode_log_item *iip;

	ASSERT(ip->i_itemp == NULL);
	iip = ip->i_itemp = kmem_cache_zalloc(xfs_ili_cache,
					      GFP_KERNEL | __GFP_NOFAIL);

	iip->ili_inode = ip;
	spin_lock_init(&iip->ili_lock);
	xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
						&xfs_inode_item_ops);
}

/*
 * Free the inode log item and any memory hanging off of it.
 */
void
xfs_inode_item_destroy(
	struct xfs_inode	*ip)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;

	ASSERT(iip->ili_item.li_buf == NULL);

	ip->i_itemp = NULL;
	kmem_free(iip->ili_item.li_lv_shadow);
	kmem_cache_free(xfs_ili_cache, iip);
}


/*
 * We only want to pull the item from the AIL if it is actually there
 * and its location in the log has not changed since we started the
 * flush.  Thus, we only bother if the inode's lsn has not changed.
 */
static void
xfs_iflush_ail_updates(
	struct xfs_ail		*ailp,
	struct list_head	*list)
{
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn = 0;

	/* this is an opencoded batch version of xfs_trans_ail_delete */
	spin_lock(&ailp->ail_lock);
	list_for_each_entry(lip, list, li_bio_list) {
		xfs_lsn_t	lsn;

		clear_bit(XFS_LI_FAILED, &lip->li_flags);
		if (INODE_ITEM(lip)->ili_flush_lsn != lip->li_lsn)
			continue;

		/*
		 * dgc: Not sure how this happens, but it happens very
		 * occasionally via generic/388.  xfs_iflush_abort() also
		 * silently handles this same "under writeback but not in AIL at
		 * shutdown" condition via xfs_trans_ail_delete().
		 */
		if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
			ASSERT(xlog_is_shutdown(lip->li_log));
			continue;
		}

		lsn = xfs_ail_delete_one(ailp, lip);
		if (!tail_lsn && lsn)
			tail_lsn = lsn;
	}
	xfs_ail_update_finish(ailp, tail_lsn);
}

/*
 * Walk the list of inodes that have completed their IOs. If they are clean,
 * remove them from the list and dissociate them from the buffer. Inodes that
 * are still dirty remain linked to the buffer and on the list. Caller must
 * handle them appropriately.
 */
static void
xfs_iflush_finish(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	struct xfs_log_item	*lip, *n;

	list_for_each_entry_safe(lip, n, list, li_bio_list) {
		struct xfs_inode_log_item *iip = INODE_ITEM(lip);
		bool	drop_buffer = false;

		spin_lock(&iip->ili_lock);

		/*
		 * Remove the reference to the cluster buffer if the inode is
		 * clean in memory and drop the buffer reference once we've
		 * dropped the locks we hold.
		 */
		ASSERT(iip->ili_item.li_buf == bp);
		if (!iip->ili_fields) {
			iip->ili_item.li_buf = NULL;
			list_del_init(&lip->li_bio_list);
			drop_buffer = true;
		}
		iip->ili_last_fields = 0;
		iip->ili_flush_lsn = 0;
		spin_unlock(&iip->ili_lock);
		xfs_iflags_clear(iip->ili_inode, XFS_IFLUSHING);
		if (drop_buffer)
			xfs_buf_rele(bp);
	}
}

9761da177e4SLinus Torvalds /*
977a69a1dc2SDave Chinner  * Inode buffer IO completion routine.  It is responsible for removing inodes
978718ecc50SDave Chinner  * attached to the buffer from the AIL if they have not been re-logged and
979718ecc50SDave Chinner  * completing the inode flush.
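 *
 * A simplified sketch of the disposition logic below (not an exact trace):
 *
 *	for each log item attached to the buffer:
 *		stale inode		-> xfs_iflush_abort()
 *		needs AIL removal	-> collect on ail_updates
 *		otherwise		-> collect on flushed_inodes
 *	remove the ail_updates batch from the AIL, then finish every flush.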
980a69a1dc2SDave Chinner  */
981a69a1dc2SDave Chinner void
982664ffb8aSChristoph Hellwig xfs_buf_inode_iodone(
983a69a1dc2SDave Chinner 	struct xfs_buf		*bp)
984a69a1dc2SDave Chinner {
985a69a1dc2SDave Chinner 	struct xfs_log_item	*lip, *n;
986a69a1dc2SDave Chinner 	LIST_HEAD(flushed_inodes);
987a69a1dc2SDave Chinner 	LIST_HEAD(ail_updates);
988a69a1dc2SDave Chinner 
989a69a1dc2SDave Chinner 	/*
990a69a1dc2SDave Chinner 	 * Pull the attached inodes from the buffer one at a time and take the
991a69a1dc2SDave Chinner 	 * appropriate action on them.
992a69a1dc2SDave Chinner 	 */
993a69a1dc2SDave Chinner 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
994a69a1dc2SDave Chinner 		struct xfs_inode_log_item *iip = INODE_ITEM(lip);
995a69a1dc2SDave Chinner 
996a69a1dc2SDave Chinner 		if (xfs_iflags_test(iip->ili_inode, XFS_ISTALE)) {
997a69a1dc2SDave Chinner 			xfs_iflush_abort(iip->ili_inode);
998a69a1dc2SDave Chinner 			continue;
999a69a1dc2SDave Chinner 		}
1000a69a1dc2SDave Chinner 		if (!iip->ili_last_fields)
1001a69a1dc2SDave Chinner 			continue;
1002a69a1dc2SDave Chinner 
1003a69a1dc2SDave Chinner 		/* Do an unlocked check for needing the AIL lock. */
1004a69a1dc2SDave Chinner 		if (iip->ili_flush_lsn == lip->li_lsn ||
1005a69a1dc2SDave Chinner 		    test_bit(XFS_LI_FAILED, &lip->li_flags))
1006a69a1dc2SDave Chinner 			list_move_tail(&lip->li_bio_list, &ail_updates);
1007a69a1dc2SDave Chinner 		else
1008a69a1dc2SDave Chinner 			list_move_tail(&lip->li_bio_list, &flushed_inodes);
1009a69a1dc2SDave Chinner 	}
1010a69a1dc2SDave Chinner 
1011a69a1dc2SDave Chinner 	if (!list_empty(&ail_updates)) {
1012a69a1dc2SDave Chinner 		xfs_iflush_ail_updates(bp->b_mount->m_ail, &ail_updates);
1013a69a1dc2SDave Chinner 		list_splice_tail(&ail_updates, &flushed_inodes);
1014a69a1dc2SDave Chinner 	}
1015a69a1dc2SDave Chinner 
1016a69a1dc2SDave Chinner 	xfs_iflush_finish(bp, &flushed_inodes);
1017a69a1dc2SDave Chinner 	if (!list_empty(&flushed_inodes))
1018a69a1dc2SDave Chinner 		list_splice_tail(&flushed_inodes, &bp->b_li_list);
1019a69a1dc2SDave Chinner }
1020a69a1dc2SDave Chinner 
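/*
 * Buffer write failure: mark every inode log item attached to this buffer as
 * failed so that later flush retries (the AIL push code checks XFS_LI_FAILED)
 * know the previous write did not make it to disk.
 */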
1021664ffb8aSChristoph Hellwig void
1022664ffb8aSChristoph Hellwig xfs_buf_inode_io_fail(
1023664ffb8aSChristoph Hellwig 	struct xfs_buf		*bp)
1024664ffb8aSChristoph Hellwig {
1025664ffb8aSChristoph Hellwig 	struct xfs_log_item	*lip;
1026664ffb8aSChristoph Hellwig 
1027664ffb8aSChristoph Hellwig 	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
1028664ffb8aSChristoph Hellwig 		set_bit(XFS_LI_FAILED, &lip->li_flags);
1029664ffb8aSChristoph Hellwig }
1030664ffb8aSChristoph Hellwig 
1031a69a1dc2SDave Chinner /*
1032d2d7c047SDave Chinner  * Clear the inode logging fields so no more flushes are attempted.  If we are
1033d2d7c047SDave Chinner  * on a buffer list, it is now safe to remove it because the buffer is
1034d2d7c047SDave Chinner  * guaranteed to be locked. The caller will drop the reference to the buffer
1035d2d7c047SDave Chinner  * the log item held.
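 *
 * A minimal sketch of the expected call pattern (based on xfs_iflush_abort()
 * below); the ili_lock must be held, and the buffer reference is dropped only
 * after the lock is released:
 *
 *	spin_lock(&iip->ili_lock);
 *	bp = iip->ili_item.li_buf;
 *	xfs_iflush_abort_clean(iip);
 *	spin_unlock(&iip->ili_lock);
 *	if (bp)
 *		xfs_buf_rele(bp);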
1036d2d7c047SDave Chinner  */
1037d2d7c047SDave Chinner static void
1038d2d7c047SDave Chinner xfs_iflush_abort_clean(
1039d2d7c047SDave Chinner 	struct xfs_inode_log_item *iip)
1040d2d7c047SDave Chinner {
1041d2d7c047SDave Chinner 	iip->ili_last_fields = 0;
1042d2d7c047SDave Chinner 	iip->ili_fields = 0;
1043d2d7c047SDave Chinner 	iip->ili_fsync_fields = 0;
1044d2d7c047SDave Chinner 	iip->ili_flush_lsn = 0;
1045d2d7c047SDave Chinner 	iip->ili_item.li_buf = NULL;
1046d2d7c047SDave Chinner 	list_del_init(&iip->ili_item.li_bio_list);
1047d2d7c047SDave Chinner }
1048d2d7c047SDave Chinner 
1049d2d7c047SDave Chinner /*
1050d2d7c047SDave Chinner  * Abort flushing the inode from a context holding the cluster buffer locked.
1051d2d7c047SDave Chinner  *
1052d2d7c047SDave Chinner  * This is the normal runtime method of aborting writeback of an inode that is
1053d2d7c047SDave Chinner  * attached to a cluster buffer. It occurs when the inode and the backing
1054d2d7c047SDave Chinner  * cluster buffer have been freed (i.e. inode is XFS_ISTALE), or when cluster
1055d2d7c047SDave Chinner  * flushing or buffer IO completion encounters a log shutdown situation.
1056d2d7c047SDave Chinner  *
1057d2d7c047SDave Chinner  * If we need to abort inode writeback and we don't already hold the buffer
1058d2d7c047SDave Chinner  * locked, call xfs_iflush_shutdown_abort() instead as this should only ever be
1059d2d7c047SDave Chinner  * necessary in a shutdown situation.
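 *
 * A rough sketch of the intended calling convention (as described above, not
 * an exhaustive list of call sites):
 *
 *	if (cluster buffer already locked, or inode has no attached buffer)
 *		xfs_iflush_abort(ip);
 *	else
 *		xfs_iflush_shutdown_abort(ip);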
10601da177e4SLinus Torvalds  */
10611da177e4SLinus Torvalds void
10621da177e4SLinus Torvalds xfs_iflush_abort(
106388fc1879SBrian Foster 	struct xfs_inode	*ip)
10641da177e4SLinus Torvalds {
1065fd9cbe51SChristoph Hellwig 	struct xfs_inode_log_item *iip = ip->i_itemp;
1066d2d7c047SDave Chinner 	struct xfs_buf		*bp;
10671da177e4SLinus Torvalds 
1068d2d7c047SDave Chinner 	if (!iip) {
1069d2d7c047SDave Chinner 		/* clean inode, nothing to do */
1070d2d7c047SDave Chinner 		xfs_iflags_clear(ip, XFS_IFLUSHING);
1071d2d7c047SDave Chinner 		return;
1072d2d7c047SDave Chinner 	}
1073d2d7c047SDave Chinner 
1074298f7becSDave Chinner 	/*
1075d2d7c047SDave Chinner 	 * Remove the inode item from the AIL before we clear its internal
1076d2d7c047SDave Chinner 	 * state. Whilst the inode is in the AIL, it should have a valid buffer
1077d2d7c047SDave Chinner 	 * pointer for push operations to access - it is only safe to remove the
1078d2d7c047SDave Chinner 	 * inode from the buffer once it has been removed from the AIL.
1079d2d7c047SDave Chinner 	 *
1080d2d7c047SDave Chinner 	 * We also clear the failed bit before removing the item from the AIL
1081d2d7c047SDave Chinner 	 * as xfs_trans_ail_delete()->xfs_clear_li_failed() will release buffer
1082d2d7c047SDave Chinner 	 * references the inode item owns and needs to hold until we've fully
1083d2d7c047SDave Chinner 	 * aborted the inode log item and detached it from the buffer.
1084298f7becSDave Chinner 	 */
1085298f7becSDave Chinner 	clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
10862b3cf093SBrian Foster 	xfs_trans_ail_delete(&iip->ili_item, 0);
1087298f7becSDave Chinner 
10881da177e4SLinus Torvalds 	/*
1089d2d7c047SDave Chinner 	 * Grab the inode buffer so we can release the reference the inode log
1090d2d7c047SDave Chinner 	 * item holds on it.
10911da177e4SLinus Torvalds 	 */
10921319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
1093298f7becSDave Chinner 	bp = iip->ili_item.li_buf;
1094d2d7c047SDave Chinner 	xfs_iflush_abort_clean(iip);
10951319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
1096d2d7c047SDave Chinner 
1097718ecc50SDave Chinner 	xfs_iflags_clear(ip, XFS_IFLUSHING);
1098298f7becSDave Chinner 	if (bp)
1099298f7becSDave Chinner 		xfs_buf_rele(bp);
11001da177e4SLinus Torvalds }
11011da177e4SLinus Torvalds 
11026d192a9bSTim Shimmin /*
1103d2d7c047SDave Chinner  * Abort an inode flush in the case of a shutdown filesystem. This can be called
1104d2d7c047SDave Chinner  * from anywhere with just an inode reference and does not require holding the
1105d2d7c047SDave Chinner  * inode cluster buffer locked. If the inode is attached to a cluster buffer,
1106d2d7c047SDave Chinner  * it will grab and lock it safely, then abort the inode flush.
1107d2d7c047SDave Chinner  */
1108d2d7c047SDave Chinner void
1109d2d7c047SDave Chinner xfs_iflush_shutdown_abort(
1110d2d7c047SDave Chinner 	struct xfs_inode	*ip)
1111d2d7c047SDave Chinner {
1112d2d7c047SDave Chinner 	struct xfs_inode_log_item *iip = ip->i_itemp;
1113d2d7c047SDave Chinner 	struct xfs_buf		*bp;
1114d2d7c047SDave Chinner 
1115d2d7c047SDave Chinner 	if (!iip) {
1116d2d7c047SDave Chinner 		/* clean inode, nothing to do */
1117d2d7c047SDave Chinner 		xfs_iflags_clear(ip, XFS_IFLUSHING);
1118d2d7c047SDave Chinner 		return;
1119d2d7c047SDave Chinner 	}
1120d2d7c047SDave Chinner 
1121d2d7c047SDave Chinner 	spin_lock(&iip->ili_lock);
1122d2d7c047SDave Chinner 	bp = iip->ili_item.li_buf;
1123d2d7c047SDave Chinner 	if (!bp) {
1124d2d7c047SDave Chinner 		spin_unlock(&iip->ili_lock);
1125d2d7c047SDave Chinner 		xfs_iflush_abort(ip);
1126d2d7c047SDave Chinner 		return;
1127d2d7c047SDave Chinner 	}
1128d2d7c047SDave Chinner 
1129d2d7c047SDave Chinner 	/*
1130d2d7c047SDave Chinner 	 * We have to take a reference to the buffer so that it doesn't get
1131d2d7c047SDave Chinner 	 * freed when we drop the ili_lock and then wait to lock the buffer.
1132d2d7c047SDave Chinner 	 * We'll clean up the extra reference after we pick up the ili_lock
1133d2d7c047SDave Chinner 	 * again.
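	 *
	 * The sequence below is, roughly:
	 *
	 *	xfs_buf_hold(bp);		take a reference under ili_lock
	 *	spin_unlock(&iip->ili_lock);
	 *	xfs_buf_lock(bp);		may sleep
	 *	spin_lock(&iip->ili_lock);
	 *	recheck iip->ili_item.li_buf	we may have raced with removal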
1134d2d7c047SDave Chinner 	 */
1135d2d7c047SDave Chinner 	xfs_buf_hold(bp);
1136d2d7c047SDave Chinner 	spin_unlock(&iip->ili_lock);
1137d2d7c047SDave Chinner 	xfs_buf_lock(bp);
1138d2d7c047SDave Chinner 
1139d2d7c047SDave Chinner 	spin_lock(&iip->ili_lock);
1140d2d7c047SDave Chinner 	if (!iip->ili_item.li_buf) {
1141d2d7c047SDave Chinner 		/*
1142d2d7c047SDave Chinner 		 * Raced with another removal; we now hold the only reference
1143d2d7c047SDave Chinner 		 * to bp. The inode should no longer be in the AIL, so just
1144d2d7c047SDave Chinner 		 * clean up and return.
1145d2d7c047SDave Chinner 		 */
1146d2d7c047SDave Chinner 		ASSERT(list_empty(&iip->ili_item.li_bio_list));
1147d2d7c047SDave Chinner 		ASSERT(!test_bit(XFS_LI_IN_AIL, &iip->ili_item.li_flags));
1148d2d7c047SDave Chinner 		xfs_iflush_abort_clean(iip);
1149d2d7c047SDave Chinner 		spin_unlock(&iip->ili_lock);
1150d2d7c047SDave Chinner 		xfs_iflags_clear(ip, XFS_IFLUSHING);
1151d2d7c047SDave Chinner 		xfs_buf_relse(bp);
1152d2d7c047SDave Chinner 		return;
1153d2d7c047SDave Chinner 	}
1154d2d7c047SDave Chinner 
1155d2d7c047SDave Chinner 	/*
1156d2d7c047SDave Chinner 	 * Got two references to bp. The first will get dropped by
1157d2d7c047SDave Chinner 	 * xfs_iflush_abort() when the item is removed from the buffer list, but
1158d2d7c047SDave Chinner 	 * we can't drop our reference until _abort() returns because we have to
1159d2d7c047SDave Chinner 	 * unlock the buffer as well. Hence we abort and then unlock and release
1160d2d7c047SDave Chinner 	 * our reference to the buffer.
1161d2d7c047SDave Chinner 	 */
1162d2d7c047SDave Chinner 	ASSERT(iip->ili_item.li_buf == bp);
1163d2d7c047SDave Chinner 	spin_unlock(&iip->ili_lock);
1164d2d7c047SDave Chinner 	xfs_iflush_abort(ip);
1165d2d7c047SDave Chinner 	xfs_buf_relse(bp);
1166d2d7c047SDave Chinner }
1167d2d7c047SDave Chinner 
1168d2d7c047SDave Chinner 
1169d2d7c047SDave Chinner /*
117020413e37SDave Chinner  * Convert an xfs_inode_log_format struct from the old 32-bit version
117120413e37SDave Chinner  * (which can have different field alignments) to the native 64-bit version
11726d192a9bSTim Shimmin  */
11736d192a9bSTim Shimmin int
11746d192a9bSTim Shimmin xfs_inode_item_format_convert(
117520413e37SDave Chinner 	struct xfs_log_iovec		*buf,
117620413e37SDave Chinner 	struct xfs_inode_log_format	*in_f)
11776d192a9bSTim Shimmin {
117820413e37SDave Chinner 	struct xfs_inode_log_format_32	*in_f32 = buf->i_addr;
117920413e37SDave Chinner 
1180a5155b87SDarrick J. Wong 	if (buf->i_len != sizeof(*in_f32)) {
1181a5155b87SDarrick J. Wong 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
118220413e37SDave Chinner 		return -EFSCORRUPTED;
1183a5155b87SDarrick J. Wong 	}
11846d192a9bSTim Shimmin 
11856d192a9bSTim Shimmin 	in_f->ilf_type = in_f32->ilf_type;
11866d192a9bSTim Shimmin 	in_f->ilf_size = in_f32->ilf_size;
11876d192a9bSTim Shimmin 	in_f->ilf_fields = in_f32->ilf_fields;
11886d192a9bSTim Shimmin 	in_f->ilf_asize = in_f32->ilf_asize;
11896d192a9bSTim Shimmin 	in_f->ilf_dsize = in_f32->ilf_dsize;
11906d192a9bSTim Shimmin 	in_f->ilf_ino = in_f32->ilf_ino;
119142b67dc6SChristoph Hellwig 	memcpy(&in_f->ilf_u, &in_f32->ilf_u, sizeof(in_f->ilf_u));
11926d192a9bSTim Shimmin 	in_f->ilf_blkno = in_f32->ilf_blkno;
11936d192a9bSTim Shimmin 	in_f->ilf_len = in_f32->ilf_len;
11946d192a9bSTim Shimmin 	in_f->ilf_boffset = in_f32->ilf_boffset;
11956d192a9bSTim Shimmin 	return 0;
11966d192a9bSTim Shimmin }
1197