xref: /openbmc/linux/fs/xfs/xfs_inode.c (revision 5806165a6663544ea41bc3216f5c5effbde4799e)
10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
33e57ecf6SOlaf Weber  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
47b718769SNathan Scott  * All Rights Reserved.
51da177e4SLinus Torvalds  */
6f0e28280SJeff Layton #include <linux/iversion.h>
740ebd81dSRobert P. J. Day 
81da177e4SLinus Torvalds #include "xfs.h"
9a844f451SNathan Scott #include "xfs_fs.h"
1070a9883cSDave Chinner #include "xfs_shared.h"
11239880efSDave Chinner #include "xfs_format.h"
12239880efSDave Chinner #include "xfs_log_format.h"
13239880efSDave Chinner #include "xfs_trans_resv.h"
141da177e4SLinus Torvalds #include "xfs_sb.h"
151da177e4SLinus Torvalds #include "xfs_mount.h"
163ab78df2SDarrick J. Wong #include "xfs_defer.h"
17a4fbe6abSDave Chinner #include "xfs_inode.h"
18c24b5dfaSDave Chinner #include "xfs_dir2.h"
19c24b5dfaSDave Chinner #include "xfs_attr.h"
20239880efSDave Chinner #include "xfs_trans_space.h"
21239880efSDave Chinner #include "xfs_trans.h"
221da177e4SLinus Torvalds #include "xfs_buf_item.h"
23a844f451SNathan Scott #include "xfs_inode_item.h"
24a844f451SNathan Scott #include "xfs_ialloc.h"
25a844f451SNathan Scott #include "xfs_bmap.h"
2668988114SDave Chinner #include "xfs_bmap_util.h"
27e9e899a2SDarrick J. Wong #include "xfs_errortag.h"
281da177e4SLinus Torvalds #include "xfs_error.h"
291da177e4SLinus Torvalds #include "xfs_quota.h"
302a82b8beSDavid Chinner #include "xfs_filestream.h"
310b1b213fSChristoph Hellwig #include "xfs_trace.h"
3233479e05SDave Chinner #include "xfs_icache.h"
33c24b5dfaSDave Chinner #include "xfs_symlink.h"
34239880efSDave Chinner #include "xfs_trans_priv.h"
35239880efSDave Chinner #include "xfs_log.h"
36a4fbe6abSDave Chinner #include "xfs_bmap_btree.h"
37aa8968f2SDarrick J. Wong #include "xfs_reflink.h"
381da177e4SLinus Torvalds 
391da177e4SLinus Torvalds kmem_zone_t *xfs_inode_zone;
401da177e4SLinus Torvalds 
411da177e4SLinus Torvalds /*
428f04c47aSChristoph Hellwig  * Used in xfs_itruncate_extents().  This is the maximum number of extents
431da177e4SLinus Torvalds  * freed from a file in a single transaction.
441da177e4SLinus Torvalds  */
451da177e4SLinus Torvalds #define	XFS_ITRUNC_MAX_EXTENTS	2
461da177e4SLinus Torvalds 
4754d7b5c1SDave Chinner STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
4854d7b5c1SDave Chinner STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
4954d7b5c1SDave Chinner STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
50ab297431SZhi Yong Wu 
512a0ec1d9SDave Chinner /*
522a0ec1d9SDave Chinner  * helper function to extract extent size hint from inode
532a0ec1d9SDave Chinner  */
542a0ec1d9SDave Chinner xfs_extlen_t
552a0ec1d9SDave Chinner xfs_get_extsz_hint(
562a0ec1d9SDave Chinner 	struct xfs_inode	*ip)
572a0ec1d9SDave Chinner {
58bdb2ed2dSChristoph Hellwig 	/*
59bdb2ed2dSChristoph Hellwig 	 * No point in aligning allocations if we need to COW to actually
60bdb2ed2dSChristoph Hellwig 	 * write to them.
61bdb2ed2dSChristoph Hellwig 	 */
62bdb2ed2dSChristoph Hellwig 	if (xfs_is_always_cow_inode(ip))
63bdb2ed2dSChristoph Hellwig 		return 0;
642a0ec1d9SDave Chinner 	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
652a0ec1d9SDave Chinner 		return ip->i_d.di_extsize;
662a0ec1d9SDave Chinner 	if (XFS_IS_REALTIME_INODE(ip))
672a0ec1d9SDave Chinner 		return ip->i_mount->m_sb.sb_rextsize;
682a0ec1d9SDave Chinner 	return 0;
692a0ec1d9SDave Chinner }
702a0ec1d9SDave Chinner 
71fa96acadSDave Chinner /*
72f7ca3522SDarrick J. Wong  * Helper function to extract CoW extent size hint from inode.
73f7ca3522SDarrick J. Wong  * Between the extent size hint and the CoW extent size hint, we
74e153aa79SDarrick J. Wong  * return the greater of the two.  If the value is zero (automatic),
75e153aa79SDarrick J. Wong  * use the default size.
76f7ca3522SDarrick J. Wong  */
77f7ca3522SDarrick J. Wong xfs_extlen_t
78f7ca3522SDarrick J. Wong xfs_get_cowextsz_hint(
79f7ca3522SDarrick J. Wong 	struct xfs_inode	*ip)
80f7ca3522SDarrick J. Wong {
81f7ca3522SDarrick J. Wong 	xfs_extlen_t		a, b;
82f7ca3522SDarrick J. Wong 
83f7ca3522SDarrick J. Wong 	a = 0;
84f7ca3522SDarrick J. Wong 	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
85f7ca3522SDarrick J. Wong 		a = ip->i_d.di_cowextsize;
86f7ca3522SDarrick J. Wong 	b = xfs_get_extsz_hint(ip);
87f7ca3522SDarrick J. Wong 
88e153aa79SDarrick J. Wong 	a = max(a, b);
89e153aa79SDarrick J. Wong 	if (a == 0)
90e153aa79SDarrick J. Wong 		return XFS_DEFAULT_COWEXTSZ_HINT;
91f7ca3522SDarrick J. Wong 	return a;
92f7ca3522SDarrick J. Wong }
93f7ca3522SDarrick J. Wong 
94f7ca3522SDarrick J. Wong /*
95efa70be1SChristoph Hellwig  * These two are wrapper routines around the xfs_ilock() routine used to
96efa70be1SChristoph Hellwig  * centralize some grungy code.  They are used in places that wish to lock the
97efa70be1SChristoph Hellwig  * inode solely for reading the extents.  The reason these places can't just
98efa70be1SChristoph Hellwig  * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to
99efa70be1SChristoph Hellwig  * bringing in of the extents from disk for a file in b-tree format.  If the
100efa70be1SChristoph Hellwig  * inode is in b-tree format, then we need to lock the inode exclusively until
101efa70be1SChristoph Hellwig  * the extents are read in.  Locking it exclusively all the time would limit
102efa70be1SChristoph Hellwig  * our parallelism unnecessarily, though.  What we do instead is check to see
103efa70be1SChristoph Hellwig  * if the extents have been read in yet, and only lock the inode exclusively
104efa70be1SChristoph Hellwig  * if they have not.
105fa96acadSDave Chinner  *
106efa70be1SChristoph Hellwig  * The functions return a value which should be given to the corresponding
10701f4f327SChristoph Hellwig  * xfs_iunlock() call.
108fa96acadSDave Chinner  */
109fa96acadSDave Chinner uint
110309ecac8SChristoph Hellwig xfs_ilock_data_map_shared(
111309ecac8SChristoph Hellwig 	struct xfs_inode	*ip)
112fa96acadSDave Chinner {
113309ecac8SChristoph Hellwig 	uint			lock_mode = XFS_ILOCK_SHARED;
114fa96acadSDave Chinner 
115309ecac8SChristoph Hellwig 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
116309ecac8SChristoph Hellwig 	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
117fa96acadSDave Chinner 		lock_mode = XFS_ILOCK_EXCL;
118fa96acadSDave Chinner 	xfs_ilock(ip, lock_mode);
119fa96acadSDave Chinner 	return lock_mode;
120fa96acadSDave Chinner }
121fa96acadSDave Chinner 
122efa70be1SChristoph Hellwig uint
123efa70be1SChristoph Hellwig xfs_ilock_attr_map_shared(
124efa70be1SChristoph Hellwig 	struct xfs_inode	*ip)
125fa96acadSDave Chinner {
126efa70be1SChristoph Hellwig 	uint			lock_mode = XFS_ILOCK_SHARED;
127efa70be1SChristoph Hellwig 
128efa70be1SChristoph Hellwig 	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
129efa70be1SChristoph Hellwig 	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
130efa70be1SChristoph Hellwig 		lock_mode = XFS_ILOCK_EXCL;
131efa70be1SChristoph Hellwig 	xfs_ilock(ip, lock_mode);
132efa70be1SChristoph Hellwig 	return lock_mode;
133fa96acadSDave Chinner }
134fa96acadSDave Chinner 
135fa96acadSDave Chinner /*
13665523218SChristoph Hellwig  * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
13765523218SChristoph Hellwig  * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
13865523218SChristoph Hellwig  * various combinations of the locks to be obtained.
139fa96acadSDave Chinner  *
140653c60b6SDave Chinner  * The 3 locks should always be ordered so that the IO lock is obtained first,
141653c60b6SDave Chinner  * the mmap lock second and the ilock last in order to prevent deadlock.
142fa96acadSDave Chinner  *
143653c60b6SDave Chinner  * Basic locking order:
144653c60b6SDave Chinner  *
14565523218SChristoph Hellwig  * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
146653c60b6SDave Chinner  *
147653c60b6SDave Chinner  * mmap_sem locking order:
148653c60b6SDave Chinner  *
14965523218SChristoph Hellwig  * i_rwsem -> page lock -> mmap_sem
150653c60b6SDave Chinner  * mmap_sem -> i_mmap_lock -> page_lock
151653c60b6SDave Chinner  *
152653c60b6SDave Chinner  * The difference in mmap_sem locking order mean that we cannot hold the
153653c60b6SDave Chinner  * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
154653c60b6SDave Chinner  * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
155653c60b6SDave Chinner  * in get_user_pages() to map the user pages into the kernel address space for
15665523218SChristoph Hellwig  * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
157653c60b6SDave Chinner  * page faults already hold the mmap_sem.
158653c60b6SDave Chinner  *
159653c60b6SDave Chinner  * Hence to serialise fully against both syscall and mmap based IO, we need to
16065523218SChristoph Hellwig  * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
161653c60b6SDave Chinner  * taken in places where we need to invalidate the page cache in a race
162653c60b6SDave Chinner  * free manner (e.g. truncate, hole punch and other extent manipulation
163653c60b6SDave Chinner  * functions).
164fa96acadSDave Chinner  */
165fa96acadSDave Chinner void
166fa96acadSDave Chinner xfs_ilock(
167fa96acadSDave Chinner 	xfs_inode_t		*ip,
168fa96acadSDave Chinner 	uint			lock_flags)
169fa96acadSDave Chinner {
170fa96acadSDave Chinner 	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
171fa96acadSDave Chinner 
172fa96acadSDave Chinner 	/*
173fa96acadSDave Chinner 	 * You can't set both SHARED and EXCL for the same lock,
174fa96acadSDave Chinner 	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
175fa96acadSDave Chinner 	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
176fa96acadSDave Chinner 	 */
177fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
178fa96acadSDave Chinner 	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
179653c60b6SDave Chinner 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
180653c60b6SDave Chinner 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
181fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
182fa96acadSDave Chinner 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
1830952c818SDave Chinner 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
184fa96acadSDave Chinner 
18565523218SChristoph Hellwig 	if (lock_flags & XFS_IOLOCK_EXCL) {
18665523218SChristoph Hellwig 		down_write_nested(&VFS_I(ip)->i_rwsem,
18765523218SChristoph Hellwig 				  XFS_IOLOCK_DEP(lock_flags));
18865523218SChristoph Hellwig 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
18965523218SChristoph Hellwig 		down_read_nested(&VFS_I(ip)->i_rwsem,
19065523218SChristoph Hellwig 				 XFS_IOLOCK_DEP(lock_flags));
19165523218SChristoph Hellwig 	}
192fa96acadSDave Chinner 
193653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
194653c60b6SDave Chinner 		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
195653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
196653c60b6SDave Chinner 		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
197653c60b6SDave Chinner 
198fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
199fa96acadSDave Chinner 		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
200fa96acadSDave Chinner 	else if (lock_flags & XFS_ILOCK_SHARED)
201fa96acadSDave Chinner 		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
202fa96acadSDave Chinner }
203fa96acadSDave Chinner 
204fa96acadSDave Chinner /*
205fa96acadSDave Chinner  * This is just like xfs_ilock(), except that the caller
206fa96acadSDave Chinner  * is guaranteed not to sleep.  It returns 1 if it gets
207fa96acadSDave Chinner  * the requested locks and 0 otherwise.  If the IO lock is
208fa96acadSDave Chinner  * obtained but the inode lock cannot be, then the IO lock
209fa96acadSDave Chinner  * is dropped before returning.
210fa96acadSDave Chinner  *
211fa96acadSDave Chinner  * ip -- the inode being locked
212fa96acadSDave Chinner  * lock_flags -- this parameter indicates the inode's locks to be
213fa96acadSDave Chinner  *       to be locked.  See the comment for xfs_ilock() for a list
214fa96acadSDave Chinner  *	 of valid values.
215fa96acadSDave Chinner  */
216fa96acadSDave Chinner int
217fa96acadSDave Chinner xfs_ilock_nowait(
218fa96acadSDave Chinner 	xfs_inode_t		*ip,
219fa96acadSDave Chinner 	uint			lock_flags)
220fa96acadSDave Chinner {
221fa96acadSDave Chinner 	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
222fa96acadSDave Chinner 
223fa96acadSDave Chinner 	/*
224fa96acadSDave Chinner 	 * You can't set both SHARED and EXCL for the same lock,
225fa96acadSDave Chinner 	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
226fa96acadSDave Chinner 	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
227fa96acadSDave Chinner 	 */
228fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
229fa96acadSDave Chinner 	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
230653c60b6SDave Chinner 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
231653c60b6SDave Chinner 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
232fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
233fa96acadSDave Chinner 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
2340952c818SDave Chinner 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
235fa96acadSDave Chinner 
236fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL) {
23765523218SChristoph Hellwig 		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
238fa96acadSDave Chinner 			goto out;
239fa96acadSDave Chinner 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
24065523218SChristoph Hellwig 		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
241fa96acadSDave Chinner 			goto out;
242fa96acadSDave Chinner 	}
243653c60b6SDave Chinner 
244653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
245653c60b6SDave Chinner 		if (!mrtryupdate(&ip->i_mmaplock))
246653c60b6SDave Chinner 			goto out_undo_iolock;
247653c60b6SDave Chinner 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
248653c60b6SDave Chinner 		if (!mrtryaccess(&ip->i_mmaplock))
249653c60b6SDave Chinner 			goto out_undo_iolock;
250653c60b6SDave Chinner 	}
251653c60b6SDave Chinner 
252fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL) {
253fa96acadSDave Chinner 		if (!mrtryupdate(&ip->i_lock))
254653c60b6SDave Chinner 			goto out_undo_mmaplock;
255fa96acadSDave Chinner 	} else if (lock_flags & XFS_ILOCK_SHARED) {
256fa96acadSDave Chinner 		if (!mrtryaccess(&ip->i_lock))
257653c60b6SDave Chinner 			goto out_undo_mmaplock;
258fa96acadSDave Chinner 	}
259fa96acadSDave Chinner 	return 1;
260fa96acadSDave Chinner 
261653c60b6SDave Chinner out_undo_mmaplock:
262653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
263653c60b6SDave Chinner 		mrunlock_excl(&ip->i_mmaplock);
264653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
265653c60b6SDave Chinner 		mrunlock_shared(&ip->i_mmaplock);
266fa96acadSDave Chinner out_undo_iolock:
267fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
26865523218SChristoph Hellwig 		up_write(&VFS_I(ip)->i_rwsem);
269fa96acadSDave Chinner 	else if (lock_flags & XFS_IOLOCK_SHARED)
27065523218SChristoph Hellwig 		up_read(&VFS_I(ip)->i_rwsem);
271fa96acadSDave Chinner out:
272fa96acadSDave Chinner 	return 0;
273fa96acadSDave Chinner }
274fa96acadSDave Chinner 
275fa96acadSDave Chinner /*
276fa96acadSDave Chinner  * xfs_iunlock() is used to drop the inode locks acquired with
277fa96acadSDave Chinner  * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
278fa96acadSDave Chinner  * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
279fa96acadSDave Chinner  * that we know which locks to drop.
280fa96acadSDave Chinner  *
281fa96acadSDave Chinner  * ip -- the inode being unlocked
282fa96acadSDave Chinner  * lock_flags -- this parameter indicates the inode's locks to be
283fa96acadSDave Chinner  *       to be unlocked.  See the comment for xfs_ilock() for a list
284fa96acadSDave Chinner  *	 of valid values for this parameter.
285fa96acadSDave Chinner  *
286fa96acadSDave Chinner  */
287fa96acadSDave Chinner void
288fa96acadSDave Chinner xfs_iunlock(
289fa96acadSDave Chinner 	xfs_inode_t		*ip,
290fa96acadSDave Chinner 	uint			lock_flags)
291fa96acadSDave Chinner {
292fa96acadSDave Chinner 	/*
293fa96acadSDave Chinner 	 * You can't set both SHARED and EXCL for the same lock,
294fa96acadSDave Chinner 	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
295fa96acadSDave Chinner 	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
296fa96acadSDave Chinner 	 */
297fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
298fa96acadSDave Chinner 	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
299653c60b6SDave Chinner 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
300653c60b6SDave Chinner 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
301fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
302fa96acadSDave Chinner 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
3030952c818SDave Chinner 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
304fa96acadSDave Chinner 	ASSERT(lock_flags != 0);
305fa96acadSDave Chinner 
306fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
30765523218SChristoph Hellwig 		up_write(&VFS_I(ip)->i_rwsem);
308fa96acadSDave Chinner 	else if (lock_flags & XFS_IOLOCK_SHARED)
30965523218SChristoph Hellwig 		up_read(&VFS_I(ip)->i_rwsem);
310fa96acadSDave Chinner 
311653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
312653c60b6SDave Chinner 		mrunlock_excl(&ip->i_mmaplock);
313653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
314653c60b6SDave Chinner 		mrunlock_shared(&ip->i_mmaplock);
315653c60b6SDave Chinner 
316fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
317fa96acadSDave Chinner 		mrunlock_excl(&ip->i_lock);
318fa96acadSDave Chinner 	else if (lock_flags & XFS_ILOCK_SHARED)
319fa96acadSDave Chinner 		mrunlock_shared(&ip->i_lock);
320fa96acadSDave Chinner 
321fa96acadSDave Chinner 	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
322fa96acadSDave Chinner }
323fa96acadSDave Chinner 
324fa96acadSDave Chinner /*
325fa96acadSDave Chinner  * give up write locks.  the i/o lock cannot be held nested
326fa96acadSDave Chinner  * if it is being demoted.
327fa96acadSDave Chinner  */
328fa96acadSDave Chinner void
329fa96acadSDave Chinner xfs_ilock_demote(
330fa96acadSDave Chinner 	xfs_inode_t		*ip,
331fa96acadSDave Chinner 	uint			lock_flags)
332fa96acadSDave Chinner {
333653c60b6SDave Chinner 	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
334653c60b6SDave Chinner 	ASSERT((lock_flags &
335653c60b6SDave Chinner 		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
336fa96acadSDave Chinner 
337fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
338fa96acadSDave Chinner 		mrdemote(&ip->i_lock);
339653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
340653c60b6SDave Chinner 		mrdemote(&ip->i_mmaplock);
341fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
34265523218SChristoph Hellwig 		downgrade_write(&VFS_I(ip)->i_rwsem);
343fa96acadSDave Chinner 
344fa96acadSDave Chinner 	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
345fa96acadSDave Chinner }
346fa96acadSDave Chinner 
347742ae1e3SDave Chinner #if defined(DEBUG) || defined(XFS_WARN)
348fa96acadSDave Chinner int
349fa96acadSDave Chinner xfs_isilocked(
350fa96acadSDave Chinner 	xfs_inode_t		*ip,
351fa96acadSDave Chinner 	uint			lock_flags)
352fa96acadSDave Chinner {
353fa96acadSDave Chinner 	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
354fa96acadSDave Chinner 		if (!(lock_flags & XFS_ILOCK_SHARED))
355fa96acadSDave Chinner 			return !!ip->i_lock.mr_writer;
356fa96acadSDave Chinner 		return rwsem_is_locked(&ip->i_lock.mr_lock);
357fa96acadSDave Chinner 	}
358fa96acadSDave Chinner 
359653c60b6SDave Chinner 	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
360653c60b6SDave Chinner 		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
361653c60b6SDave Chinner 			return !!ip->i_mmaplock.mr_writer;
362653c60b6SDave Chinner 		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
363653c60b6SDave Chinner 	}
364653c60b6SDave Chinner 
365fa96acadSDave Chinner 	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
366fa96acadSDave Chinner 		if (!(lock_flags & XFS_IOLOCK_SHARED))
36765523218SChristoph Hellwig 			return !debug_locks ||
36865523218SChristoph Hellwig 				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
36965523218SChristoph Hellwig 		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
370fa96acadSDave Chinner 	}
371fa96acadSDave Chinner 
372fa96acadSDave Chinner 	ASSERT(0);
373fa96acadSDave Chinner 	return 0;
374fa96acadSDave Chinner }
375fa96acadSDave Chinner #endif
376fa96acadSDave Chinner 
377b6a9947eSDave Chinner /*
378b6a9947eSDave Chinner  * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
379b6a9947eSDave Chinner  * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
380b6a9947eSDave Chinner  * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
381b6a9947eSDave Chinner  * errors and warnings.
382b6a9947eSDave Chinner  */
383b6a9947eSDave Chinner #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
3843403ccc0SDave Chinner static bool
3853403ccc0SDave Chinner xfs_lockdep_subclass_ok(
3863403ccc0SDave Chinner 	int subclass)
3873403ccc0SDave Chinner {
3883403ccc0SDave Chinner 	return subclass < MAX_LOCKDEP_SUBCLASSES;
3893403ccc0SDave Chinner }
3903403ccc0SDave Chinner #else
3913403ccc0SDave Chinner #define xfs_lockdep_subclass_ok(subclass)	(true)
3923403ccc0SDave Chinner #endif
3933403ccc0SDave Chinner 
394c24b5dfaSDave Chinner /*
395653c60b6SDave Chinner  * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
3960952c818SDave Chinner  * value. This can be called for any type of inode lock combination, including
3970952c818SDave Chinner  * parent locking. Care must be taken to ensure we don't overrun the subclass
3980952c818SDave Chinner  * storage fields in the class mask we build.
399c24b5dfaSDave Chinner  */
400c24b5dfaSDave Chinner static inline int
401c24b5dfaSDave Chinner xfs_lock_inumorder(int lock_mode, int subclass)
402c24b5dfaSDave Chinner {
4030952c818SDave Chinner 	int	class = 0;
4040952c818SDave Chinner 
4050952c818SDave Chinner 	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
4060952c818SDave Chinner 			      XFS_ILOCK_RTSUM)));
4073403ccc0SDave Chinner 	ASSERT(xfs_lockdep_subclass_ok(subclass));
4080952c818SDave Chinner 
409653c60b6SDave Chinner 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
4100952c818SDave Chinner 		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
4110952c818SDave Chinner 		class += subclass << XFS_IOLOCK_SHIFT;
412653c60b6SDave Chinner 	}
413653c60b6SDave Chinner 
414653c60b6SDave Chinner 	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
4150952c818SDave Chinner 		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
4160952c818SDave Chinner 		class += subclass << XFS_MMAPLOCK_SHIFT;
417653c60b6SDave Chinner 	}
418653c60b6SDave Chinner 
4190952c818SDave Chinner 	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
4200952c818SDave Chinner 		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
4210952c818SDave Chinner 		class += subclass << XFS_ILOCK_SHIFT;
4220952c818SDave Chinner 	}
423c24b5dfaSDave Chinner 
4240952c818SDave Chinner 	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
425c24b5dfaSDave Chinner }
426c24b5dfaSDave Chinner 
427c24b5dfaSDave Chinner /*
42895afcf5cSDave Chinner  * The following routine will lock n inodes in exclusive mode.  We assume the
42995afcf5cSDave Chinner  * caller calls us with the inodes in i_ino order.
430c24b5dfaSDave Chinner  *
43195afcf5cSDave Chinner  * We need to detect deadlock where an inode that we lock is in the AIL and we
43295afcf5cSDave Chinner  * start waiting for another inode that is locked by a thread in a long running
43395afcf5cSDave Chinner  * transaction (such as truncate). This can result in deadlock since the long
43495afcf5cSDave Chinner  * running trans might need to wait for the inode we just locked in order to
43595afcf5cSDave Chinner  * push the tail and free space in the log.
4360952c818SDave Chinner  *
4370952c818SDave Chinner  * xfs_lock_inodes() can only be used to lock one type of lock at a time -
4380952c818SDave Chinner  * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
4390952c818SDave Chinner  * lock more than one at a time, lockdep will report false positives saying we
4400952c818SDave Chinner  * have violated locking orders.
441c24b5dfaSDave Chinner  */
4420d5a75e9SEric Sandeen static void
443c24b5dfaSDave Chinner xfs_lock_inodes(
444efe2330fSChristoph Hellwig 	struct xfs_inode	**ips,
445c24b5dfaSDave Chinner 	int			inodes,
446c24b5dfaSDave Chinner 	uint			lock_mode)
447c24b5dfaSDave Chinner {
448c24b5dfaSDave Chinner 	int			attempts = 0, i, j, try_lock;
449efe2330fSChristoph Hellwig 	struct xfs_log_item	*lp;
450c24b5dfaSDave Chinner 
4510952c818SDave Chinner 	/*
4520952c818SDave Chinner 	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
4530952c818SDave Chinner 	 * support an arbitrary depth of locking here, but absolute limits on
4540952c818SDave Chinner 	 * inodes depend on the the type of locking and the limits placed by
4550952c818SDave Chinner 	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
4560952c818SDave Chinner 	 * the asserts.
4570952c818SDave Chinner 	 */
45895afcf5cSDave Chinner 	ASSERT(ips && inodes >= 2 && inodes <= 5);
4590952c818SDave Chinner 	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
4600952c818SDave Chinner 			    XFS_ILOCK_EXCL));
4610952c818SDave Chinner 	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
4620952c818SDave Chinner 			      XFS_ILOCK_SHARED)));
4630952c818SDave Chinner 	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
4640952c818SDave Chinner 		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
4650952c818SDave Chinner 	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
4660952c818SDave Chinner 		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
4670952c818SDave Chinner 
4680952c818SDave Chinner 	if (lock_mode & XFS_IOLOCK_EXCL) {
4690952c818SDave Chinner 		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
4700952c818SDave Chinner 	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
4710952c818SDave Chinner 		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
472c24b5dfaSDave Chinner 
473c24b5dfaSDave Chinner 	try_lock = 0;
474c24b5dfaSDave Chinner 	i = 0;
475c24b5dfaSDave Chinner again:
476c24b5dfaSDave Chinner 	for (; i < inodes; i++) {
477c24b5dfaSDave Chinner 		ASSERT(ips[i]);
478c24b5dfaSDave Chinner 
479c24b5dfaSDave Chinner 		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
480c24b5dfaSDave Chinner 			continue;
481c24b5dfaSDave Chinner 
482c24b5dfaSDave Chinner 		/*
48395afcf5cSDave Chinner 		 * If try_lock is not set yet, make sure all locked inodes are
48495afcf5cSDave Chinner 		 * not in the AIL.  If any are, set try_lock to be used later.
485c24b5dfaSDave Chinner 		 */
486c24b5dfaSDave Chinner 		if (!try_lock) {
487c24b5dfaSDave Chinner 			for (j = (i - 1); j >= 0 && !try_lock; j--) {
488b3b14aacSChristoph Hellwig 				lp = &ips[j]->i_itemp->ili_item;
48922525c17SDave Chinner 				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
490c24b5dfaSDave Chinner 					try_lock++;
491c24b5dfaSDave Chinner 			}
492c24b5dfaSDave Chinner 		}
493c24b5dfaSDave Chinner 
494c24b5dfaSDave Chinner 		/*
495c24b5dfaSDave Chinner 		 * If any of the previous locks we have locked is in the AIL,
496c24b5dfaSDave Chinner 		 * we must TRY to get the second and subsequent locks. If
497c24b5dfaSDave Chinner 		 * we can't get any, we must release all we have
498c24b5dfaSDave Chinner 		 * and try again.
499c24b5dfaSDave Chinner 		 */
50095afcf5cSDave Chinner 		if (!try_lock) {
50195afcf5cSDave Chinner 			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
50295afcf5cSDave Chinner 			continue;
50395afcf5cSDave Chinner 		}
504c24b5dfaSDave Chinner 
50595afcf5cSDave Chinner 		/* try_lock means we have an inode locked that is in the AIL. */
506c24b5dfaSDave Chinner 		ASSERT(i != 0);
50795afcf5cSDave Chinner 		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
50895afcf5cSDave Chinner 			continue;
50995afcf5cSDave Chinner 
51095afcf5cSDave Chinner 		/*
51195afcf5cSDave Chinner 		 * Unlock all previous guys and try again.  xfs_iunlock will try
51295afcf5cSDave Chinner 		 * to push the tail if the inode is in the AIL.
51395afcf5cSDave Chinner 		 */
514c24b5dfaSDave Chinner 		attempts++;
515c24b5dfaSDave Chinner 		for (j = i - 1; j >= 0; j--) {
516c24b5dfaSDave Chinner 			/*
51795afcf5cSDave Chinner 			 * Check to see if we've already unlocked this one.  Not
51895afcf5cSDave Chinner 			 * the first one going back, and the inode ptr is the
51995afcf5cSDave Chinner 			 * same.
520c24b5dfaSDave Chinner 			 */
52195afcf5cSDave Chinner 			if (j != (i - 1) && ips[j] == ips[j + 1])
522c24b5dfaSDave Chinner 				continue;
523c24b5dfaSDave Chinner 
524c24b5dfaSDave Chinner 			xfs_iunlock(ips[j], lock_mode);
525c24b5dfaSDave Chinner 		}
526c24b5dfaSDave Chinner 
527c24b5dfaSDave Chinner 		if ((attempts % 5) == 0) {
528c24b5dfaSDave Chinner 			delay(1); /* Don't just spin the CPU */
529c24b5dfaSDave Chinner 		}
530c24b5dfaSDave Chinner 		i = 0;
531c24b5dfaSDave Chinner 		try_lock = 0;
532c24b5dfaSDave Chinner 		goto again;
533c24b5dfaSDave Chinner 	}
534c24b5dfaSDave Chinner }
535c24b5dfaSDave Chinner 
536c24b5dfaSDave Chinner /*
537653c60b6SDave Chinner  * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
5387c2d238aSDarrick J. Wong  * the mmaplock or the ilock, but not more than one type at a time. If we lock
5397c2d238aSDarrick J. Wong  * more than one at a time, lockdep will report false positives saying we have
5407c2d238aSDarrick J. Wong  * violated locking orders.  The iolock must be double-locked separately since
5417c2d238aSDarrick J. Wong  * we use i_rwsem for that.  We now support taking one lock EXCL and the other
5427c2d238aSDarrick J. Wong  * SHARED.
543c24b5dfaSDave Chinner  */
544c24b5dfaSDave Chinner void
545c24b5dfaSDave Chinner xfs_lock_two_inodes(
5467c2d238aSDarrick J. Wong 	struct xfs_inode	*ip0,
5477c2d238aSDarrick J. Wong 	uint			ip0_mode,
5487c2d238aSDarrick J. Wong 	struct xfs_inode	*ip1,
5497c2d238aSDarrick J. Wong 	uint			ip1_mode)
550c24b5dfaSDave Chinner {
5517c2d238aSDarrick J. Wong 	struct xfs_inode	*temp;
5527c2d238aSDarrick J. Wong 	uint			mode_temp;
553c24b5dfaSDave Chinner 	int			attempts = 0;
554efe2330fSChristoph Hellwig 	struct xfs_log_item	*lp;
555c24b5dfaSDave Chinner 
5567c2d238aSDarrick J. Wong 	ASSERT(hweight32(ip0_mode) == 1);
5577c2d238aSDarrick J. Wong 	ASSERT(hweight32(ip1_mode) == 1);
5587c2d238aSDarrick J. Wong 	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
5597c2d238aSDarrick J. Wong 	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
5607c2d238aSDarrick J. Wong 	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
5617c2d238aSDarrick J. Wong 	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
5627c2d238aSDarrick J. Wong 	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
5637c2d238aSDarrick J. Wong 	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
5647c2d238aSDarrick J. Wong 	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
5657c2d238aSDarrick J. Wong 	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
5667c2d238aSDarrick J. Wong 	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
5677c2d238aSDarrick J. Wong 	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
568653c60b6SDave Chinner 
569c24b5dfaSDave Chinner 	ASSERT(ip0->i_ino != ip1->i_ino);
570c24b5dfaSDave Chinner 
571c24b5dfaSDave Chinner 	if (ip0->i_ino > ip1->i_ino) {
572c24b5dfaSDave Chinner 		temp = ip0;
573c24b5dfaSDave Chinner 		ip0 = ip1;
574c24b5dfaSDave Chinner 		ip1 = temp;
5757c2d238aSDarrick J. Wong 		mode_temp = ip0_mode;
5767c2d238aSDarrick J. Wong 		ip0_mode = ip1_mode;
5777c2d238aSDarrick J. Wong 		ip1_mode = mode_temp;
578c24b5dfaSDave Chinner 	}
579c24b5dfaSDave Chinner 
580c24b5dfaSDave Chinner  again:
5817c2d238aSDarrick J. Wong 	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
582c24b5dfaSDave Chinner 
583c24b5dfaSDave Chinner 	/*
584c24b5dfaSDave Chinner 	 * If the first lock we have locked is in the AIL, we must TRY to get
585c24b5dfaSDave Chinner 	 * the second lock. If we can't get it, we must release the first one
586c24b5dfaSDave Chinner 	 * and try again.
587c24b5dfaSDave Chinner 	 */
588b3b14aacSChristoph Hellwig 	lp = &ip0->i_itemp->ili_item;
58922525c17SDave Chinner 	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
5907c2d238aSDarrick J. Wong 		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
5917c2d238aSDarrick J. Wong 			xfs_iunlock(ip0, ip0_mode);
592c24b5dfaSDave Chinner 			if ((++attempts % 5) == 0)
593c24b5dfaSDave Chinner 				delay(1); /* Don't just spin the CPU */
594c24b5dfaSDave Chinner 			goto again;
595c24b5dfaSDave Chinner 		}
596c24b5dfaSDave Chinner 	} else {
5977c2d238aSDarrick J. Wong 		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
598c24b5dfaSDave Chinner 	}
599c24b5dfaSDave Chinner }
600c24b5dfaSDave Chinner 
601fa96acadSDave Chinner void
602fa96acadSDave Chinner __xfs_iflock(
603fa96acadSDave Chinner 	struct xfs_inode	*ip)
604fa96acadSDave Chinner {
605fa96acadSDave Chinner 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
606fa96acadSDave Chinner 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
607fa96acadSDave Chinner 
608fa96acadSDave Chinner 	do {
60921417136SIngo Molnar 		prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
610fa96acadSDave Chinner 		if (xfs_isiflocked(ip))
611fa96acadSDave Chinner 			io_schedule();
612fa96acadSDave Chinner 	} while (!xfs_iflock_nowait(ip));
613fa96acadSDave Chinner 
61421417136SIngo Molnar 	finish_wait(wq, &wait.wq_entry);
615fa96acadSDave Chinner }
616fa96acadSDave Chinner 
6171da177e4SLinus Torvalds STATIC uint
6181da177e4SLinus Torvalds _xfs_dic2xflags(
619c8ce540dSDarrick J. Wong 	uint16_t		di_flags,
62058f88ca2SDave Chinner 	uint64_t		di_flags2,
62158f88ca2SDave Chinner 	bool			has_attr)
6221da177e4SLinus Torvalds {
6231da177e4SLinus Torvalds 	uint			flags = 0;
6241da177e4SLinus Torvalds 
6251da177e4SLinus Torvalds 	if (di_flags & XFS_DIFLAG_ANY) {
6261da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_REALTIME)
627e7b89481SDave Chinner 			flags |= FS_XFLAG_REALTIME;
6281da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_PREALLOC)
629e7b89481SDave Chinner 			flags |= FS_XFLAG_PREALLOC;
6301da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_IMMUTABLE)
631e7b89481SDave Chinner 			flags |= FS_XFLAG_IMMUTABLE;
6321da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_APPEND)
633e7b89481SDave Chinner 			flags |= FS_XFLAG_APPEND;
6341da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_SYNC)
635e7b89481SDave Chinner 			flags |= FS_XFLAG_SYNC;
6361da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_NOATIME)
637e7b89481SDave Chinner 			flags |= FS_XFLAG_NOATIME;
6381da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_NODUMP)
639e7b89481SDave Chinner 			flags |= FS_XFLAG_NODUMP;
6401da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_RTINHERIT)
641e7b89481SDave Chinner 			flags |= FS_XFLAG_RTINHERIT;
6421da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_PROJINHERIT)
643e7b89481SDave Chinner 			flags |= FS_XFLAG_PROJINHERIT;
6441da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
645e7b89481SDave Chinner 			flags |= FS_XFLAG_NOSYMLINKS;
646dd9f438eSNathan Scott 		if (di_flags & XFS_DIFLAG_EXTSIZE)
647e7b89481SDave Chinner 			flags |= FS_XFLAG_EXTSIZE;
648dd9f438eSNathan Scott 		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
649e7b89481SDave Chinner 			flags |= FS_XFLAG_EXTSZINHERIT;
650d3446eacSBarry Naujok 		if (di_flags & XFS_DIFLAG_NODEFRAG)
651e7b89481SDave Chinner 			flags |= FS_XFLAG_NODEFRAG;
6522a82b8beSDavid Chinner 		if (di_flags & XFS_DIFLAG_FILESTREAM)
653e7b89481SDave Chinner 			flags |= FS_XFLAG_FILESTREAM;
6541da177e4SLinus Torvalds 	}
6551da177e4SLinus Torvalds 
65658f88ca2SDave Chinner 	if (di_flags2 & XFS_DIFLAG2_ANY) {
65758f88ca2SDave Chinner 		if (di_flags2 & XFS_DIFLAG2_DAX)
65858f88ca2SDave Chinner 			flags |= FS_XFLAG_DAX;
659f7ca3522SDarrick J. Wong 		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
660f7ca3522SDarrick J. Wong 			flags |= FS_XFLAG_COWEXTSIZE;
66158f88ca2SDave Chinner 	}
66258f88ca2SDave Chinner 
66358f88ca2SDave Chinner 	if (has_attr)
66458f88ca2SDave Chinner 		flags |= FS_XFLAG_HASATTR;
66558f88ca2SDave Chinner 
6661da177e4SLinus Torvalds 	return flags;
6671da177e4SLinus Torvalds }
6681da177e4SLinus Torvalds 
6691da177e4SLinus Torvalds uint
6701da177e4SLinus Torvalds xfs_ip2xflags(
67158f88ca2SDave Chinner 	struct xfs_inode	*ip)
6721da177e4SLinus Torvalds {
67358f88ca2SDave Chinner 	struct xfs_icdinode	*dic = &ip->i_d;
6741da177e4SLinus Torvalds 
67558f88ca2SDave Chinner 	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
6761da177e4SLinus Torvalds }
6771da177e4SLinus Torvalds 
6781da177e4SLinus Torvalds /*
679c24b5dfaSDave Chinner  * Lookups up an inode from "name". If ci_name is not NULL, then a CI match
680c24b5dfaSDave Chinner  * is allowed, otherwise it has to be an exact match. If a CI match is found,
681c24b5dfaSDave Chinner  * ci_name->name will point to a the actual name (caller must free) or
682c24b5dfaSDave Chinner  * will be set to NULL if an exact match is found.
683c24b5dfaSDave Chinner  */
684c24b5dfaSDave Chinner int
685c24b5dfaSDave Chinner xfs_lookup(
686c24b5dfaSDave Chinner 	xfs_inode_t		*dp,
687c24b5dfaSDave Chinner 	struct xfs_name		*name,
688c24b5dfaSDave Chinner 	xfs_inode_t		**ipp,
689c24b5dfaSDave Chinner 	struct xfs_name		*ci_name)
690c24b5dfaSDave Chinner {
691c24b5dfaSDave Chinner 	xfs_ino_t		inum;
692c24b5dfaSDave Chinner 	int			error;
693c24b5dfaSDave Chinner 
694c24b5dfaSDave Chinner 	trace_xfs_lookup(dp, name);
695c24b5dfaSDave Chinner 
696c24b5dfaSDave Chinner 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
6972451337dSDave Chinner 		return -EIO;
698c24b5dfaSDave Chinner 
699c24b5dfaSDave Chinner 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
700c24b5dfaSDave Chinner 	if (error)
701dbad7c99SDave Chinner 		goto out_unlock;
702c24b5dfaSDave Chinner 
703c24b5dfaSDave Chinner 	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
704c24b5dfaSDave Chinner 	if (error)
705c24b5dfaSDave Chinner 		goto out_free_name;
706c24b5dfaSDave Chinner 
707c24b5dfaSDave Chinner 	return 0;
708c24b5dfaSDave Chinner 
709c24b5dfaSDave Chinner out_free_name:
710c24b5dfaSDave Chinner 	if (ci_name)
711c24b5dfaSDave Chinner 		kmem_free(ci_name->name);
712dbad7c99SDave Chinner out_unlock:
713c24b5dfaSDave Chinner 	*ipp = NULL;
714c24b5dfaSDave Chinner 	return error;
715c24b5dfaSDave Chinner }
716c24b5dfaSDave Chinner 
717c24b5dfaSDave Chinner /*
7181da177e4SLinus Torvalds  * Allocate an inode on disk and return a copy of its in-core version.
7191da177e4SLinus Torvalds  * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
7201da177e4SLinus Torvalds  * appropriately within the inode.  The uid and gid for the inode are
7211da177e4SLinus Torvalds  * set according to the contents of the given cred structure.
7221da177e4SLinus Torvalds  *
7231da177e4SLinus Torvalds  * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
724cd856db6SCarlos Maiolino  * has a free inode available, call xfs_iget() to obtain the in-core
725cd856db6SCarlos Maiolino  * version of the allocated inode.  Finally, fill in the inode and
726cd856db6SCarlos Maiolino  * log its initial contents.  In this case, ialloc_context would be
727cd856db6SCarlos Maiolino  * set to NULL.
7281da177e4SLinus Torvalds  *
729cd856db6SCarlos Maiolino  * If xfs_dialloc() does not have an available inode, it will replenish
730cd856db6SCarlos Maiolino  * its supply by doing an allocation. Since we can only do one
731cd856db6SCarlos Maiolino  * allocation within a transaction without deadlocks, we must commit
732cd856db6SCarlos Maiolino  * the current transaction before returning the inode itself.
733cd856db6SCarlos Maiolino  * In this case, therefore, we will set ialloc_context and return.
7341da177e4SLinus Torvalds  * The caller should then commit the current transaction, start a new
7351da177e4SLinus Torvalds  * transaction, and call xfs_ialloc() again to actually get the inode.
7361da177e4SLinus Torvalds  *
7371da177e4SLinus Torvalds  * To ensure that some other process does not grab the inode that
7381da177e4SLinus Torvalds  * was allocated during the first call to xfs_ialloc(), this routine
7391da177e4SLinus Torvalds  * also returns the [locked] bp pointing to the head of the freelist
7401da177e4SLinus Torvalds  * as ialloc_context.  The caller should hold this buffer across
7411da177e4SLinus Torvalds  * the commit and pass it back into this routine on the second call.
742b11f94d5SDavid Chinner  *
743b11f94d5SDavid Chinner  * If we are allocating quota inodes, we do not have a parent inode
744b11f94d5SDavid Chinner  * to attach to or associate with (i.e. pip == NULL) because they
745b11f94d5SDavid Chinner  * are not linked into the directory structure - they are attached
746b11f94d5SDavid Chinner  * directly to the superblock - and so have no parent.
7471da177e4SLinus Torvalds  */
7480d5a75e9SEric Sandeen static int
7491da177e4SLinus Torvalds xfs_ialloc(
7501da177e4SLinus Torvalds 	xfs_trans_t	*tp,
7511da177e4SLinus Torvalds 	xfs_inode_t	*pip,
752576b1d67SAl Viro 	umode_t		mode,
75331b084aeSNathan Scott 	xfs_nlink_t	nlink,
75466f36464SChristoph Hellwig 	dev_t		rdev,
7556743099cSArkadiusz Mi?kiewicz 	prid_t		prid,
7561da177e4SLinus Torvalds 	xfs_buf_t	**ialloc_context,
7571da177e4SLinus Torvalds 	xfs_inode_t	**ipp)
7581da177e4SLinus Torvalds {
75993848a99SChristoph Hellwig 	struct xfs_mount *mp = tp->t_mountp;
7601da177e4SLinus Torvalds 	xfs_ino_t	ino;
7611da177e4SLinus Torvalds 	xfs_inode_t	*ip;
7621da177e4SLinus Torvalds 	uint		flags;
7631da177e4SLinus Torvalds 	int		error;
76495582b00SDeepa Dinamani 	struct timespec64 tv;
7653987848cSDave Chinner 	struct inode	*inode;
7661da177e4SLinus Torvalds 
7671da177e4SLinus Torvalds 	/*
7681da177e4SLinus Torvalds 	 * Call the space management code to pick
7691da177e4SLinus Torvalds 	 * the on-disk inode to be allocated.
7701da177e4SLinus Torvalds 	 */
771f59cf5c2SChristoph Hellwig 	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
77208358906SChristoph Hellwig 			    ialloc_context, &ino);
773bf904248SDavid Chinner 	if (error)
7741da177e4SLinus Torvalds 		return error;
77508358906SChristoph Hellwig 	if (*ialloc_context || ino == NULLFSINO) {
7761da177e4SLinus Torvalds 		*ipp = NULL;
7771da177e4SLinus Torvalds 		return 0;
7781da177e4SLinus Torvalds 	}
7791da177e4SLinus Torvalds 	ASSERT(*ialloc_context == NULL);
7801da177e4SLinus Torvalds 
7811da177e4SLinus Torvalds 	/*
7828b26984dSDave Chinner 	 * Protect against obviously corrupt allocation btree records. Later
7838b26984dSDave Chinner 	 * xfs_iget checks will catch re-allocation of other active in-memory
7848b26984dSDave Chinner 	 * and on-disk inodes. If we don't catch reallocating the parent inode
7858b26984dSDave Chinner 	 * here we will deadlock in xfs_iget() so we have to do these checks
7868b26984dSDave Chinner 	 * first.
7878b26984dSDave Chinner 	 */
7888b26984dSDave Chinner 	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
7898b26984dSDave Chinner 		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
7908b26984dSDave Chinner 		return -EFSCORRUPTED;
7918b26984dSDave Chinner 	}
7928b26984dSDave Chinner 
7938b26984dSDave Chinner 	/*
7941da177e4SLinus Torvalds 	 * Get the in-core inode with the lock held exclusively.
7951da177e4SLinus Torvalds 	 * This is because we're setting fields here we need
7961da177e4SLinus Torvalds 	 * to prevent others from looking at until we're done.
7971da177e4SLinus Torvalds 	 */
79893848a99SChristoph Hellwig 	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
799ec3ba85fSChristoph Hellwig 			 XFS_ILOCK_EXCL, &ip);
800bf904248SDavid Chinner 	if (error)
8011da177e4SLinus Torvalds 		return error;
8021da177e4SLinus Torvalds 	ASSERT(ip != NULL);
8033987848cSDave Chinner 	inode = VFS_I(ip);
804c19b3b05SDave Chinner 	inode->i_mode = mode;
80554d7b5c1SDave Chinner 	set_nlink(inode, nlink);
8063d8f2821SChristoph Hellwig 	inode->i_uid = current_fsuid();
80766f36464SChristoph Hellwig 	inode->i_rdev = rdev;
808de7a866fSChristoph Hellwig 	ip->i_d.di_projid = prid;
8091da177e4SLinus Torvalds 
810bd186aa9SChristoph Hellwig 	if (pip && XFS_INHERIT_GID(pip)) {
8113d8f2821SChristoph Hellwig 		inode->i_gid = VFS_I(pip)->i_gid;
812c19b3b05SDave Chinner 		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
813c19b3b05SDave Chinner 			inode->i_mode |= S_ISGID;
8143d8f2821SChristoph Hellwig 	} else {
8153d8f2821SChristoph Hellwig 		inode->i_gid = current_fsgid();
8161da177e4SLinus Torvalds 	}
8171da177e4SLinus Torvalds 
8181da177e4SLinus Torvalds 	/*
8191da177e4SLinus Torvalds 	 * If the group ID of the new file does not match the effective group
8201da177e4SLinus Torvalds 	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
8211da177e4SLinus Torvalds 	 * (and only if the irix_sgid_inherit compatibility variable is set).
8221da177e4SLinus Torvalds 	 */
82354295159SChristoph Hellwig 	if (irix_sgid_inherit &&
82454295159SChristoph Hellwig 	    (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
825c19b3b05SDave Chinner 		inode->i_mode &= ~S_ISGID;
8261da177e4SLinus Torvalds 
8271da177e4SLinus Torvalds 	ip->i_d.di_size = 0;
8281da177e4SLinus Torvalds 	ip->i_d.di_nextents = 0;
8291da177e4SLinus Torvalds 	ASSERT(ip->i_d.di_nblocks == 0);
830dff35fd4SChristoph Hellwig 
831c2050a45SDeepa Dinamani 	tv = current_time(inode);
8323987848cSDave Chinner 	inode->i_mtime = tv;
8333987848cSDave Chinner 	inode->i_atime = tv;
8343987848cSDave Chinner 	inode->i_ctime = tv;
835dff35fd4SChristoph Hellwig 
8361da177e4SLinus Torvalds 	ip->i_d.di_extsize = 0;
8371da177e4SLinus Torvalds 	ip->i_d.di_dmevmask = 0;
8381da177e4SLinus Torvalds 	ip->i_d.di_dmstate = 0;
8391da177e4SLinus Torvalds 	ip->i_d.di_flags = 0;
84093848a99SChristoph Hellwig 
8416471e9c5SChristoph Hellwig 	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
842f0e28280SJeff Layton 		inode_set_iversion(inode, 1);
84393848a99SChristoph Hellwig 		ip->i_d.di_flags2 = 0;
844f7ca3522SDarrick J. Wong 		ip->i_d.di_cowextsize = 0;
8458d2d878dSChristoph Hellwig 		ip->i_d.di_crtime = tv;
84693848a99SChristoph Hellwig 	}
84793848a99SChristoph Hellwig 
8481da177e4SLinus Torvalds 	flags = XFS_ILOG_CORE;
8491da177e4SLinus Torvalds 	switch (mode & S_IFMT) {
8501da177e4SLinus Torvalds 	case S_IFIFO:
8511da177e4SLinus Torvalds 	case S_IFCHR:
8521da177e4SLinus Torvalds 	case S_IFBLK:
8531da177e4SLinus Torvalds 	case S_IFSOCK:
8541da177e4SLinus Torvalds 		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
8551da177e4SLinus Torvalds 		ip->i_df.if_flags = 0;
8561da177e4SLinus Torvalds 		flags |= XFS_ILOG_DEV;
8571da177e4SLinus Torvalds 		break;
8581da177e4SLinus Torvalds 	case S_IFREG:
8591da177e4SLinus Torvalds 	case S_IFDIR:
860b11f94d5SDavid Chinner 		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
861365ca83dSNathan Scott 			uint		di_flags = 0;
862365ca83dSNathan Scott 
863abbede1bSAl Viro 			if (S_ISDIR(mode)) {
864365ca83dSNathan Scott 				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
865365ca83dSNathan Scott 					di_flags |= XFS_DIFLAG_RTINHERIT;
866dd9f438eSNathan Scott 				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
867dd9f438eSNathan Scott 					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
868dd9f438eSNathan Scott 					ip->i_d.di_extsize = pip->i_d.di_extsize;
869dd9f438eSNathan Scott 				}
8709336e3a7SDave Chinner 				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
8719336e3a7SDave Chinner 					di_flags |= XFS_DIFLAG_PROJINHERIT;
872abbede1bSAl Viro 			} else if (S_ISREG(mode)) {
873613d7043SChristoph Hellwig 				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
874365ca83dSNathan Scott 					di_flags |= XFS_DIFLAG_REALTIME;
875dd9f438eSNathan Scott 				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
876dd9f438eSNathan Scott 					di_flags |= XFS_DIFLAG_EXTSIZE;
877dd9f438eSNathan Scott 					ip->i_d.di_extsize = pip->i_d.di_extsize;
878dd9f438eSNathan Scott 				}
8791da177e4SLinus Torvalds 			}
8801da177e4SLinus Torvalds 			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
8811da177e4SLinus Torvalds 			    xfs_inherit_noatime)
882365ca83dSNathan Scott 				di_flags |= XFS_DIFLAG_NOATIME;
8831da177e4SLinus Torvalds 			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
8841da177e4SLinus Torvalds 			    xfs_inherit_nodump)
885365ca83dSNathan Scott 				di_flags |= XFS_DIFLAG_NODUMP;
8861da177e4SLinus Torvalds 			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
8871da177e4SLinus Torvalds 			    xfs_inherit_sync)
888365ca83dSNathan Scott 				di_flags |= XFS_DIFLAG_SYNC;
8891da177e4SLinus Torvalds 			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
8901da177e4SLinus Torvalds 			    xfs_inherit_nosymlinks)
891365ca83dSNathan Scott 				di_flags |= XFS_DIFLAG_NOSYMLINKS;
892d3446eacSBarry Naujok 			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
893d3446eacSBarry Naujok 			    xfs_inherit_nodefrag)
894d3446eacSBarry Naujok 				di_flags |= XFS_DIFLAG_NODEFRAG;
8952a82b8beSDavid Chinner 			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
8962a82b8beSDavid Chinner 				di_flags |= XFS_DIFLAG_FILESTREAM;
89758f88ca2SDave Chinner 
898365ca83dSNathan Scott 			ip->i_d.di_flags |= di_flags;
8991da177e4SLinus Torvalds 		}
900b3d1d375SChristoph Hellwig 		if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY)) {
901f7ca3522SDarrick J. Wong 			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
902b3d1d375SChristoph Hellwig 				ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
903f7ca3522SDarrick J. Wong 				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
904f7ca3522SDarrick J. Wong 			}
90556bdf855SLukas Czerner 			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
906b3d1d375SChristoph Hellwig 				ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
907f7ca3522SDarrick J. Wong 		}
9081da177e4SLinus Torvalds 		/* FALLTHROUGH */
9091da177e4SLinus Torvalds 	case S_IFLNK:
9101da177e4SLinus Torvalds 		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
9111da177e4SLinus Torvalds 		ip->i_df.if_flags = XFS_IFEXTENTS;
912fcacbc3fSChristoph Hellwig 		ip->i_df.if_bytes = 0;
9136bdcf26aSChristoph Hellwig 		ip->i_df.if_u1.if_root = NULL;
9141da177e4SLinus Torvalds 		break;
9151da177e4SLinus Torvalds 	default:
9161da177e4SLinus Torvalds 		ASSERT(0);
9171da177e4SLinus Torvalds 	}
9181da177e4SLinus Torvalds 	/*
9191da177e4SLinus Torvalds 	 * Attribute fork settings for new inode.
9201da177e4SLinus Torvalds 	 */
9211da177e4SLinus Torvalds 	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
9221da177e4SLinus Torvalds 	ip->i_d.di_anextents = 0;
9231da177e4SLinus Torvalds 
9241da177e4SLinus Torvalds 	/*
9251da177e4SLinus Torvalds 	 * Log the new values stuffed into the inode.
9261da177e4SLinus Torvalds 	 */
927ddc3415aSChristoph Hellwig 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
9281da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, flags);
9291da177e4SLinus Torvalds 
93058c90473SDave Chinner 	/* now that we have an i_mode we can setup the inode structure */
93141be8bedSChristoph Hellwig 	xfs_setup_inode(ip);
9321da177e4SLinus Torvalds 
9331da177e4SLinus Torvalds 	*ipp = ip;
9341da177e4SLinus Torvalds 	return 0;
9351da177e4SLinus Torvalds }
9361da177e4SLinus Torvalds 
937e546cb79SDave Chinner /*
938e546cb79SDave Chinner  * Allocates a new inode from disk and return a pointer to the
939e546cb79SDave Chinner  * incore copy. This routine will internally commit the current
940e546cb79SDave Chinner  * transaction and allocate a new one if the Space Manager needed
941e546cb79SDave Chinner  * to do an allocation to replenish the inode free-list.
942e546cb79SDave Chinner  *
943e546cb79SDave Chinner  * This routine is designed to be called from xfs_create and
944e546cb79SDave Chinner  * xfs_create_dir.
945e546cb79SDave Chinner  *
946e546cb79SDave Chinner  */
947e546cb79SDave Chinner int
948e546cb79SDave Chinner xfs_dir_ialloc(
949e546cb79SDave Chinner 	xfs_trans_t	**tpp,		/* input: current transaction;
950e546cb79SDave Chinner 					   output: may be a new transaction. */
951e546cb79SDave Chinner 	xfs_inode_t	*dp,		/* directory within whose allocate
952e546cb79SDave Chinner 					   the inode. */
953e546cb79SDave Chinner 	umode_t		mode,
954e546cb79SDave Chinner 	xfs_nlink_t	nlink,
95566f36464SChristoph Hellwig 	dev_t		rdev,
956e546cb79SDave Chinner 	prid_t		prid,		/* project id */
957c959025eSChandan Rajendra 	xfs_inode_t	**ipp)		/* pointer to inode; it will be
958e546cb79SDave Chinner 					   locked. */
959e546cb79SDave Chinner {
960e546cb79SDave Chinner 	xfs_trans_t	*tp;
961e546cb79SDave Chinner 	xfs_inode_t	*ip;
962e546cb79SDave Chinner 	xfs_buf_t	*ialloc_context = NULL;
963e546cb79SDave Chinner 	int		code;
964e546cb79SDave Chinner 	void		*dqinfo;
965e546cb79SDave Chinner 	uint		tflags;
966e546cb79SDave Chinner 
967e546cb79SDave Chinner 	tp = *tpp;
968e546cb79SDave Chinner 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
969e546cb79SDave Chinner 
970e546cb79SDave Chinner 	/*
971e546cb79SDave Chinner 	 * xfs_ialloc will return a pointer to an incore inode if
972e546cb79SDave Chinner 	 * the Space Manager has an available inode on the free
973e546cb79SDave Chinner 	 * list. Otherwise, it will do an allocation and replenish
974e546cb79SDave Chinner 	 * the freelist.  Since we can only do one allocation per
975e546cb79SDave Chinner 	 * transaction without deadlocks, we will need to commit the
976e546cb79SDave Chinner 	 * current transaction and start a new one.  We will then
977e546cb79SDave Chinner 	 * need to call xfs_ialloc again to get the inode.
978e546cb79SDave Chinner 	 *
979e546cb79SDave Chinner 	 * If xfs_ialloc did an allocation to replenish the freelist,
980e546cb79SDave Chinner 	 * it returns the bp containing the head of the freelist as
981e546cb79SDave Chinner 	 * ialloc_context. We will hold a lock on it across the
982e546cb79SDave Chinner 	 * transaction commit so that no other process can steal
983e546cb79SDave Chinner 	 * the inode(s) that we've just allocated.
984e546cb79SDave Chinner 	 */
985f59cf5c2SChristoph Hellwig 	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
986f59cf5c2SChristoph Hellwig 			&ip);
987e546cb79SDave Chinner 
988e546cb79SDave Chinner 	/*
989e546cb79SDave Chinner 	 * Return an error if we were unable to allocate a new inode.
990e546cb79SDave Chinner 	 * This should only happen if we run out of space on disk or
991e546cb79SDave Chinner 	 * encounter a disk error.
992e546cb79SDave Chinner 	 */
993e546cb79SDave Chinner 	if (code) {
994e546cb79SDave Chinner 		*ipp = NULL;
995e546cb79SDave Chinner 		return code;
996e546cb79SDave Chinner 	}
997e546cb79SDave Chinner 	if (!ialloc_context && !ip) {
998e546cb79SDave Chinner 		*ipp = NULL;
9992451337dSDave Chinner 		return -ENOSPC;
1000e546cb79SDave Chinner 	}
1001e546cb79SDave Chinner 
1002e546cb79SDave Chinner 	/*
1003e546cb79SDave Chinner 	 * If the AGI buffer is non-NULL, then we were unable to get an
1004e546cb79SDave Chinner 	 * inode in one operation.  We need to commit the current
1005e546cb79SDave Chinner 	 * transaction and call xfs_ialloc() again.  It is guaranteed
1006e546cb79SDave Chinner 	 * to succeed the second time.
1007e546cb79SDave Chinner 	 */
1008e546cb79SDave Chinner 	if (ialloc_context) {
1009e546cb79SDave Chinner 		/*
1010e546cb79SDave Chinner 		 * Normally, xfs_trans_commit releases all the locks.
1011e546cb79SDave Chinner 		 * We call bhold to hang on to the ialloc_context across
1012e546cb79SDave Chinner 		 * the commit.  Holding this buffer prevents any other
1013e546cb79SDave Chinner 		 * processes from doing any allocations in this
1014e546cb79SDave Chinner 		 * allocation group.
1015e546cb79SDave Chinner 		 */
1016e546cb79SDave Chinner 		xfs_trans_bhold(tp, ialloc_context);
1017e546cb79SDave Chinner 
1018e546cb79SDave Chinner 		/*
1019e546cb79SDave Chinner 		 * We want the quota changes to be associated with the next
1020e546cb79SDave Chinner 		 * transaction, NOT this one. So, detach the dqinfo from this
1021e546cb79SDave Chinner 		 * and attach it to the next transaction.
1022e546cb79SDave Chinner 		 */
1023e546cb79SDave Chinner 		dqinfo = NULL;
1024e546cb79SDave Chinner 		tflags = 0;
1025e546cb79SDave Chinner 		if (tp->t_dqinfo) {
1026e546cb79SDave Chinner 			dqinfo = (void *)tp->t_dqinfo;
1027e546cb79SDave Chinner 			tp->t_dqinfo = NULL;
1028e546cb79SDave Chinner 			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
1029e546cb79SDave Chinner 			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
1030e546cb79SDave Chinner 		}
1031e546cb79SDave Chinner 
1032411350dfSChristoph Hellwig 		code = xfs_trans_roll(&tp);
10333d3c8b52SJie Liu 
1034e546cb79SDave Chinner 		/*
1035e546cb79SDave Chinner 		 * Re-attach the quota info that we detached from prev trx.
1036e546cb79SDave Chinner 		 */
1037e546cb79SDave Chinner 		if (dqinfo) {
1038e546cb79SDave Chinner 			tp->t_dqinfo = dqinfo;
1039e546cb79SDave Chinner 			tp->t_flags |= tflags;
1040e546cb79SDave Chinner 		}
1041e546cb79SDave Chinner 
1042e546cb79SDave Chinner 		if (code) {
1043e546cb79SDave Chinner 			xfs_buf_relse(ialloc_context);
10442e6db6c4SChristoph Hellwig 			*tpp = tp;
1045e546cb79SDave Chinner 			*ipp = NULL;
1046e546cb79SDave Chinner 			return code;
1047e546cb79SDave Chinner 		}
1048e546cb79SDave Chinner 		xfs_trans_bjoin(tp, ialloc_context);
1049e546cb79SDave Chinner 
1050e546cb79SDave Chinner 		/*
1051e546cb79SDave Chinner 		 * Call ialloc again. Since we've locked out all
1052e546cb79SDave Chinner 		 * other allocations in this allocation group,
1053e546cb79SDave Chinner 		 * this call should always succeed.
1054e546cb79SDave Chinner 		 */
1055e546cb79SDave Chinner 		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
1056f59cf5c2SChristoph Hellwig 				  &ialloc_context, &ip);
1057e546cb79SDave Chinner 
1058e546cb79SDave Chinner 		/*
1059e546cb79SDave Chinner 		 * If we get an error at this point, return to the caller
1060e546cb79SDave Chinner 		 * so that the current transaction can be aborted.
1061e546cb79SDave Chinner 		 */
1062e546cb79SDave Chinner 		if (code) {
1063e546cb79SDave Chinner 			*tpp = tp;
1064e546cb79SDave Chinner 			*ipp = NULL;
1065e546cb79SDave Chinner 			return code;
1066e546cb79SDave Chinner 		}
1067e546cb79SDave Chinner 		ASSERT(!ialloc_context && ip);
1068e546cb79SDave Chinner 
1069e546cb79SDave Chinner 	}
1070e546cb79SDave Chinner 
1071e546cb79SDave Chinner 	*ipp = ip;
1072e546cb79SDave Chinner 	*tpp = tp;
1073e546cb79SDave Chinner 
1074e546cb79SDave Chinner 	return 0;
1075e546cb79SDave Chinner }
1076e546cb79SDave Chinner 
1077e546cb79SDave Chinner /*
107854d7b5c1SDave Chinner  * Decrement the link count on an inode & log the change.  If this causes the
107954d7b5c1SDave Chinner  * link count to go to zero, move the inode to AGI unlinked list so that it can
108054d7b5c1SDave Chinner  * be freed when the last active reference goes away via xfs_inactive().
1081e546cb79SDave Chinner  */
10820d5a75e9SEric Sandeen static int			/* error */
1083e546cb79SDave Chinner xfs_droplink(
1084e546cb79SDave Chinner 	xfs_trans_t *tp,
1085e546cb79SDave Chinner 	xfs_inode_t *ip)
1086e546cb79SDave Chinner {
1087e546cb79SDave Chinner 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1088e546cb79SDave Chinner 
1089e546cb79SDave Chinner 	drop_nlink(VFS_I(ip));
1090e546cb79SDave Chinner 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1091e546cb79SDave Chinner 
109254d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink)
109354d7b5c1SDave Chinner 		return 0;
109454d7b5c1SDave Chinner 
109554d7b5c1SDave Chinner 	return xfs_iunlink(tp, ip);
1096e546cb79SDave Chinner }
1097e546cb79SDave Chinner 
1098e546cb79SDave Chinner /*
1099e546cb79SDave Chinner  * Increment the link count on an inode & log the change.
1100e546cb79SDave Chinner  */
110191083269SEric Sandeen static void
1102e546cb79SDave Chinner xfs_bumplink(
1103e546cb79SDave Chinner 	xfs_trans_t *tp,
1104e546cb79SDave Chinner 	xfs_inode_t *ip)
1105e546cb79SDave Chinner {
1106e546cb79SDave Chinner 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1107e546cb79SDave Chinner 
1108e546cb79SDave Chinner 	inc_nlink(VFS_I(ip));
1109e546cb79SDave Chinner 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1110e546cb79SDave Chinner }
1111e546cb79SDave Chinner 
1112c24b5dfaSDave Chinner int
1113c24b5dfaSDave Chinner xfs_create(
1114c24b5dfaSDave Chinner 	xfs_inode_t		*dp,
1115c24b5dfaSDave Chinner 	struct xfs_name		*name,
1116c24b5dfaSDave Chinner 	umode_t			mode,
111766f36464SChristoph Hellwig 	dev_t			rdev,
1118c24b5dfaSDave Chinner 	xfs_inode_t		**ipp)
1119c24b5dfaSDave Chinner {
1120c24b5dfaSDave Chinner 	int			is_dir = S_ISDIR(mode);
1121c24b5dfaSDave Chinner 	struct xfs_mount	*mp = dp->i_mount;
1122c24b5dfaSDave Chinner 	struct xfs_inode	*ip = NULL;
1123c24b5dfaSDave Chinner 	struct xfs_trans	*tp = NULL;
1124c24b5dfaSDave Chinner 	int			error;
1125c24b5dfaSDave Chinner 	bool                    unlock_dp_on_error = false;
1126c24b5dfaSDave Chinner 	prid_t			prid;
1127c24b5dfaSDave Chinner 	struct xfs_dquot	*udqp = NULL;
1128c24b5dfaSDave Chinner 	struct xfs_dquot	*gdqp = NULL;
1129c24b5dfaSDave Chinner 	struct xfs_dquot	*pdqp = NULL;
1130062647a8SBrian Foster 	struct xfs_trans_res	*tres;
1131c24b5dfaSDave Chinner 	uint			resblks;
1132c24b5dfaSDave Chinner 
1133c24b5dfaSDave Chinner 	trace_xfs_create(dp, name);
1134c24b5dfaSDave Chinner 
1135c24b5dfaSDave Chinner 	if (XFS_FORCED_SHUTDOWN(mp))
11362451337dSDave Chinner 		return -EIO;
1137c24b5dfaSDave Chinner 
1138163467d3SZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
1139c24b5dfaSDave Chinner 
1140c24b5dfaSDave Chinner 	/*
1141c24b5dfaSDave Chinner 	 * Make sure that we have allocated dquot(s) on disk.
1142c24b5dfaSDave Chinner 	 */
114354295159SChristoph Hellwig 	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
1144c24b5dfaSDave Chinner 					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1145c24b5dfaSDave Chinner 					&udqp, &gdqp, &pdqp);
1146c24b5dfaSDave Chinner 	if (error)
1147c24b5dfaSDave Chinner 		return error;
1148c24b5dfaSDave Chinner 
1149c24b5dfaSDave Chinner 	if (is_dir) {
1150c24b5dfaSDave Chinner 		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1151062647a8SBrian Foster 		tres = &M_RES(mp)->tr_mkdir;
1152c24b5dfaSDave Chinner 	} else {
1153c24b5dfaSDave Chinner 		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1154062647a8SBrian Foster 		tres = &M_RES(mp)->tr_create;
1155c24b5dfaSDave Chinner 	}
1156c24b5dfaSDave Chinner 
1157c24b5dfaSDave Chinner 	/*
1158c24b5dfaSDave Chinner 	 * Initially assume that the file does not exist and
1159c24b5dfaSDave Chinner 	 * reserve the resources for that case.  If that is not
1160c24b5dfaSDave Chinner 	 * the case we'll drop the one we have and get a more
1161c24b5dfaSDave Chinner 	 * appropriate transaction later.
1162c24b5dfaSDave Chinner 	 */
1163253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
11642451337dSDave Chinner 	if (error == -ENOSPC) {
1165c24b5dfaSDave Chinner 		/* flush outstanding delalloc blocks and retry */
1166c24b5dfaSDave Chinner 		xfs_flush_inodes(mp);
1167253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1168c24b5dfaSDave Chinner 	}
11694906e215SChristoph Hellwig 	if (error)
1170253f4911SChristoph Hellwig 		goto out_release_inode;
1171c24b5dfaSDave Chinner 
117265523218SChristoph Hellwig 	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1173c24b5dfaSDave Chinner 	unlock_dp_on_error = true;
1174c24b5dfaSDave Chinner 
1175c24b5dfaSDave Chinner 	/*
1176c24b5dfaSDave Chinner 	 * Reserve disk quota and the inode.
1177c24b5dfaSDave Chinner 	 */
1178c24b5dfaSDave Chinner 	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1179c24b5dfaSDave Chinner 						pdqp, resblks, 1, 0);
1180c24b5dfaSDave Chinner 	if (error)
1181c24b5dfaSDave Chinner 		goto out_trans_cancel;
1182c24b5dfaSDave Chinner 
1183c24b5dfaSDave Chinner 	/*
1184c24b5dfaSDave Chinner 	 * A newly created regular or special file just has one directory
1185c24b5dfaSDave Chinner 	 * entry pointing to them, but a directory also the "." entry
1186c24b5dfaSDave Chinner 	 * pointing to itself.
1187c24b5dfaSDave Chinner 	 */
1188c959025eSChandan Rajendra 	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
1189d6077aa3SJan Kara 	if (error)
1190c24b5dfaSDave Chinner 		goto out_trans_cancel;
1191c24b5dfaSDave Chinner 
1192c24b5dfaSDave Chinner 	/*
1193c24b5dfaSDave Chinner 	 * Now we join the directory inode to the transaction.  We do not do it
1194c24b5dfaSDave Chinner 	 * earlier because xfs_dir_ialloc might commit the previous transaction
1195c24b5dfaSDave Chinner 	 * (and release all the locks).  An error from here on will result in
1196c24b5dfaSDave Chinner 	 * the transaction cancel unlocking dp so don't do it explicitly in the
1197c24b5dfaSDave Chinner 	 * error path.
1198c24b5dfaSDave Chinner 	 */
119965523218SChristoph Hellwig 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1200c24b5dfaSDave Chinner 	unlock_dp_on_error = false;
1201c24b5dfaSDave Chinner 
1202381eee69SBrian Foster 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1203c9cfdb38SBrian Foster 				   resblks ?
1204c24b5dfaSDave Chinner 					resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1205c24b5dfaSDave Chinner 	if (error) {
12062451337dSDave Chinner 		ASSERT(error != -ENOSPC);
12074906e215SChristoph Hellwig 		goto out_trans_cancel;
1208c24b5dfaSDave Chinner 	}
1209c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1210c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1211c24b5dfaSDave Chinner 
1212c24b5dfaSDave Chinner 	if (is_dir) {
1213c24b5dfaSDave Chinner 		error = xfs_dir_init(tp, ip, dp);
1214c24b5dfaSDave Chinner 		if (error)
1215c8eac49eSBrian Foster 			goto out_trans_cancel;
1216c24b5dfaSDave Chinner 
121791083269SEric Sandeen 		xfs_bumplink(tp, dp);
1218c24b5dfaSDave Chinner 	}
1219c24b5dfaSDave Chinner 
1220c24b5dfaSDave Chinner 	/*
1221c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1222c24b5dfaSDave Chinner 	 * create transaction goes to disk before returning to
1223c24b5dfaSDave Chinner 	 * the user.
1224c24b5dfaSDave Chinner 	 */
1225c24b5dfaSDave Chinner 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1226c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1227c24b5dfaSDave Chinner 
1228c24b5dfaSDave Chinner 	/*
1229c24b5dfaSDave Chinner 	 * Attach the dquot(s) to the inodes and modify them incore.
1230c24b5dfaSDave Chinner 	 * These ids of the inode couldn't have changed since the new
1231c24b5dfaSDave Chinner 	 * inode has been locked ever since it was created.
1232c24b5dfaSDave Chinner 	 */
1233c24b5dfaSDave Chinner 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1234c24b5dfaSDave Chinner 
123570393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1236c24b5dfaSDave Chinner 	if (error)
1237c24b5dfaSDave Chinner 		goto out_release_inode;
1238c24b5dfaSDave Chinner 
1239c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1240c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1241c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1242c24b5dfaSDave Chinner 
1243c24b5dfaSDave Chinner 	*ipp = ip;
1244c24b5dfaSDave Chinner 	return 0;
1245c24b5dfaSDave Chinner 
1246c24b5dfaSDave Chinner  out_trans_cancel:
12474906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1248c24b5dfaSDave Chinner  out_release_inode:
1249c24b5dfaSDave Chinner 	/*
125058c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
125158c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
125258c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
1253c24b5dfaSDave Chinner 	 */
125458c90473SDave Chinner 	if (ip) {
125558c90473SDave Chinner 		xfs_finish_inode_setup(ip);
125644a8736bSDarrick J. Wong 		xfs_irele(ip);
125758c90473SDave Chinner 	}
1258c24b5dfaSDave Chinner 
1259c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1260c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1261c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1262c24b5dfaSDave Chinner 
1263c24b5dfaSDave Chinner 	if (unlock_dp_on_error)
126465523218SChristoph Hellwig 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1265c24b5dfaSDave Chinner 	return error;
1266c24b5dfaSDave Chinner }
1267c24b5dfaSDave Chinner 
1268c24b5dfaSDave Chinner int
126999b6436bSZhi Yong Wu xfs_create_tmpfile(
127099b6436bSZhi Yong Wu 	struct xfs_inode	*dp,
1271330033d6SBrian Foster 	umode_t			mode,
1272330033d6SBrian Foster 	struct xfs_inode	**ipp)
127399b6436bSZhi Yong Wu {
127499b6436bSZhi Yong Wu 	struct xfs_mount	*mp = dp->i_mount;
127599b6436bSZhi Yong Wu 	struct xfs_inode	*ip = NULL;
127699b6436bSZhi Yong Wu 	struct xfs_trans	*tp = NULL;
127799b6436bSZhi Yong Wu 	int			error;
127899b6436bSZhi Yong Wu 	prid_t                  prid;
127999b6436bSZhi Yong Wu 	struct xfs_dquot	*udqp = NULL;
128099b6436bSZhi Yong Wu 	struct xfs_dquot	*gdqp = NULL;
128199b6436bSZhi Yong Wu 	struct xfs_dquot	*pdqp = NULL;
128299b6436bSZhi Yong Wu 	struct xfs_trans_res	*tres;
128399b6436bSZhi Yong Wu 	uint			resblks;
128499b6436bSZhi Yong Wu 
128599b6436bSZhi Yong Wu 	if (XFS_FORCED_SHUTDOWN(mp))
12862451337dSDave Chinner 		return -EIO;
128799b6436bSZhi Yong Wu 
128899b6436bSZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
128999b6436bSZhi Yong Wu 
129099b6436bSZhi Yong Wu 	/*
129199b6436bSZhi Yong Wu 	 * Make sure that we have allocated dquot(s) on disk.
129299b6436bSZhi Yong Wu 	 */
129354295159SChristoph Hellwig 	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
129499b6436bSZhi Yong Wu 				XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
129599b6436bSZhi Yong Wu 				&udqp, &gdqp, &pdqp);
129699b6436bSZhi Yong Wu 	if (error)
129799b6436bSZhi Yong Wu 		return error;
129899b6436bSZhi Yong Wu 
129999b6436bSZhi Yong Wu 	resblks = XFS_IALLOC_SPACE_RES(mp);
130099b6436bSZhi Yong Wu 	tres = &M_RES(mp)->tr_create_tmpfile;
1301253f4911SChristoph Hellwig 
1302253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
13034906e215SChristoph Hellwig 	if (error)
1304253f4911SChristoph Hellwig 		goto out_release_inode;
130599b6436bSZhi Yong Wu 
130699b6436bSZhi Yong Wu 	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
130799b6436bSZhi Yong Wu 						pdqp, resblks, 1, 0);
130899b6436bSZhi Yong Wu 	if (error)
130999b6436bSZhi Yong Wu 		goto out_trans_cancel;
131099b6436bSZhi Yong Wu 
1311c4a6bf7fSDarrick J. Wong 	error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
1312d6077aa3SJan Kara 	if (error)
131399b6436bSZhi Yong Wu 		goto out_trans_cancel;
131499b6436bSZhi Yong Wu 
131599b6436bSZhi Yong Wu 	if (mp->m_flags & XFS_MOUNT_WSYNC)
131699b6436bSZhi Yong Wu 		xfs_trans_set_sync(tp);
131799b6436bSZhi Yong Wu 
131899b6436bSZhi Yong Wu 	/*
131999b6436bSZhi Yong Wu 	 * Attach the dquot(s) to the inodes and modify them incore.
132099b6436bSZhi Yong Wu 	 * These ids of the inode couldn't have changed since the new
132199b6436bSZhi Yong Wu 	 * inode has been locked ever since it was created.
132299b6436bSZhi Yong Wu 	 */
132399b6436bSZhi Yong Wu 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
132499b6436bSZhi Yong Wu 
132599b6436bSZhi Yong Wu 	error = xfs_iunlink(tp, ip);
132699b6436bSZhi Yong Wu 	if (error)
13274906e215SChristoph Hellwig 		goto out_trans_cancel;
132899b6436bSZhi Yong Wu 
132970393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
133099b6436bSZhi Yong Wu 	if (error)
133199b6436bSZhi Yong Wu 		goto out_release_inode;
133299b6436bSZhi Yong Wu 
133399b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
133499b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
133599b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
133699b6436bSZhi Yong Wu 
1337330033d6SBrian Foster 	*ipp = ip;
133899b6436bSZhi Yong Wu 	return 0;
133999b6436bSZhi Yong Wu 
134099b6436bSZhi Yong Wu  out_trans_cancel:
13414906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
134299b6436bSZhi Yong Wu  out_release_inode:
134399b6436bSZhi Yong Wu 	/*
134458c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
134558c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
134658c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
134799b6436bSZhi Yong Wu 	 */
134858c90473SDave Chinner 	if (ip) {
134958c90473SDave Chinner 		xfs_finish_inode_setup(ip);
135044a8736bSDarrick J. Wong 		xfs_irele(ip);
135158c90473SDave Chinner 	}
135299b6436bSZhi Yong Wu 
135399b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
135499b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
135599b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
135699b6436bSZhi Yong Wu 
135799b6436bSZhi Yong Wu 	return error;
135899b6436bSZhi Yong Wu }
135999b6436bSZhi Yong Wu 
136099b6436bSZhi Yong Wu int
1361c24b5dfaSDave Chinner xfs_link(
1362c24b5dfaSDave Chinner 	xfs_inode_t		*tdp,
1363c24b5dfaSDave Chinner 	xfs_inode_t		*sip,
1364c24b5dfaSDave Chinner 	struct xfs_name		*target_name)
1365c24b5dfaSDave Chinner {
1366c24b5dfaSDave Chinner 	xfs_mount_t		*mp = tdp->i_mount;
1367c24b5dfaSDave Chinner 	xfs_trans_t		*tp;
1368c24b5dfaSDave Chinner 	int			error;
1369c24b5dfaSDave Chinner 	int			resblks;
1370c24b5dfaSDave Chinner 
1371c24b5dfaSDave Chinner 	trace_xfs_link(tdp, target_name);
1372c24b5dfaSDave Chinner 
1373c19b3b05SDave Chinner 	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1374c24b5dfaSDave Chinner 
1375c24b5dfaSDave Chinner 	if (XFS_FORCED_SHUTDOWN(mp))
13762451337dSDave Chinner 		return -EIO;
1377c24b5dfaSDave Chinner 
1378c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(sip);
1379c24b5dfaSDave Chinner 	if (error)
1380c24b5dfaSDave Chinner 		goto std_return;
1381c24b5dfaSDave Chinner 
1382c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(tdp);
1383c24b5dfaSDave Chinner 	if (error)
1384c24b5dfaSDave Chinner 		goto std_return;
1385c24b5dfaSDave Chinner 
1386c24b5dfaSDave Chinner 	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1387253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
13882451337dSDave Chinner 	if (error == -ENOSPC) {
1389c24b5dfaSDave Chinner 		resblks = 0;
1390253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1391c24b5dfaSDave Chinner 	}
13924906e215SChristoph Hellwig 	if (error)
1393253f4911SChristoph Hellwig 		goto std_return;
1394c24b5dfaSDave Chinner 
13957c2d238aSDarrick J. Wong 	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1396c24b5dfaSDave Chinner 
1397c24b5dfaSDave Chinner 	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
139865523218SChristoph Hellwig 	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1399c24b5dfaSDave Chinner 
1400c24b5dfaSDave Chinner 	/*
1401c24b5dfaSDave Chinner 	 * If we are using project inheritance, we only allow hard link
1402c24b5dfaSDave Chinner 	 * creation in our tree when the project IDs are the same; else
1403c24b5dfaSDave Chinner 	 * the tree quota mechanism could be circumvented.
1404c24b5dfaSDave Chinner 	 */
1405c24b5dfaSDave Chinner 	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1406de7a866fSChristoph Hellwig 		     tdp->i_d.di_projid != sip->i_d.di_projid)) {
14072451337dSDave Chinner 		error = -EXDEV;
1408c24b5dfaSDave Chinner 		goto error_return;
1409c24b5dfaSDave Chinner 	}
1410c24b5dfaSDave Chinner 
141194f3cad5SEric Sandeen 	if (!resblks) {
141294f3cad5SEric Sandeen 		error = xfs_dir_canenter(tp, tdp, target_name);
1413c24b5dfaSDave Chinner 		if (error)
1414c24b5dfaSDave Chinner 			goto error_return;
141594f3cad5SEric Sandeen 	}
1416c24b5dfaSDave Chinner 
141754d7b5c1SDave Chinner 	/*
141854d7b5c1SDave Chinner 	 * Handle initial link state of O_TMPFILE inode
141954d7b5c1SDave Chinner 	 */
142054d7b5c1SDave Chinner 	if (VFS_I(sip)->i_nlink == 0) {
1421ab297431SZhi Yong Wu 		error = xfs_iunlink_remove(tp, sip);
1422ab297431SZhi Yong Wu 		if (error)
14234906e215SChristoph Hellwig 			goto error_return;
1424ab297431SZhi Yong Wu 	}
1425ab297431SZhi Yong Wu 
1426c24b5dfaSDave Chinner 	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1427381eee69SBrian Foster 				   resblks);
1428c24b5dfaSDave Chinner 	if (error)
14294906e215SChristoph Hellwig 		goto error_return;
1430c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1431c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1432c24b5dfaSDave Chinner 
143391083269SEric Sandeen 	xfs_bumplink(tp, sip);
1434c24b5dfaSDave Chinner 
1435c24b5dfaSDave Chinner 	/*
1436c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1437c24b5dfaSDave Chinner 	 * link transaction goes to disk before returning to
1438c24b5dfaSDave Chinner 	 * the user.
1439c24b5dfaSDave Chinner 	 */
1440f6106efaSEric Sandeen 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1441c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1442c24b5dfaSDave Chinner 
144370393313SChristoph Hellwig 	return xfs_trans_commit(tp);
1444c24b5dfaSDave Chinner 
1445c24b5dfaSDave Chinner  error_return:
14464906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1447c24b5dfaSDave Chinner  std_return:
1448c24b5dfaSDave Chinner 	return error;
1449c24b5dfaSDave Chinner }
1450c24b5dfaSDave Chinner 
1451363e59baSDarrick J. Wong /* Clear the reflink flag and the cowblocks tag if possible. */
1452363e59baSDarrick J. Wong static void
1453363e59baSDarrick J. Wong xfs_itruncate_clear_reflink_flags(
1454363e59baSDarrick J. Wong 	struct xfs_inode	*ip)
1455363e59baSDarrick J. Wong {
1456363e59baSDarrick J. Wong 	struct xfs_ifork	*dfork;
1457363e59baSDarrick J. Wong 	struct xfs_ifork	*cfork;
1458363e59baSDarrick J. Wong 
1459363e59baSDarrick J. Wong 	if (!xfs_is_reflink_inode(ip))
1460363e59baSDarrick J. Wong 		return;
1461363e59baSDarrick J. Wong 	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1462363e59baSDarrick J. Wong 	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1463363e59baSDarrick J. Wong 	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1464363e59baSDarrick J. Wong 		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1465363e59baSDarrick J. Wong 	if (cfork->if_bytes == 0)
1466363e59baSDarrick J. Wong 		xfs_inode_clear_cowblocks_tag(ip);
1467363e59baSDarrick J. Wong }
1468363e59baSDarrick J. Wong 
14691da177e4SLinus Torvalds /*
14708f04c47aSChristoph Hellwig  * Free up the underlying blocks past new_size.  The new size must be smaller
14718f04c47aSChristoph Hellwig  * than the current size.  This routine can be used both for the attribute and
14728f04c47aSChristoph Hellwig  * data fork, and does not modify the inode size, which is left to the caller.
14731da177e4SLinus Torvalds  *
1474f6485057SDavid Chinner  * The transaction passed to this routine must have made a permanent log
1475f6485057SDavid Chinner  * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1476f6485057SDavid Chinner  * given transaction and start new ones, so make sure everything involved in
1477f6485057SDavid Chinner  * the transaction is tidy before calling here.  Some transaction will be
1478f6485057SDavid Chinner  * returned to the caller to be committed.  The incoming transaction must
1479f6485057SDavid Chinner  * already include the inode, and both inode locks must be held exclusively.
1480f6485057SDavid Chinner  * The inode must also be "held" within the transaction.  On return the inode
1481f6485057SDavid Chinner  * will be "held" within the returned transaction.  This routine does NOT
1482f6485057SDavid Chinner  * require any disk space to be reserved for it within the transaction.
14831da177e4SLinus Torvalds  *
1484f6485057SDavid Chinner  * If we get an error, we must return with the inode locked and linked into the
1485f6485057SDavid Chinner  * current transaction. This keeps things simple for the higher level code,
1486f6485057SDavid Chinner  * because it always knows that the inode is locked and held in the transaction
1487f6485057SDavid Chinner  * that returns to it whether errors occur or not.  We don't mark the inode
1488f6485057SDavid Chinner  * dirty on error so that transactions can be easily aborted if possible.
14891da177e4SLinus Torvalds  */
14901da177e4SLinus Torvalds int
14914e529339SBrian Foster xfs_itruncate_extents_flags(
14928f04c47aSChristoph Hellwig 	struct xfs_trans	**tpp,
14938f04c47aSChristoph Hellwig 	struct xfs_inode	*ip,
14948f04c47aSChristoph Hellwig 	int			whichfork,
149513b86fc3SBrian Foster 	xfs_fsize_t		new_size,
14964e529339SBrian Foster 	int			flags)
14971da177e4SLinus Torvalds {
14988f04c47aSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
14998f04c47aSChristoph Hellwig 	struct xfs_trans	*tp = *tpp;
15001da177e4SLinus Torvalds 	xfs_fileoff_t		first_unmap_block;
15018f04c47aSChristoph Hellwig 	xfs_filblks_t		unmap_len;
15028f04c47aSChristoph Hellwig 	int			error = 0;
15031da177e4SLinus Torvalds 
15040b56185bSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
15050b56185bSChristoph Hellwig 	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
15060b56185bSChristoph Hellwig 	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1507ce7ae151SChristoph Hellwig 	ASSERT(new_size <= XFS_ISIZE(ip));
15088f04c47aSChristoph Hellwig 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
15091da177e4SLinus Torvalds 	ASSERT(ip->i_itemp != NULL);
1510898621d5SChristoph Hellwig 	ASSERT(ip->i_itemp->ili_lock_flags == 0);
15111da177e4SLinus Torvalds 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
15121da177e4SLinus Torvalds 
1513673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_start(ip, new_size);
1514673e8e59SChristoph Hellwig 
15154e529339SBrian Foster 	flags |= xfs_bmapi_aflag(whichfork);
151613b86fc3SBrian Foster 
15171da177e4SLinus Torvalds 	/*
15181da177e4SLinus Torvalds 	 * Since it is possible for space to become allocated beyond
15191da177e4SLinus Torvalds 	 * the end of the file (in a crash where the space is allocated
15201da177e4SLinus Torvalds 	 * but the inode size is not yet updated), simply remove any
15211da177e4SLinus Torvalds 	 * blocks which show up between the new EOF and the maximum
15224bbb04abSDarrick J. Wong 	 * possible file size.
15234bbb04abSDarrick J. Wong 	 *
15244bbb04abSDarrick J. Wong 	 * We have to free all the blocks to the bmbt maximum offset, even if
15254bbb04abSDarrick J. Wong 	 * the page cache can't scale that far.
15261da177e4SLinus Torvalds 	 */
15278f04c47aSChristoph Hellwig 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
15284bbb04abSDarrick J. Wong 	if (first_unmap_block >= XFS_MAX_FILEOFF) {
15294bbb04abSDarrick J. Wong 		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
15308f04c47aSChristoph Hellwig 		return 0;
15314bbb04abSDarrick J. Wong 	}
15328f04c47aSChristoph Hellwig 
15334bbb04abSDarrick J. Wong 	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
15344bbb04abSDarrick J. Wong 	while (unmap_len > 0) {
153502dff7bfSBrian Foster 		ASSERT(tp->t_firstblock == NULLFSBLOCK);
15364bbb04abSDarrick J. Wong 		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
15374bbb04abSDarrick J. Wong 				flags, XFS_ITRUNC_MAX_EXTENTS);
15388f04c47aSChristoph Hellwig 		if (error)
1539d5a2e289SBrian Foster 			goto out;
15401da177e4SLinus Torvalds 
15411da177e4SLinus Torvalds 		/*
15421da177e4SLinus Torvalds 		 * Duplicate the transaction that has the permanent
15431da177e4SLinus Torvalds 		 * reservation and commit the old transaction.
15441da177e4SLinus Torvalds 		 */
15459e28a242SBrian Foster 		error = xfs_defer_finish(&tp);
15468f04c47aSChristoph Hellwig 		if (error)
15479b1f4e98SBrian Foster 			goto out;
15481da177e4SLinus Torvalds 
1549411350dfSChristoph Hellwig 		error = xfs_trans_roll_inode(&tp, ip);
15501da177e4SLinus Torvalds 		if (error)
15518f04c47aSChristoph Hellwig 			goto out;
15521da177e4SLinus Torvalds 	}
15538f04c47aSChristoph Hellwig 
15544919d42aSDarrick J. Wong 	if (whichfork == XFS_DATA_FORK) {
1555aa8968f2SDarrick J. Wong 		/* Remove all pending CoW reservations. */
15564919d42aSDarrick J. Wong 		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
15574bbb04abSDarrick J. Wong 				first_unmap_block, XFS_MAX_FILEOFF, true);
1558aa8968f2SDarrick J. Wong 		if (error)
1559aa8968f2SDarrick J. Wong 			goto out;
1560aa8968f2SDarrick J. Wong 
1561363e59baSDarrick J. Wong 		xfs_itruncate_clear_reflink_flags(ip);
15624919d42aSDarrick J. Wong 	}
1563aa8968f2SDarrick J. Wong 
1564673e8e59SChristoph Hellwig 	/*
1565673e8e59SChristoph Hellwig 	 * Always re-log the inode so that our permanent transaction can keep
1566673e8e59SChristoph Hellwig 	 * on rolling it forward in the log.
1567673e8e59SChristoph Hellwig 	 */
1568673e8e59SChristoph Hellwig 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1569673e8e59SChristoph Hellwig 
1570673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_end(ip, new_size);
1571673e8e59SChristoph Hellwig 
15728f04c47aSChristoph Hellwig out:
15738f04c47aSChristoph Hellwig 	*tpp = tp;
15748f04c47aSChristoph Hellwig 	return error;
15758f04c47aSChristoph Hellwig }
15768f04c47aSChristoph Hellwig 
1577c24b5dfaSDave Chinner int
1578c24b5dfaSDave Chinner xfs_release(
1579c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1580c24b5dfaSDave Chinner {
1581c24b5dfaSDave Chinner 	xfs_mount_t	*mp = ip->i_mount;
1582c24b5dfaSDave Chinner 	int		error;
1583c24b5dfaSDave Chinner 
1584c19b3b05SDave Chinner 	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1585c24b5dfaSDave Chinner 		return 0;
1586c24b5dfaSDave Chinner 
1587c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
1588c24b5dfaSDave Chinner 	if (mp->m_flags & XFS_MOUNT_RDONLY)
1589c24b5dfaSDave Chinner 		return 0;
1590c24b5dfaSDave Chinner 
1591c24b5dfaSDave Chinner 	if (!XFS_FORCED_SHUTDOWN(mp)) {
1592c24b5dfaSDave Chinner 		int truncated;
1593c24b5dfaSDave Chinner 
1594c24b5dfaSDave Chinner 		/*
1595c24b5dfaSDave Chinner 		 * If we previously truncated this file and removed old data
1596c24b5dfaSDave Chinner 		 * in the process, we want to initiate "early" writeout on
1597c24b5dfaSDave Chinner 		 * the last close.  This is an attempt to combat the notorious
1598c24b5dfaSDave Chinner 		 * NULL files problem which is particularly noticeable from a
1599c24b5dfaSDave Chinner 		 * truncate down, buffered (re-)write (delalloc), followed by
1600c24b5dfaSDave Chinner 		 * a crash.  What we are effectively doing here is
1601c24b5dfaSDave Chinner 		 * significantly reducing the time window where we'd otherwise
1602c24b5dfaSDave Chinner 		 * be exposed to that problem.
1603c24b5dfaSDave Chinner 		 */
1604c24b5dfaSDave Chinner 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1605c24b5dfaSDave Chinner 		if (truncated) {
1606c24b5dfaSDave Chinner 			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1607eac152b4SDave Chinner 			if (ip->i_delayed_blks > 0) {
16082451337dSDave Chinner 				error = filemap_flush(VFS_I(ip)->i_mapping);
1609c24b5dfaSDave Chinner 				if (error)
1610c24b5dfaSDave Chinner 					return error;
1611c24b5dfaSDave Chinner 			}
1612c24b5dfaSDave Chinner 		}
1613c24b5dfaSDave Chinner 	}
1614c24b5dfaSDave Chinner 
161554d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink == 0)
1616c24b5dfaSDave Chinner 		return 0;
1617c24b5dfaSDave Chinner 
1618c24b5dfaSDave Chinner 	if (xfs_can_free_eofblocks(ip, false)) {
1619c24b5dfaSDave Chinner 
1620c24b5dfaSDave Chinner 		/*
1621a36b9261SBrian Foster 		 * Check if the inode is being opened, written and closed
1622a36b9261SBrian Foster 		 * frequently and we have delayed allocation blocks outstanding
1623a36b9261SBrian Foster 		 * (e.g. streaming writes from the NFS server), truncating the
1624a36b9261SBrian Foster 		 * blocks past EOF will cause fragmentation to occur.
1625a36b9261SBrian Foster 		 *
1626a36b9261SBrian Foster 		 * In this case don't do the truncation, but we have to be
1627a36b9261SBrian Foster 		 * careful how we detect this case. Blocks beyond EOF show up as
1628a36b9261SBrian Foster 		 * i_delayed_blks even when the inode is clean, so we need to
1629a36b9261SBrian Foster 		 * truncate them away first before checking for a dirty release.
1630a36b9261SBrian Foster 		 * Hence on the first dirty close we will still remove the
1631a36b9261SBrian Foster 		 * speculative allocation, but after that we will leave it in
1632a36b9261SBrian Foster 		 * place.
1633a36b9261SBrian Foster 		 */
1634a36b9261SBrian Foster 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1635a36b9261SBrian Foster 			return 0;
1636a36b9261SBrian Foster 		/*
1637c24b5dfaSDave Chinner 		 * If we can't get the iolock just skip truncating the blocks
1638c24b5dfaSDave Chinner 		 * past EOF because we could deadlock with the mmap_sem
1639c24b5dfaSDave Chinner 		 * otherwise. We'll get another chance to drop them once the
1640c24b5dfaSDave Chinner 		 * last reference to the inode is dropped, so we'll never leak
1641c24b5dfaSDave Chinner 		 * blocks permanently.
1642c24b5dfaSDave Chinner 		 */
1643a36b9261SBrian Foster 		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1644a36b9261SBrian Foster 			error = xfs_free_eofblocks(ip);
1645a36b9261SBrian Foster 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1646a36b9261SBrian Foster 			if (error)
1647c24b5dfaSDave Chinner 				return error;
1648a36b9261SBrian Foster 		}
1649c24b5dfaSDave Chinner 
1650c24b5dfaSDave Chinner 		/* delalloc blocks after truncation means it really is dirty */
1651c24b5dfaSDave Chinner 		if (ip->i_delayed_blks)
1652c24b5dfaSDave Chinner 			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1653c24b5dfaSDave Chinner 	}
1654c24b5dfaSDave Chinner 	return 0;
1655c24b5dfaSDave Chinner }
1656c24b5dfaSDave Chinner 
1657c24b5dfaSDave Chinner /*
1658f7be2d7fSBrian Foster  * xfs_inactive_truncate
1659f7be2d7fSBrian Foster  *
1660f7be2d7fSBrian Foster  * Called to perform a truncate when an inode becomes unlinked.
1661f7be2d7fSBrian Foster  */
1662f7be2d7fSBrian Foster STATIC int
1663f7be2d7fSBrian Foster xfs_inactive_truncate(
1664f7be2d7fSBrian Foster 	struct xfs_inode *ip)
1665f7be2d7fSBrian Foster {
1666f7be2d7fSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
1667f7be2d7fSBrian Foster 	struct xfs_trans	*tp;
1668f7be2d7fSBrian Foster 	int			error;
1669f7be2d7fSBrian Foster 
1670253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1671f7be2d7fSBrian Foster 	if (error) {
1672f7be2d7fSBrian Foster 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
1673f7be2d7fSBrian Foster 		return error;
1674f7be2d7fSBrian Foster 	}
1675f7be2d7fSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1676f7be2d7fSBrian Foster 	xfs_trans_ijoin(tp, ip, 0);
1677f7be2d7fSBrian Foster 
1678f7be2d7fSBrian Foster 	/*
1679f7be2d7fSBrian Foster 	 * Log the inode size first to prevent stale data exposure in the event
1680f7be2d7fSBrian Foster 	 * of a system crash before the truncate completes. See the related
168169bca807SJan Kara 	 * comment in xfs_vn_setattr_size() for details.
1682f7be2d7fSBrian Foster 	 */
1683f7be2d7fSBrian Foster 	ip->i_d.di_size = 0;
1684f7be2d7fSBrian Foster 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1685f7be2d7fSBrian Foster 
1686f7be2d7fSBrian Foster 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1687f7be2d7fSBrian Foster 	if (error)
1688f7be2d7fSBrian Foster 		goto error_trans_cancel;
1689f7be2d7fSBrian Foster 
1690f7be2d7fSBrian Foster 	ASSERT(ip->i_d.di_nextents == 0);
1691f7be2d7fSBrian Foster 
169270393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1693f7be2d7fSBrian Foster 	if (error)
1694f7be2d7fSBrian Foster 		goto error_unlock;
1695f7be2d7fSBrian Foster 
1696f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1697f7be2d7fSBrian Foster 	return 0;
1698f7be2d7fSBrian Foster 
1699f7be2d7fSBrian Foster error_trans_cancel:
17004906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1701f7be2d7fSBrian Foster error_unlock:
1702f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1703f7be2d7fSBrian Foster 	return error;
1704f7be2d7fSBrian Foster }
1705f7be2d7fSBrian Foster 
1706f7be2d7fSBrian Foster /*
170788877d2bSBrian Foster  * xfs_inactive_ifree()
170888877d2bSBrian Foster  *
170988877d2bSBrian Foster  * Perform the inode free when an inode is unlinked.
171088877d2bSBrian Foster  */
171188877d2bSBrian Foster STATIC int
171288877d2bSBrian Foster xfs_inactive_ifree(
171388877d2bSBrian Foster 	struct xfs_inode *ip)
171488877d2bSBrian Foster {
171588877d2bSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
171688877d2bSBrian Foster 	struct xfs_trans	*tp;
171788877d2bSBrian Foster 	int			error;
171888877d2bSBrian Foster 
17199d43b180SBrian Foster 	/*
172076d771b4SChristoph Hellwig 	 * We try to use a per-AG reservation for any block needed by the finobt
172176d771b4SChristoph Hellwig 	 * tree, but as the finobt feature predates the per-AG reservation
172276d771b4SChristoph Hellwig 	 * support a degraded file system might not have enough space for the
172376d771b4SChristoph Hellwig 	 * reservation at mount time.  In that case try to dip into the reserved
172476d771b4SChristoph Hellwig 	 * pool and pray.
17259d43b180SBrian Foster 	 *
17269d43b180SBrian Foster 	 * Send a warning if the reservation does happen to fail, as the inode
17279d43b180SBrian Foster 	 * now remains allocated and sits on the unlinked list until the fs is
17289d43b180SBrian Foster 	 * repaired.
17299d43b180SBrian Foster 	 */
1730e1f6ca11SDarrick J. Wong 	if (unlikely(mp->m_finobt_nores)) {
1731253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
173276d771b4SChristoph Hellwig 				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
173376d771b4SChristoph Hellwig 				&tp);
173476d771b4SChristoph Hellwig 	} else {
173576d771b4SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
173676d771b4SChristoph Hellwig 	}
173788877d2bSBrian Foster 	if (error) {
17382451337dSDave Chinner 		if (error == -ENOSPC) {
17399d43b180SBrian Foster 			xfs_warn_ratelimited(mp,
17409d43b180SBrian Foster 			"Failed to remove inode(s) from unlinked list. "
17419d43b180SBrian Foster 			"Please free space, unmount and run xfs_repair.");
17429d43b180SBrian Foster 		} else {
174388877d2bSBrian Foster 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
17449d43b180SBrian Foster 		}
174588877d2bSBrian Foster 		return error;
174688877d2bSBrian Foster 	}
174788877d2bSBrian Foster 
174888877d2bSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
174988877d2bSBrian Foster 	xfs_trans_ijoin(tp, ip, 0);
175088877d2bSBrian Foster 
17510e0417f3SBrian Foster 	error = xfs_ifree(tp, ip);
175288877d2bSBrian Foster 	if (error) {
175388877d2bSBrian Foster 		/*
175488877d2bSBrian Foster 		 * If we fail to free the inode, shut down.  The cancel
175588877d2bSBrian Foster 		 * might do that, we need to make sure.  Otherwise the
175688877d2bSBrian Foster 		 * inode might be lost for a long time or forever.
175788877d2bSBrian Foster 		 */
175888877d2bSBrian Foster 		if (!XFS_FORCED_SHUTDOWN(mp)) {
175988877d2bSBrian Foster 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
176088877d2bSBrian Foster 				__func__, error);
176188877d2bSBrian Foster 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
176288877d2bSBrian Foster 		}
17634906e215SChristoph Hellwig 		xfs_trans_cancel(tp);
176488877d2bSBrian Foster 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
176588877d2bSBrian Foster 		return error;
176688877d2bSBrian Foster 	}
176788877d2bSBrian Foster 
176888877d2bSBrian Foster 	/*
176988877d2bSBrian Foster 	 * Credit the quota account(s). The inode is gone.
177088877d2bSBrian Foster 	 */
177188877d2bSBrian Foster 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
177288877d2bSBrian Foster 
177388877d2bSBrian Foster 	/*
1774d4a97a04SBrian Foster 	 * Just ignore errors at this point.  There is nothing we can do except
1775d4a97a04SBrian Foster 	 * to try to keep going. Make sure it's not a silent error.
177688877d2bSBrian Foster 	 */
177770393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
177888877d2bSBrian Foster 	if (error)
177988877d2bSBrian Foster 		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
178088877d2bSBrian Foster 			__func__, error);
178188877d2bSBrian Foster 
178288877d2bSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
178388877d2bSBrian Foster 	return 0;
178488877d2bSBrian Foster }
178588877d2bSBrian Foster 
178688877d2bSBrian Foster /*
1787c24b5dfaSDave Chinner  * xfs_inactive
1788c24b5dfaSDave Chinner  *
1789c24b5dfaSDave Chinner  * This is called when the vnode reference count for the vnode
1790c24b5dfaSDave Chinner  * goes to zero.  If the file has been unlinked, then it must
1791c24b5dfaSDave Chinner  * now be truncated.  Also, we clear all of the read-ahead state
1792c24b5dfaSDave Chinner  * kept for the inode here since the file is now closed.
1793c24b5dfaSDave Chinner  */
179474564fb4SBrian Foster void
1795c24b5dfaSDave Chinner xfs_inactive(
1796c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1797c24b5dfaSDave Chinner {
17983d3c8b52SJie Liu 	struct xfs_mount	*mp;
1799c24b5dfaSDave Chinner 	int			error;
1800c24b5dfaSDave Chinner 	int			truncate = 0;
1801c24b5dfaSDave Chinner 
1802c24b5dfaSDave Chinner 	/*
1803c24b5dfaSDave Chinner 	 * If the inode is already free, then there can be nothing
1804c24b5dfaSDave Chinner 	 * to clean up here.
1805c24b5dfaSDave Chinner 	 */
1806c19b3b05SDave Chinner 	if (VFS_I(ip)->i_mode == 0) {
1807c24b5dfaSDave Chinner 		ASSERT(ip->i_df.if_broot_bytes == 0);
180874564fb4SBrian Foster 		return;
1809c24b5dfaSDave Chinner 	}
1810c24b5dfaSDave Chinner 
1811c24b5dfaSDave Chinner 	mp = ip->i_mount;
181217c12bcdSDarrick J. Wong 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1813c24b5dfaSDave Chinner 
1814c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
1815c24b5dfaSDave Chinner 	if (mp->m_flags & XFS_MOUNT_RDONLY)
181674564fb4SBrian Foster 		return;
1817c24b5dfaSDave Chinner 
18186231848cSDarrick J. Wong 	/* Try to clean out the cow blocks if there are any. */
181951d62690SChristoph Hellwig 	if (xfs_inode_has_cow_data(ip))
18206231848cSDarrick J. Wong 		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
18216231848cSDarrick J. Wong 
182254d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink != 0) {
1823c24b5dfaSDave Chinner 		/*
1824c24b5dfaSDave Chinner 		 * force is true because we are evicting an inode from the
1825c24b5dfaSDave Chinner 		 * cache. Post-eof blocks must be freed, lest we end up with
1826c24b5dfaSDave Chinner 		 * broken free space accounting.
18273b4683c2SBrian Foster 		 *
18283b4683c2SBrian Foster 		 * Note: don't bother with iolock here since lockdep complains
18293b4683c2SBrian Foster 		 * about acquiring it in reclaim context. We have the only
18303b4683c2SBrian Foster 		 * reference to the inode at this point anyways.
1831c24b5dfaSDave Chinner 		 */
18323b4683c2SBrian Foster 		if (xfs_can_free_eofblocks(ip, true))
1833a36b9261SBrian Foster 			xfs_free_eofblocks(ip);
183474564fb4SBrian Foster 
183574564fb4SBrian Foster 		return;
1836c24b5dfaSDave Chinner 	}
1837c24b5dfaSDave Chinner 
1838c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode) &&
1839c24b5dfaSDave Chinner 	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1840c24b5dfaSDave Chinner 	     ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
1841c24b5dfaSDave Chinner 		truncate = 1;
1842c24b5dfaSDave Chinner 
1843c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
1844c24b5dfaSDave Chinner 	if (error)
184574564fb4SBrian Foster 		return;
1846c24b5dfaSDave Chinner 
1847c19b3b05SDave Chinner 	if (S_ISLNK(VFS_I(ip)->i_mode))
184836b21ddeSBrian Foster 		error = xfs_inactive_symlink(ip);
1849f7be2d7fSBrian Foster 	else if (truncate)
1850f7be2d7fSBrian Foster 		error = xfs_inactive_truncate(ip);
185136b21ddeSBrian Foster 	if (error)
185274564fb4SBrian Foster 		return;
1853c24b5dfaSDave Chinner 
1854c24b5dfaSDave Chinner 	/*
1855c24b5dfaSDave Chinner 	 * If there are attributes associated with the file then blow them away
1856c24b5dfaSDave Chinner 	 * now.  The code calls a routine that recursively deconstructs the
18576dfe5a04SDave Chinner 	 * attribute fork. If also blows away the in-core attribute fork.
1858c24b5dfaSDave Chinner 	 */
18596dfe5a04SDave Chinner 	if (XFS_IFORK_Q(ip)) {
1860c24b5dfaSDave Chinner 		error = xfs_attr_inactive(ip);
1861c24b5dfaSDave Chinner 		if (error)
186274564fb4SBrian Foster 			return;
1863c24b5dfaSDave Chinner 	}
1864c24b5dfaSDave Chinner 
18656dfe5a04SDave Chinner 	ASSERT(!ip->i_afp);
1866c24b5dfaSDave Chinner 	ASSERT(ip->i_d.di_anextents == 0);
18676dfe5a04SDave Chinner 	ASSERT(ip->i_d.di_forkoff == 0);
1868c24b5dfaSDave Chinner 
1869c24b5dfaSDave Chinner 	/*
1870c24b5dfaSDave Chinner 	 * Free the inode.
1871c24b5dfaSDave Chinner 	 */
187288877d2bSBrian Foster 	error = xfs_inactive_ifree(ip);
1873c24b5dfaSDave Chinner 	if (error)
187474564fb4SBrian Foster 		return;
1875c24b5dfaSDave Chinner 
1876c24b5dfaSDave Chinner 	/*
1877c24b5dfaSDave Chinner 	 * Release the dquots held by inode, if any.
1878c24b5dfaSDave Chinner 	 */
1879c24b5dfaSDave Chinner 	xfs_qm_dqdetach(ip);
1880c24b5dfaSDave Chinner }
1881c24b5dfaSDave Chinner 
18821da177e4SLinus Torvalds /*
18839b247179SDarrick J. Wong  * In-Core Unlinked List Lookups
18849b247179SDarrick J. Wong  * =============================
18859b247179SDarrick J. Wong  *
18869b247179SDarrick J. Wong  * Every inode is supposed to be reachable from some other piece of metadata
18879b247179SDarrick J. Wong  * with the exception of the root directory.  Inodes with a connection to a
18889b247179SDarrick J. Wong  * file descriptor but not linked from anywhere in the on-disk directory tree
18899b247179SDarrick J. Wong  * are collectively known as unlinked inodes, though the filesystem itself
18909b247179SDarrick J. Wong  * maintains links to these inodes so that on-disk metadata are consistent.
18919b247179SDarrick J. Wong  *
18929b247179SDarrick J. Wong  * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
18939b247179SDarrick J. Wong  * header contains a number of buckets that point to an inode, and each inode
18949b247179SDarrick J. Wong  * record has a pointer to the next inode in the hash chain.  This
18959b247179SDarrick J. Wong  * singly-linked list causes scaling problems in the iunlink remove function
18969b247179SDarrick J. Wong  * because we must walk that list to find the inode that points to the inode
18979b247179SDarrick J. Wong  * being removed from the unlinked hash bucket list.
18989b247179SDarrick J. Wong  *
18999b247179SDarrick J. Wong  * What if we modelled the unlinked list as a collection of records capturing
19009b247179SDarrick J. Wong  * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
19019b247179SDarrick J. Wong  * have a fast way to look up unlinked list predecessors, which avoids the
19029b247179SDarrick J. Wong  * slow list walk.  That's exactly what we do here (in-core) with a per-AG
19039b247179SDarrick J. Wong  * rhashtable.
19049b247179SDarrick J. Wong  *
19059b247179SDarrick J. Wong  * Because this is a backref cache, we ignore operational failures since the
19069b247179SDarrick J. Wong  * iunlink code can fall back to the slow bucket walk.  The only errors that
19079b247179SDarrick J. Wong  * should bubble out are for obviously incorrect situations.
19089b247179SDarrick J. Wong  *
19099b247179SDarrick J. Wong  * All users of the backref cache MUST hold the AGI buffer lock to serialize
19109b247179SDarrick J. Wong  * access or have otherwise provided for concurrency control.
19119b247179SDarrick J. Wong  */
19129b247179SDarrick J. Wong 
19139b247179SDarrick J. Wong /* Capture a "X.next_unlinked = Y" relationship. */
19149b247179SDarrick J. Wong struct xfs_iunlink {
19159b247179SDarrick J. Wong 	struct rhash_head	iu_rhash_head;
19169b247179SDarrick J. Wong 	xfs_agino_t		iu_agino;		/* X */
19179b247179SDarrick J. Wong 	xfs_agino_t		iu_next_unlinked;	/* Y */
19189b247179SDarrick J. Wong };
19199b247179SDarrick J. Wong 
19209b247179SDarrick J. Wong /* Unlinked list predecessor lookup hashtable construction */
19219b247179SDarrick J. Wong static int
19229b247179SDarrick J. Wong xfs_iunlink_obj_cmpfn(
19239b247179SDarrick J. Wong 	struct rhashtable_compare_arg	*arg,
19249b247179SDarrick J. Wong 	const void			*obj)
19259b247179SDarrick J. Wong {
19269b247179SDarrick J. Wong 	const xfs_agino_t		*key = arg->key;
19279b247179SDarrick J. Wong 	const struct xfs_iunlink	*iu = obj;
19289b247179SDarrick J. Wong 
19299b247179SDarrick J. Wong 	if (iu->iu_next_unlinked != *key)
19309b247179SDarrick J. Wong 		return 1;
19319b247179SDarrick J. Wong 	return 0;
19329b247179SDarrick J. Wong }
19339b247179SDarrick J. Wong 
19349b247179SDarrick J. Wong static const struct rhashtable_params xfs_iunlink_hash_params = {
19359b247179SDarrick J. Wong 	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
19369b247179SDarrick J. Wong 	.key_len		= sizeof(xfs_agino_t),
19379b247179SDarrick J. Wong 	.key_offset		= offsetof(struct xfs_iunlink,
19389b247179SDarrick J. Wong 					   iu_next_unlinked),
19399b247179SDarrick J. Wong 	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
19409b247179SDarrick J. Wong 	.automatic_shrinking	= true,
19419b247179SDarrick J. Wong 	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
19429b247179SDarrick J. Wong };
19439b247179SDarrick J. Wong 
19449b247179SDarrick J. Wong /*
19459b247179SDarrick J. Wong  * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
19469b247179SDarrick J. Wong  * relation is found.
19479b247179SDarrick J. Wong  */
19489b247179SDarrick J. Wong static xfs_agino_t
19499b247179SDarrick J. Wong xfs_iunlink_lookup_backref(
19509b247179SDarrick J. Wong 	struct xfs_perag	*pag,
19519b247179SDarrick J. Wong 	xfs_agino_t		agino)
19529b247179SDarrick J. Wong {
19539b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
19549b247179SDarrick J. Wong 
19559b247179SDarrick J. Wong 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
19569b247179SDarrick J. Wong 			xfs_iunlink_hash_params);
19579b247179SDarrick J. Wong 	return iu ? iu->iu_agino : NULLAGINO;
19589b247179SDarrick J. Wong }
19599b247179SDarrick J. Wong 
19609b247179SDarrick J. Wong /*
19619b247179SDarrick J. Wong  * Take ownership of an iunlink cache entry and insert it into the hash table.
19629b247179SDarrick J. Wong  * If successful, the entry will be owned by the cache; if not, it is freed.
19639b247179SDarrick J. Wong  * Either way, the caller does not own @iu after this call.
19649b247179SDarrick J. Wong  */
19659b247179SDarrick J. Wong static int
19669b247179SDarrick J. Wong xfs_iunlink_insert_backref(
19679b247179SDarrick J. Wong 	struct xfs_perag	*pag,
19689b247179SDarrick J. Wong 	struct xfs_iunlink	*iu)
19699b247179SDarrick J. Wong {
19709b247179SDarrick J. Wong 	int			error;
19719b247179SDarrick J. Wong 
19729b247179SDarrick J. Wong 	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
19739b247179SDarrick J. Wong 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
19749b247179SDarrick J. Wong 	/*
19759b247179SDarrick J. Wong 	 * Fail loudly if there already was an entry because that's a sign of
19769b247179SDarrick J. Wong 	 * corruption of in-memory data.  Also fail loudly if we see an error
19779b247179SDarrick J. Wong 	 * code we didn't anticipate from the rhashtable code.  Currently we
19789b247179SDarrick J. Wong 	 * only anticipate ENOMEM.
19799b247179SDarrick J. Wong 	 */
19809b247179SDarrick J. Wong 	if (error) {
19819b247179SDarrick J. Wong 		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
19829b247179SDarrick J. Wong 		kmem_free(iu);
19839b247179SDarrick J. Wong 	}
19849b247179SDarrick J. Wong 	/*
19859b247179SDarrick J. Wong 	 * Absorb any runtime errors that aren't a result of corruption because
19869b247179SDarrick J. Wong 	 * this is a cache and we can always fall back to bucket list scanning.
19879b247179SDarrick J. Wong 	 */
19889b247179SDarrick J. Wong 	if (error != 0 && error != -EEXIST)
19899b247179SDarrick J. Wong 		error = 0;
19909b247179SDarrick J. Wong 	return error;
19919b247179SDarrick J. Wong }
19929b247179SDarrick J. Wong 
19939b247179SDarrick J. Wong /* Remember that @prev_agino.next_unlinked = @this_agino. */
19949b247179SDarrick J. Wong static int
19959b247179SDarrick J. Wong xfs_iunlink_add_backref(
19969b247179SDarrick J. Wong 	struct xfs_perag	*pag,
19979b247179SDarrick J. Wong 	xfs_agino_t		prev_agino,
19989b247179SDarrick J. Wong 	xfs_agino_t		this_agino)
19999b247179SDarrick J. Wong {
20009b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
20019b247179SDarrick J. Wong 
20029b247179SDarrick J. Wong 	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
20039b247179SDarrick J. Wong 		return 0;
20049b247179SDarrick J. Wong 
2005707e0ddaSTetsuo Handa 	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
20069b247179SDarrick J. Wong 	iu->iu_agino = prev_agino;
20079b247179SDarrick J. Wong 	iu->iu_next_unlinked = this_agino;
20089b247179SDarrick J. Wong 
20099b247179SDarrick J. Wong 	return xfs_iunlink_insert_backref(pag, iu);
20109b247179SDarrick J. Wong }
20119b247179SDarrick J. Wong 
20129b247179SDarrick J. Wong /*
20139b247179SDarrick J. Wong  * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
20149b247179SDarrick J. Wong  * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
20159b247179SDarrick J. Wong  * wasn't any such entry then we don't bother.
20169b247179SDarrick J. Wong  */
20179b247179SDarrick J. Wong static int
20189b247179SDarrick J. Wong xfs_iunlink_change_backref(
20199b247179SDarrick J. Wong 	struct xfs_perag	*pag,
20209b247179SDarrick J. Wong 	xfs_agino_t		agino,
20219b247179SDarrick J. Wong 	xfs_agino_t		next_unlinked)
20229b247179SDarrick J. Wong {
20239b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
20249b247179SDarrick J. Wong 	int			error;
20259b247179SDarrick J. Wong 
20269b247179SDarrick J. Wong 	/* Look up the old entry; if there wasn't one then exit. */
20279b247179SDarrick J. Wong 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
20289b247179SDarrick J. Wong 			xfs_iunlink_hash_params);
20299b247179SDarrick J. Wong 	if (!iu)
20309b247179SDarrick J. Wong 		return 0;
20319b247179SDarrick J. Wong 
20329b247179SDarrick J. Wong 	/*
20339b247179SDarrick J. Wong 	 * Remove the entry.  This shouldn't ever return an error, but if we
20349b247179SDarrick J. Wong 	 * couldn't remove the old entry we don't want to add it again to the
20359b247179SDarrick J. Wong 	 * hash table, and if the entry disappeared on us then someone's
20369b247179SDarrick J. Wong 	 * violated the locking rules and we need to fail loudly.  Either way
20379b247179SDarrick J. Wong 	 * we cannot remove the inode because internal state is or would have
20389b247179SDarrick J. Wong 	 * been corrupt.
20399b247179SDarrick J. Wong 	 */
20409b247179SDarrick J. Wong 	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
20419b247179SDarrick J. Wong 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
20429b247179SDarrick J. Wong 	if (error)
20439b247179SDarrick J. Wong 		return error;
20449b247179SDarrick J. Wong 
20459b247179SDarrick J. Wong 	/* If there is no new next entry just free our item and return. */
20469b247179SDarrick J. Wong 	if (next_unlinked == NULLAGINO) {
20479b247179SDarrick J. Wong 		kmem_free(iu);
20489b247179SDarrick J. Wong 		return 0;
20499b247179SDarrick J. Wong 	}
20509b247179SDarrick J. Wong 
20519b247179SDarrick J. Wong 	/* Update the entry and re-add it to the hash table. */
20529b247179SDarrick J. Wong 	iu->iu_next_unlinked = next_unlinked;
20539b247179SDarrick J. Wong 	return xfs_iunlink_insert_backref(pag, iu);
20549b247179SDarrick J. Wong }
20559b247179SDarrick J. Wong 
20569b247179SDarrick J. Wong /* Set up the in-core predecessor structures. */
20579b247179SDarrick J. Wong int
20589b247179SDarrick J. Wong xfs_iunlink_init(
20599b247179SDarrick J. Wong 	struct xfs_perag	*pag)
20609b247179SDarrick J. Wong {
20619b247179SDarrick J. Wong 	return rhashtable_init(&pag->pagi_unlinked_hash,
20629b247179SDarrick J. Wong 			&xfs_iunlink_hash_params);
20639b247179SDarrick J. Wong }
20649b247179SDarrick J. Wong 
20659b247179SDarrick J. Wong /* Free the in-core predecessor structures. */
20669b247179SDarrick J. Wong static void
20679b247179SDarrick J. Wong xfs_iunlink_free_item(
20689b247179SDarrick J. Wong 	void			*ptr,
20699b247179SDarrick J. Wong 	void			*arg)
20709b247179SDarrick J. Wong {
20719b247179SDarrick J. Wong 	struct xfs_iunlink	*iu = ptr;
20729b247179SDarrick J. Wong 	bool			*freed_anything = arg;
20739b247179SDarrick J. Wong 
20749b247179SDarrick J. Wong 	*freed_anything = true;
20759b247179SDarrick J. Wong 	kmem_free(iu);
20769b247179SDarrick J. Wong }
20779b247179SDarrick J. Wong 
20789b247179SDarrick J. Wong void
20799b247179SDarrick J. Wong xfs_iunlink_destroy(
20809b247179SDarrick J. Wong 	struct xfs_perag	*pag)
20819b247179SDarrick J. Wong {
20829b247179SDarrick J. Wong 	bool			freed_anything = false;
20839b247179SDarrick J. Wong 
20849b247179SDarrick J. Wong 	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
20859b247179SDarrick J. Wong 			xfs_iunlink_free_item, &freed_anything);
20869b247179SDarrick J. Wong 
20879b247179SDarrick J. Wong 	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
20889b247179SDarrick J. Wong }
20899b247179SDarrick J. Wong 
20909b247179SDarrick J. Wong /*
20919a4a5118SDarrick J. Wong  * Point the AGI unlinked bucket at an inode and log the results.  The caller
20929a4a5118SDarrick J. Wong  * is responsible for validating the old value.
20939a4a5118SDarrick J. Wong  */
20949a4a5118SDarrick J. Wong STATIC int
20959a4a5118SDarrick J. Wong xfs_iunlink_update_bucket(
20969a4a5118SDarrick J. Wong 	struct xfs_trans	*tp,
20979a4a5118SDarrick J. Wong 	xfs_agnumber_t		agno,
20989a4a5118SDarrick J. Wong 	struct xfs_buf		*agibp,
20999a4a5118SDarrick J. Wong 	unsigned int		bucket_index,
21009a4a5118SDarrick J. Wong 	xfs_agino_t		new_agino)
21019a4a5118SDarrick J. Wong {
2102370c782bSChristoph Hellwig 	struct xfs_agi		*agi = agibp->b_addr;
21039a4a5118SDarrick J. Wong 	xfs_agino_t		old_value;
21049a4a5118SDarrick J. Wong 	int			offset;
21059a4a5118SDarrick J. Wong 
21069a4a5118SDarrick J. Wong 	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
21079a4a5118SDarrick J. Wong 
21089a4a5118SDarrick J. Wong 	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
21099a4a5118SDarrick J. Wong 	trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
21109a4a5118SDarrick J. Wong 			old_value, new_agino);
21119a4a5118SDarrick J. Wong 
21129a4a5118SDarrick J. Wong 	/*
21139a4a5118SDarrick J. Wong 	 * We should never find the head of the list already set to the value
21149a4a5118SDarrick J. Wong 	 * passed in because either we're adding or removing ourselves from the
21159a4a5118SDarrick J. Wong 	 * head of the list.
21169a4a5118SDarrick J. Wong 	 */
2117a5155b87SDarrick J. Wong 	if (old_value == new_agino) {
21188d57c216SDarrick J. Wong 		xfs_buf_mark_corrupt(agibp);
21199a4a5118SDarrick J. Wong 		return -EFSCORRUPTED;
2120a5155b87SDarrick J. Wong 	}
21219a4a5118SDarrick J. Wong 
21229a4a5118SDarrick J. Wong 	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
21239a4a5118SDarrick J. Wong 	offset = offsetof(struct xfs_agi, agi_unlinked) +
21249a4a5118SDarrick J. Wong 			(sizeof(xfs_agino_t) * bucket_index);
21259a4a5118SDarrick J. Wong 	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
21269a4a5118SDarrick J. Wong 	return 0;
21279a4a5118SDarrick J. Wong }
21289a4a5118SDarrick J. Wong 
2129f2fc16a3SDarrick J. Wong /* Set an on-disk inode's next_unlinked pointer. */
2130f2fc16a3SDarrick J. Wong STATIC void
2131f2fc16a3SDarrick J. Wong xfs_iunlink_update_dinode(
2132f2fc16a3SDarrick J. Wong 	struct xfs_trans	*tp,
2133f2fc16a3SDarrick J. Wong 	xfs_agnumber_t		agno,
2134f2fc16a3SDarrick J. Wong 	xfs_agino_t		agino,
2135f2fc16a3SDarrick J. Wong 	struct xfs_buf		*ibp,
2136f2fc16a3SDarrick J. Wong 	struct xfs_dinode	*dip,
2137f2fc16a3SDarrick J. Wong 	struct xfs_imap		*imap,
2138f2fc16a3SDarrick J. Wong 	xfs_agino_t		next_agino)
2139f2fc16a3SDarrick J. Wong {
2140f2fc16a3SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2141f2fc16a3SDarrick J. Wong 	int			offset;
2142f2fc16a3SDarrick J. Wong 
2143f2fc16a3SDarrick J. Wong 	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2144f2fc16a3SDarrick J. Wong 
2145f2fc16a3SDarrick J. Wong 	trace_xfs_iunlink_update_dinode(mp, agno, agino,
2146f2fc16a3SDarrick J. Wong 			be32_to_cpu(dip->di_next_unlinked), next_agino);
2147f2fc16a3SDarrick J. Wong 
2148f2fc16a3SDarrick J. Wong 	dip->di_next_unlinked = cpu_to_be32(next_agino);
2149f2fc16a3SDarrick J. Wong 	offset = imap->im_boffset +
2150f2fc16a3SDarrick J. Wong 			offsetof(struct xfs_dinode, di_next_unlinked);
2151f2fc16a3SDarrick J. Wong 
2152f2fc16a3SDarrick J. Wong 	/* need to recalc the inode CRC if appropriate */
2153f2fc16a3SDarrick J. Wong 	xfs_dinode_calc_crc(mp, dip);
2154f2fc16a3SDarrick J. Wong 	xfs_trans_inode_buf(tp, ibp);
2155f2fc16a3SDarrick J. Wong 	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2156f2fc16a3SDarrick J. Wong 	xfs_inobp_check(mp, ibp);
2157f2fc16a3SDarrick J. Wong }
2158f2fc16a3SDarrick J. Wong 
2159f2fc16a3SDarrick J. Wong /* Set an in-core inode's unlinked pointer and return the old value. */
2160f2fc16a3SDarrick J. Wong STATIC int
2161f2fc16a3SDarrick J. Wong xfs_iunlink_update_inode(
2162f2fc16a3SDarrick J. Wong 	struct xfs_trans	*tp,
2163f2fc16a3SDarrick J. Wong 	struct xfs_inode	*ip,
2164f2fc16a3SDarrick J. Wong 	xfs_agnumber_t		agno,
2165f2fc16a3SDarrick J. Wong 	xfs_agino_t		next_agino,
2166f2fc16a3SDarrick J. Wong 	xfs_agino_t		*old_next_agino)
2167f2fc16a3SDarrick J. Wong {
2168f2fc16a3SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2169f2fc16a3SDarrick J. Wong 	struct xfs_dinode	*dip;
2170f2fc16a3SDarrick J. Wong 	struct xfs_buf		*ibp;
2171f2fc16a3SDarrick J. Wong 	xfs_agino_t		old_value;
2172f2fc16a3SDarrick J. Wong 	int			error;
2173f2fc16a3SDarrick J. Wong 
2174f2fc16a3SDarrick J. Wong 	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2175f2fc16a3SDarrick J. Wong 
2176f2fc16a3SDarrick J. Wong 	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0, 0);
2177f2fc16a3SDarrick J. Wong 	if (error)
2178f2fc16a3SDarrick J. Wong 		return error;
2179f2fc16a3SDarrick J. Wong 
2180f2fc16a3SDarrick J. Wong 	/* Make sure the old pointer isn't garbage. */
2181f2fc16a3SDarrick J. Wong 	old_value = be32_to_cpu(dip->di_next_unlinked);
2182f2fc16a3SDarrick J. Wong 	if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2183a5155b87SDarrick J. Wong 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2184a5155b87SDarrick J. Wong 				sizeof(*dip), __this_address);
2185f2fc16a3SDarrick J. Wong 		error = -EFSCORRUPTED;
2186f2fc16a3SDarrick J. Wong 		goto out;
2187f2fc16a3SDarrick J. Wong 	}
2188f2fc16a3SDarrick J. Wong 
2189f2fc16a3SDarrick J. Wong 	/*
2190f2fc16a3SDarrick J. Wong 	 * Since we're updating a linked list, we should never find that the
2191f2fc16a3SDarrick J. Wong 	 * current pointer is the same as the new value, unless we're
2192f2fc16a3SDarrick J. Wong 	 * terminating the list.
2193f2fc16a3SDarrick J. Wong 	 */
2194f2fc16a3SDarrick J. Wong 	*old_next_agino = old_value;
2195f2fc16a3SDarrick J. Wong 	if (old_value == next_agino) {
2196a5155b87SDarrick J. Wong 		if (next_agino != NULLAGINO) {
2197a5155b87SDarrick J. Wong 			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2198a5155b87SDarrick J. Wong 					dip, sizeof(*dip), __this_address);
2199f2fc16a3SDarrick J. Wong 			error = -EFSCORRUPTED;
2200a5155b87SDarrick J. Wong 		}
2201f2fc16a3SDarrick J. Wong 		goto out;
2202f2fc16a3SDarrick J. Wong 	}
2203f2fc16a3SDarrick J. Wong 
2204f2fc16a3SDarrick J. Wong 	/* Ok, update the new pointer. */
2205f2fc16a3SDarrick J. Wong 	xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2206f2fc16a3SDarrick J. Wong 			ibp, dip, &ip->i_imap, next_agino);
2207f2fc16a3SDarrick J. Wong 	return 0;
2208f2fc16a3SDarrick J. Wong out:
2209f2fc16a3SDarrick J. Wong 	xfs_trans_brelse(tp, ibp);
2210f2fc16a3SDarrick J. Wong 	return error;
2211f2fc16a3SDarrick J. Wong }
2212f2fc16a3SDarrick J. Wong 
22139a4a5118SDarrick J. Wong /*
2214c4a6bf7fSDarrick J. Wong  * This is called when the inode's link count has gone to 0 or we are creating
2215c4a6bf7fSDarrick J. Wong  * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
221654d7b5c1SDave Chinner  *
221754d7b5c1SDave Chinner  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
221854d7b5c1SDave Chinner  * list when the inode is freed.
22191da177e4SLinus Torvalds  */
222054d7b5c1SDave Chinner STATIC int
22211da177e4SLinus Torvalds xfs_iunlink(
222254d7b5c1SDave Chinner 	struct xfs_trans	*tp,
222354d7b5c1SDave Chinner 	struct xfs_inode	*ip)
22241da177e4SLinus Torvalds {
22255837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
22265837f625SDarrick J. Wong 	struct xfs_agi		*agi;
22275837f625SDarrick J. Wong 	struct xfs_buf		*agibp;
222886bfd375SDarrick J. Wong 	xfs_agino_t		next_agino;
22295837f625SDarrick J. Wong 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
22305837f625SDarrick J. Wong 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
22315837f625SDarrick J. Wong 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
22321da177e4SLinus Torvalds 	int			error;
22331da177e4SLinus Torvalds 
2234c4a6bf7fSDarrick J. Wong 	ASSERT(VFS_I(ip)->i_nlink == 0);
2235c19b3b05SDave Chinner 	ASSERT(VFS_I(ip)->i_mode != 0);
22364664c66cSDarrick J. Wong 	trace_xfs_iunlink(ip);
22371da177e4SLinus Torvalds 
22385837f625SDarrick J. Wong 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
22395837f625SDarrick J. Wong 	error = xfs_read_agi(mp, tp, agno, &agibp);
2240859d7182SVlad Apostolov 	if (error)
22411da177e4SLinus Torvalds 		return error;
2242370c782bSChristoph Hellwig 	agi = agibp->b_addr;
22435e1be0fbSChristoph Hellwig 
22441da177e4SLinus Torvalds 	/*
224586bfd375SDarrick J. Wong 	 * Get the index into the agi hash table for the list this inode will
224686bfd375SDarrick J. Wong 	 * go on.  Make sure the pointer isn't garbage and that this inode
224786bfd375SDarrick J. Wong 	 * isn't already on the list.
22481da177e4SLinus Torvalds 	 */
224986bfd375SDarrick J. Wong 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
225086bfd375SDarrick J. Wong 	if (next_agino == agino ||
2251a5155b87SDarrick J. Wong 	    !xfs_verify_agino_or_null(mp, agno, next_agino)) {
22528d57c216SDarrick J. Wong 		xfs_buf_mark_corrupt(agibp);
225386bfd375SDarrick J. Wong 		return -EFSCORRUPTED;
2254a5155b87SDarrick J. Wong 	}
22551da177e4SLinus Torvalds 
225686bfd375SDarrick J. Wong 	if (next_agino != NULLAGINO) {
22579b247179SDarrick J. Wong 		struct xfs_perag	*pag;
2258f2fc16a3SDarrick J. Wong 		xfs_agino_t		old_agino;
2259f2fc16a3SDarrick J. Wong 
22601da177e4SLinus Torvalds 		/*
2261f2fc16a3SDarrick J. Wong 		 * There is already another inode in the bucket, so point this
2262f2fc16a3SDarrick J. Wong 		 * inode to the current head of the list.
22631da177e4SLinus Torvalds 		 */
2264f2fc16a3SDarrick J. Wong 		error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2265f2fc16a3SDarrick J. Wong 				&old_agino);
2266c319b58bSVlad Apostolov 		if (error)
2267c319b58bSVlad Apostolov 			return error;
2268f2fc16a3SDarrick J. Wong 		ASSERT(old_agino == NULLAGINO);
22699b247179SDarrick J. Wong 
22709b247179SDarrick J. Wong 		/*
22719b247179SDarrick J. Wong 		 * agino has been unlinked, add a backref from the next inode
22729b247179SDarrick J. Wong 		 * back to agino.
22739b247179SDarrick J. Wong 		 */
22749b247179SDarrick J. Wong 		pag = xfs_perag_get(mp, agno);
22759b247179SDarrick J. Wong 		error = xfs_iunlink_add_backref(pag, agino, next_agino);
22769b247179SDarrick J. Wong 		xfs_perag_put(pag);
22779b247179SDarrick J. Wong 		if (error)
22789b247179SDarrick J. Wong 			return error;
22791da177e4SLinus Torvalds 	}
22801da177e4SLinus Torvalds 
22819a4a5118SDarrick J. Wong 	/* Point the head of the list to point to this inode. */
22829a4a5118SDarrick J. Wong 	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
22831da177e4SLinus Torvalds }
22841da177e4SLinus Torvalds 
228523ffa52cSDarrick J. Wong /* Return the imap, dinode pointer, and buffer for an inode. */
228623ffa52cSDarrick J. Wong STATIC int
228723ffa52cSDarrick J. Wong xfs_iunlink_map_ino(
228823ffa52cSDarrick J. Wong 	struct xfs_trans	*tp,
228923ffa52cSDarrick J. Wong 	xfs_agnumber_t		agno,
229023ffa52cSDarrick J. Wong 	xfs_agino_t		agino,
229123ffa52cSDarrick J. Wong 	struct xfs_imap		*imap,
229223ffa52cSDarrick J. Wong 	struct xfs_dinode	**dipp,
229323ffa52cSDarrick J. Wong 	struct xfs_buf		**bpp)
229423ffa52cSDarrick J. Wong {
229523ffa52cSDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
229623ffa52cSDarrick J. Wong 	int			error;
229723ffa52cSDarrick J. Wong 
229823ffa52cSDarrick J. Wong 	imap->im_blkno = 0;
229923ffa52cSDarrick J. Wong 	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
230023ffa52cSDarrick J. Wong 	if (error) {
230123ffa52cSDarrick J. Wong 		xfs_warn(mp, "%s: xfs_imap returned error %d.",
230223ffa52cSDarrick J. Wong 				__func__, error);
230323ffa52cSDarrick J. Wong 		return error;
230423ffa52cSDarrick J. Wong 	}
230523ffa52cSDarrick J. Wong 
230623ffa52cSDarrick J. Wong 	error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0, 0);
230723ffa52cSDarrick J. Wong 	if (error) {
230823ffa52cSDarrick J. Wong 		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
230923ffa52cSDarrick J. Wong 				__func__, error);
231023ffa52cSDarrick J. Wong 		return error;
231123ffa52cSDarrick J. Wong 	}
231223ffa52cSDarrick J. Wong 
231323ffa52cSDarrick J. Wong 	return 0;
231423ffa52cSDarrick J. Wong }
231523ffa52cSDarrick J. Wong 
231623ffa52cSDarrick J. Wong /*
231723ffa52cSDarrick J. Wong  * Walk the unlinked chain from @head_agino until we find the inode that
231823ffa52cSDarrick J. Wong  * points to @target_agino.  Return the inode number, map, dinode pointer,
231923ffa52cSDarrick J. Wong  * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
232023ffa52cSDarrick J. Wong  *
232123ffa52cSDarrick J. Wong  * @tp, @pag, @head_agino, and @target_agino are input parameters.
232223ffa52cSDarrick J. Wong  * @agino, @imap, @dipp, and @bpp are all output parameters.
232323ffa52cSDarrick J. Wong  *
232423ffa52cSDarrick J. Wong  * Do not call this function if @target_agino is the head of the list.
232523ffa52cSDarrick J. Wong  */
232623ffa52cSDarrick J. Wong STATIC int
232723ffa52cSDarrick J. Wong xfs_iunlink_map_prev(
232823ffa52cSDarrick J. Wong 	struct xfs_trans	*tp,
232923ffa52cSDarrick J. Wong 	xfs_agnumber_t		agno,
233023ffa52cSDarrick J. Wong 	xfs_agino_t		head_agino,
233123ffa52cSDarrick J. Wong 	xfs_agino_t		target_agino,
233223ffa52cSDarrick J. Wong 	xfs_agino_t		*agino,
233323ffa52cSDarrick J. Wong 	struct xfs_imap		*imap,
233423ffa52cSDarrick J. Wong 	struct xfs_dinode	**dipp,
23359b247179SDarrick J. Wong 	struct xfs_buf		**bpp,
23369b247179SDarrick J. Wong 	struct xfs_perag	*pag)
233723ffa52cSDarrick J. Wong {
233823ffa52cSDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
233923ffa52cSDarrick J. Wong 	xfs_agino_t		next_agino;
234023ffa52cSDarrick J. Wong 	int			error;
234123ffa52cSDarrick J. Wong 
234223ffa52cSDarrick J. Wong 	ASSERT(head_agino != target_agino);
234323ffa52cSDarrick J. Wong 	*bpp = NULL;
234423ffa52cSDarrick J. Wong 
23459b247179SDarrick J. Wong 	/* See if our backref cache can find it faster. */
23469b247179SDarrick J. Wong 	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
23479b247179SDarrick J. Wong 	if (*agino != NULLAGINO) {
23489b247179SDarrick J. Wong 		error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
23499b247179SDarrick J. Wong 		if (error)
23509b247179SDarrick J. Wong 			return error;
23519b247179SDarrick J. Wong 
23529b247179SDarrick J. Wong 		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
23539b247179SDarrick J. Wong 			return 0;
23549b247179SDarrick J. Wong 
23559b247179SDarrick J. Wong 		/*
23569b247179SDarrick J. Wong 		 * If we get here the cache contents were corrupt, so drop the
23579b247179SDarrick J. Wong 		 * buffer and fall back to walking the bucket list.
23589b247179SDarrick J. Wong 		 */
23599b247179SDarrick J. Wong 		xfs_trans_brelse(tp, *bpp);
23609b247179SDarrick J. Wong 		*bpp = NULL;
23619b247179SDarrick J. Wong 		WARN_ON_ONCE(1);
23629b247179SDarrick J. Wong 	}
23639b247179SDarrick J. Wong 
23649b247179SDarrick J. Wong 	trace_xfs_iunlink_map_prev_fallback(mp, agno);
23659b247179SDarrick J. Wong 
23669b247179SDarrick J. Wong 	/* Otherwise, walk the entire bucket until we find it. */
236723ffa52cSDarrick J. Wong 	next_agino = head_agino;
236823ffa52cSDarrick J. Wong 	while (next_agino != target_agino) {
236923ffa52cSDarrick J. Wong 		xfs_agino_t	unlinked_agino;
237023ffa52cSDarrick J. Wong 
237123ffa52cSDarrick J. Wong 		if (*bpp)
237223ffa52cSDarrick J. Wong 			xfs_trans_brelse(tp, *bpp);
237323ffa52cSDarrick J. Wong 
237423ffa52cSDarrick J. Wong 		*agino = next_agino;
237523ffa52cSDarrick J. Wong 		error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
237623ffa52cSDarrick J. Wong 				bpp);
237723ffa52cSDarrick J. Wong 		if (error)
237823ffa52cSDarrick J. Wong 			return error;
237923ffa52cSDarrick J. Wong 
238023ffa52cSDarrick J. Wong 		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
238123ffa52cSDarrick J. Wong 		/*
238223ffa52cSDarrick J. Wong 		 * Make sure this pointer is valid and isn't an obvious
238323ffa52cSDarrick J. Wong 		 * infinite loop.
238423ffa52cSDarrick J. Wong 		 */
238523ffa52cSDarrick J. Wong 		if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
238623ffa52cSDarrick J. Wong 		    next_agino == unlinked_agino) {
238723ffa52cSDarrick J. Wong 			XFS_CORRUPTION_ERROR(__func__,
238823ffa52cSDarrick J. Wong 					XFS_ERRLEVEL_LOW, mp,
238923ffa52cSDarrick J. Wong 					*dipp, sizeof(**dipp));
239023ffa52cSDarrick J. Wong 			error = -EFSCORRUPTED;
239123ffa52cSDarrick J. Wong 			return error;
239223ffa52cSDarrick J. Wong 		}
239323ffa52cSDarrick J. Wong 		next_agino = unlinked_agino;
239423ffa52cSDarrick J. Wong 	}
239523ffa52cSDarrick J. Wong 
239623ffa52cSDarrick J. Wong 	return 0;
239723ffa52cSDarrick J. Wong }
239823ffa52cSDarrick J. Wong 
23991da177e4SLinus Torvalds /*
24001da177e4SLinus Torvalds  * Pull the on-disk inode from the AGI unlinked list.
24011da177e4SLinus Torvalds  */
24021da177e4SLinus Torvalds STATIC int
24031da177e4SLinus Torvalds xfs_iunlink_remove(
24045837f625SDarrick J. Wong 	struct xfs_trans	*tp,
24055837f625SDarrick J. Wong 	struct xfs_inode	*ip)
24061da177e4SLinus Torvalds {
24075837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
24085837f625SDarrick J. Wong 	struct xfs_agi		*agi;
24095837f625SDarrick J. Wong 	struct xfs_buf		*agibp;
24105837f625SDarrick J. Wong 	struct xfs_buf		*last_ibp;
24115837f625SDarrick J. Wong 	struct xfs_dinode	*last_dip = NULL;
24129b247179SDarrick J. Wong 	struct xfs_perag	*pag = NULL;
24135837f625SDarrick J. Wong 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
24145837f625SDarrick J. Wong 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
24151da177e4SLinus Torvalds 	xfs_agino_t		next_agino;
2416b1d2a068SDarrick J. Wong 	xfs_agino_t		head_agino;
24175837f625SDarrick J. Wong 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
24181da177e4SLinus Torvalds 	int			error;
24191da177e4SLinus Torvalds 
24204664c66cSDarrick J. Wong 	trace_xfs_iunlink_remove(ip);
24214664c66cSDarrick J. Wong 
24225837f625SDarrick J. Wong 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
24235e1be0fbSChristoph Hellwig 	error = xfs_read_agi(mp, tp, agno, &agibp);
24245e1be0fbSChristoph Hellwig 	if (error)
24251da177e4SLinus Torvalds 		return error;
2426370c782bSChristoph Hellwig 	agi = agibp->b_addr;
24275e1be0fbSChristoph Hellwig 
24281da177e4SLinus Torvalds 	/*
242986bfd375SDarrick J. Wong 	 * Get the index into the agi hash table for the list this inode will
243086bfd375SDarrick J. Wong 	 * go on.  Make sure the head pointer isn't garbage.
24311da177e4SLinus Torvalds 	 */
2432b1d2a068SDarrick J. Wong 	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2433b1d2a068SDarrick J. Wong 	if (!xfs_verify_agino(mp, agno, head_agino)) {
2434d2e73665SDarrick J. Wong 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2435d2e73665SDarrick J. Wong 				agi, sizeof(*agi));
2436d2e73665SDarrick J. Wong 		return -EFSCORRUPTED;
2437d2e73665SDarrick J. Wong 	}
24381da177e4SLinus Torvalds 
24391da177e4SLinus Torvalds 	/*
2440b1d2a068SDarrick J. Wong 	 * Set our inode's next_unlinked pointer to NULL and then return
2441b1d2a068SDarrick J. Wong 	 * the old pointer value so that we can update whatever was previous
2442b1d2a068SDarrick J. Wong 	 * to us in the list to point to whatever was next in the list.
24431da177e4SLinus Torvalds 	 */
2444b1d2a068SDarrick J. Wong 	error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2445f2fc16a3SDarrick J. Wong 	if (error)
24461da177e4SLinus Torvalds 		return error;
24479a4a5118SDarrick J. Wong 
24489b247179SDarrick J. Wong 	/*
24499b247179SDarrick J. Wong 	 * If there was a backref pointing from the next inode back to this
24509b247179SDarrick J. Wong 	 * one, remove it because we've removed this inode from the list.
24519b247179SDarrick J. Wong 	 *
24529b247179SDarrick J. Wong 	 * Later, if this inode was in the middle of the list we'll update
24539b247179SDarrick J. Wong 	 * this inode's backref to point from the next inode.
24549b247179SDarrick J. Wong 	 */
24559b247179SDarrick J. Wong 	if (next_agino != NULLAGINO) {
24569b247179SDarrick J. Wong 		pag = xfs_perag_get(mp, agno);
24579b247179SDarrick J. Wong 		error = xfs_iunlink_change_backref(pag, next_agino,
24589b247179SDarrick J. Wong 				NULLAGINO);
24599b247179SDarrick J. Wong 		if (error)
24609b247179SDarrick J. Wong 			goto out;
24619b247179SDarrick J. Wong 	}
24629b247179SDarrick J. Wong 
2463b1d2a068SDarrick J. Wong 	if (head_agino == agino) {
24649a4a5118SDarrick J. Wong 		/* Point the head of the list to the next unlinked inode. */
24659a4a5118SDarrick J. Wong 		error = xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
24669a4a5118SDarrick J. Wong 				next_agino);
24679a4a5118SDarrick J. Wong 		if (error)
24689b247179SDarrick J. Wong 			goto out;
24691da177e4SLinus Torvalds 	} else {
2470f2fc16a3SDarrick J. Wong 		struct xfs_imap	imap;
2471f2fc16a3SDarrick J. Wong 		xfs_agino_t	prev_agino;
2472f2fc16a3SDarrick J. Wong 
24739b247179SDarrick J. Wong 		if (!pag)
24749b247179SDarrick J. Wong 			pag = xfs_perag_get(mp, agno);
24759b247179SDarrick J. Wong 
247623ffa52cSDarrick J. Wong 		/* We need to search the list for the inode being freed. */
2477b1d2a068SDarrick J. Wong 		error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
24789b247179SDarrick J. Wong 				&prev_agino, &imap, &last_dip, &last_ibp,
24799b247179SDarrick J. Wong 				pag);
248023ffa52cSDarrick J. Wong 		if (error)
24819b247179SDarrick J. Wong 			goto out;
2482475ee413SChristoph Hellwig 
2483f2fc16a3SDarrick J. Wong 		/* Point the previous inode on the list to the next inode. */
2484f2fc16a3SDarrick J. Wong 		xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2485f2fc16a3SDarrick J. Wong 				last_dip, &imap, next_agino);
24869b247179SDarrick J. Wong 
24879b247179SDarrick J. Wong 		/*
24889b247179SDarrick J. Wong 		 * Now we deal with the backref for this inode.  If this inode
24899b247179SDarrick J. Wong 		 * pointed at a real inode, change the backref that pointed to
24909b247179SDarrick J. Wong 		 * us to point to our old next.  If this inode was the end of
24919b247179SDarrick J. Wong 		 * the list, delete the backref that pointed to us.  Note that
24929b247179SDarrick J. Wong 		 * change_backref takes care of deleting the backref if
24939b247179SDarrick J. Wong 		 * next_agino is NULLAGINO.
24949b247179SDarrick J. Wong 		 */
24959b247179SDarrick J. Wong 		error = xfs_iunlink_change_backref(pag, agino, next_agino);
24969b247179SDarrick J. Wong 		if (error)
24979b247179SDarrick J. Wong 			goto out;
24981da177e4SLinus Torvalds 	}
24999b247179SDarrick J. Wong 
25009b247179SDarrick J. Wong out:
25019b247179SDarrick J. Wong 	if (pag)
25029b247179SDarrick J. Wong 		xfs_perag_put(pag);
25039b247179SDarrick J. Wong 	return error;
25041da177e4SLinus Torvalds }
25051da177e4SLinus Torvalds 
25065b3eed75SDave Chinner /*
2507*5806165aSDave Chinner  * Look up the inode number specified and mark it stale if it is found. If it is
2508*5806165aSDave Chinner  * dirty, return the inode so it can be attached to the cluster buffer so it can
2509*5806165aSDave Chinner  * be processed appropriately when the cluster free transaction completes.
2510*5806165aSDave Chinner  */
2511*5806165aSDave Chinner static struct xfs_inode *
2512*5806165aSDave Chinner xfs_ifree_get_one_inode(
2513*5806165aSDave Chinner 	struct xfs_perag	*pag,
2514*5806165aSDave Chinner 	struct xfs_inode	*free_ip,
2515*5806165aSDave Chinner 	int			inum)
2516*5806165aSDave Chinner {
2517*5806165aSDave Chinner 	struct xfs_mount	*mp = pag->pag_mount;
2518*5806165aSDave Chinner 	struct xfs_inode	*ip;
2519*5806165aSDave Chinner 
2520*5806165aSDave Chinner retry:
2521*5806165aSDave Chinner 	rcu_read_lock();
2522*5806165aSDave Chinner 	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2523*5806165aSDave Chinner 
2524*5806165aSDave Chinner 	/* Inode not in memory, nothing to do */
2525*5806165aSDave Chinner 	if (!ip)
2526*5806165aSDave Chinner 		goto out_rcu_unlock;
2527*5806165aSDave Chinner 
2528*5806165aSDave Chinner 	/*
2529*5806165aSDave Chinner 	 * because this is an RCU protected lookup, we could find a recently
2530*5806165aSDave Chinner 	 * freed or even reallocated inode during the lookup. We need to check
2531*5806165aSDave Chinner 	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2532*5806165aSDave Chinner 	 * valid, the wrong inode or stale.
2533*5806165aSDave Chinner 	 */
2534*5806165aSDave Chinner 	spin_lock(&ip->i_flags_lock);
2535*5806165aSDave Chinner 	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) {
2536*5806165aSDave Chinner 		spin_unlock(&ip->i_flags_lock);
2537*5806165aSDave Chinner 		goto out_rcu_unlock;
2538*5806165aSDave Chinner 	}
2539*5806165aSDave Chinner 	spin_unlock(&ip->i_flags_lock);
2540*5806165aSDave Chinner 
2541*5806165aSDave Chinner 	/*
2542*5806165aSDave Chinner 	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2543*5806165aSDave Chinner 	 * other inodes that we did not find in the list attached to the buffer
2544*5806165aSDave Chinner 	 * and are not already marked stale. If we can't lock it, back off and
2545*5806165aSDave Chinner 	 * retry.
2546*5806165aSDave Chinner 	 */
2547*5806165aSDave Chinner 	if (ip != free_ip) {
2548*5806165aSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2549*5806165aSDave Chinner 			rcu_read_unlock();
2550*5806165aSDave Chinner 			delay(1);
2551*5806165aSDave Chinner 			goto retry;
2552*5806165aSDave Chinner 		}
2553*5806165aSDave Chinner 
2554*5806165aSDave Chinner 		/*
2555*5806165aSDave Chinner 		 * Check the inode number again in case we're racing with
2556*5806165aSDave Chinner 		 * freeing in xfs_reclaim_inode().  See the comments in that
2557*5806165aSDave Chinner 		 * function for more information as to why the initial check is
2558*5806165aSDave Chinner 		 * not sufficient.
2559*5806165aSDave Chinner 		 */
2560*5806165aSDave Chinner 		if (ip->i_ino != inum) {
2561*5806165aSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_EXCL);
2562*5806165aSDave Chinner 			goto out_rcu_unlock;
2563*5806165aSDave Chinner 		}
2564*5806165aSDave Chinner 	}
2565*5806165aSDave Chinner 	rcu_read_unlock();
2566*5806165aSDave Chinner 
2567*5806165aSDave Chinner 	xfs_iflock(ip);
2568*5806165aSDave Chinner 	xfs_iflags_set(ip, XFS_ISTALE);
2569*5806165aSDave Chinner 
2570*5806165aSDave Chinner 	/*
2571*5806165aSDave Chinner 	 * We don't need to attach clean inodes or those only with unlogged
2572*5806165aSDave Chinner 	 * changes (which we throw away, anyway).
2573*5806165aSDave Chinner 	 */
2574*5806165aSDave Chinner 	if (!ip->i_itemp || xfs_inode_clean(ip)) {
2575*5806165aSDave Chinner 		ASSERT(ip != free_ip);
2576*5806165aSDave Chinner 		xfs_ifunlock(ip);
2577*5806165aSDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2578*5806165aSDave Chinner 		goto out_no_inode;
2579*5806165aSDave Chinner 	}
2580*5806165aSDave Chinner 	return ip;
2581*5806165aSDave Chinner 
2582*5806165aSDave Chinner out_rcu_unlock:
2583*5806165aSDave Chinner 	rcu_read_unlock();
2584*5806165aSDave Chinner out_no_inode:
2585*5806165aSDave Chinner 	return NULL;
2586*5806165aSDave Chinner }
2587*5806165aSDave Chinner 
2588*5806165aSDave Chinner /*
25890b8182dbSZhi Yong Wu  * A big issue when freeing the inode cluster is that we _cannot_ skip any
25905b3eed75SDave Chinner  * inodes that are in memory - they all must be marked stale and attached to
25915b3eed75SDave Chinner  * the cluster buffer.
25925b3eed75SDave Chinner  */
25932a30f36dSChandra Seetharaman STATIC int
25941da177e4SLinus Torvalds xfs_ifree_cluster(
25951da177e4SLinus Torvalds 	xfs_inode_t		*free_ip,
25961da177e4SLinus Torvalds 	xfs_trans_t		*tp,
259709b56604SBrian Foster 	struct xfs_icluster	*xic)
25981da177e4SLinus Torvalds {
25991da177e4SLinus Torvalds 	xfs_mount_t		*mp = free_ip->i_mount;
26001da177e4SLinus Torvalds 	int			nbufs;
26015b257b4aSDave Chinner 	int			i, j;
26023cdaa189SBrian Foster 	int			ioffset;
26031da177e4SLinus Torvalds 	xfs_daddr_t		blkno;
26041da177e4SLinus Torvalds 	xfs_buf_t		*bp;
26055b257b4aSDave Chinner 	xfs_inode_t		*ip;
26061da177e4SLinus Torvalds 	xfs_inode_log_item_t	*iip;
2607643c8c05SCarlos Maiolino 	struct xfs_log_item	*lip;
26085017e97dSDave Chinner 	struct xfs_perag	*pag;
2609ef325959SDarrick J. Wong 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
261009b56604SBrian Foster 	xfs_ino_t		inum;
2611ce92464cSDarrick J. Wong 	int			error;
26121da177e4SLinus Torvalds 
261309b56604SBrian Foster 	inum = xic->first_ino;
26145017e97dSDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
2615ef325959SDarrick J. Wong 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
26161da177e4SLinus Torvalds 
2617ef325959SDarrick J. Wong 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
261809b56604SBrian Foster 		/*
261909b56604SBrian Foster 		 * The allocation bitmap tells us which inodes of the chunk were
262009b56604SBrian Foster 		 * physically allocated. Skip the cluster if an inode falls into
262109b56604SBrian Foster 		 * a sparse region.
262209b56604SBrian Foster 		 */
26233cdaa189SBrian Foster 		ioffset = inum - xic->first_ino;
26243cdaa189SBrian Foster 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2625ef325959SDarrick J. Wong 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
262609b56604SBrian Foster 			continue;
262709b56604SBrian Foster 		}
262809b56604SBrian Foster 
26291da177e4SLinus Torvalds 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
26301da177e4SLinus Torvalds 					 XFS_INO_TO_AGBNO(mp, inum));
26311da177e4SLinus Torvalds 
26321da177e4SLinus Torvalds 		/*
26335b257b4aSDave Chinner 		 * We obtain and lock the backing buffer first in the process
26345b257b4aSDave Chinner 		 * here, as we have to ensure that any dirty inode that we
26355b257b4aSDave Chinner 		 * can't get the flush lock on is attached to the buffer.
26365b257b4aSDave Chinner 		 * If we scan the in-memory inodes first, then buffer IO can
26375b257b4aSDave Chinner 		 * complete before we get a lock on it, and hence we may fail
26385b257b4aSDave Chinner 		 * to mark all the active inodes on the buffer stale.
26391da177e4SLinus Torvalds 		 */
2640ce92464cSDarrick J. Wong 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2641ef325959SDarrick J. Wong 				mp->m_bsize * igeo->blocks_per_cluster,
2642ce92464cSDarrick J. Wong 				XBF_UNMAPPED, &bp);
2643ce92464cSDarrick J. Wong 		if (error)
2644ce92464cSDarrick J. Wong 			return error;
2645b0f539deSDave Chinner 
2646b0f539deSDave Chinner 		/*
2647b0f539deSDave Chinner 		 * This buffer may not have been correctly initialised as we
2648b0f539deSDave Chinner 		 * didn't read it from disk. That's not important because we are
2649b0f539deSDave Chinner 		 * only using to mark the buffer as stale in the log, and to
2650b0f539deSDave Chinner 		 * attach stale cached inodes on it. That means it will never be
2651b0f539deSDave Chinner 		 * dispatched for IO. If it is, we want to know about it, and we
2652b0f539deSDave Chinner 		 * want it to fail. We can acheive this by adding a write
2653b0f539deSDave Chinner 		 * verifier to the buffer.
2654b0f539deSDave Chinner 		 */
26551813dd64SDave Chinner 		bp->b_ops = &xfs_inode_buf_ops;
2656b0f539deSDave Chinner 
26575b257b4aSDave Chinner 		/*
26585b257b4aSDave Chinner 		 * Walk the inodes already attached to the buffer and mark them
26595b257b4aSDave Chinner 		 * stale. These will all have the flush locks held, so an
26605b3eed75SDave Chinner 		 * in-memory inode walk can't lock them. By marking them all
26615b3eed75SDave Chinner 		 * stale first, we will not attempt to lock them in the loop
26625b3eed75SDave Chinner 		 * below as the XFS_ISTALE flag will be set.
26635b257b4aSDave Chinner 		 */
2664643c8c05SCarlos Maiolino 		list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
26651da177e4SLinus Torvalds 			if (lip->li_type == XFS_LI_INODE) {
26661da177e4SLinus Torvalds 				iip = (xfs_inode_log_item_t *)lip;
26671da177e4SLinus Torvalds 				ASSERT(iip->ili_logged == 1);
2668ca30b2a7SChristoph Hellwig 				lip->li_cb = xfs_istale_done;
26697b2e2a31SDavid Chinner 				xfs_trans_ail_copy_lsn(mp->m_ail,
26707b2e2a31SDavid Chinner 							&iip->ili_flush_lsn,
26717b2e2a31SDavid Chinner 							&iip->ili_item.li_lsn);
2672e5ffd2bbSDavid Chinner 				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
26731da177e4SLinus Torvalds 			}
26741da177e4SLinus Torvalds 		}
26751da177e4SLinus Torvalds 
26765b3eed75SDave Chinner 
26775b257b4aSDave Chinner 		/*
26785b257b4aSDave Chinner 		 * For each inode in memory attempt to add it to the inode
26795b257b4aSDave Chinner 		 * buffer and set it up for being staled on buffer IO
26805b257b4aSDave Chinner 		 * completion.  This is safe as we've locked out tail pushing
26815b257b4aSDave Chinner 		 * and flushing by locking the buffer.
26825b257b4aSDave Chinner 		 *
26835b257b4aSDave Chinner 		 * We have already marked every inode that was part of a
26845b257b4aSDave Chinner 		 * transaction stale above, which means there is no point in
26855b257b4aSDave Chinner 		 * even trying to lock them.
26865b257b4aSDave Chinner 		 */
2687ef325959SDarrick J. Wong 		for (i = 0; i < igeo->inodes_per_cluster; i++) {
2688*5806165aSDave Chinner 			ip = xfs_ifree_get_one_inode(pag, free_ip, inum + i);
2689*5806165aSDave Chinner 			if (!ip)
26905b257b4aSDave Chinner 				continue;
26915b257b4aSDave Chinner 
26925b257b4aSDave Chinner 			iip = ip->i_itemp;
2693f5d8d5c4SChristoph Hellwig 			iip->ili_last_fields = iip->ili_fields;
2694f5d8d5c4SChristoph Hellwig 			iip->ili_fields = 0;
2695fc0561ceSDave Chinner 			iip->ili_fsync_fields = 0;
26961da177e4SLinus Torvalds 			iip->ili_logged = 1;
26977b2e2a31SDavid Chinner 			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
26987b2e2a31SDavid Chinner 						&iip->ili_item.li_lsn);
26991da177e4SLinus Torvalds 
2700ca30b2a7SChristoph Hellwig 			xfs_buf_attach_iodone(bp, xfs_istale_done,
2701ca30b2a7SChristoph Hellwig 						  &iip->ili_item);
27025b257b4aSDave Chinner 
27035b257b4aSDave Chinner 			if (ip != free_ip)
27041da177e4SLinus Torvalds 				xfs_iunlock(ip, XFS_ILOCK_EXCL);
27051da177e4SLinus Torvalds 		}
27061da177e4SLinus Torvalds 
27071da177e4SLinus Torvalds 		xfs_trans_stale_inode_buf(tp, bp);
27081da177e4SLinus Torvalds 		xfs_trans_binval(tp, bp);
27091da177e4SLinus Torvalds 	}
27101da177e4SLinus Torvalds 
27115017e97dSDave Chinner 	xfs_perag_put(pag);
27122a30f36dSChandra Seetharaman 	return 0;
27131da177e4SLinus Torvalds }
27141da177e4SLinus Torvalds 
27151da177e4SLinus Torvalds /*
271698c4f78dSDarrick J. Wong  * Free any local-format buffers sitting around before we reset to
271798c4f78dSDarrick J. Wong  * extents format.
271898c4f78dSDarrick J. Wong  */
271998c4f78dSDarrick J. Wong static inline void
272098c4f78dSDarrick J. Wong xfs_ifree_local_data(
272198c4f78dSDarrick J. Wong 	struct xfs_inode	*ip,
272298c4f78dSDarrick J. Wong 	int			whichfork)
272398c4f78dSDarrick J. Wong {
272498c4f78dSDarrick J. Wong 	struct xfs_ifork	*ifp;
272598c4f78dSDarrick J. Wong 
272698c4f78dSDarrick J. Wong 	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
272798c4f78dSDarrick J. Wong 		return;
272898c4f78dSDarrick J. Wong 
272998c4f78dSDarrick J. Wong 	ifp = XFS_IFORK_PTR(ip, whichfork);
273098c4f78dSDarrick J. Wong 	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
273198c4f78dSDarrick J. Wong }
273298c4f78dSDarrick J. Wong 
273398c4f78dSDarrick J. Wong /*
27341da177e4SLinus Torvalds  * This is called to return an inode to the inode free list.
27351da177e4SLinus Torvalds  * The inode should already be truncated to 0 length and have
27361da177e4SLinus Torvalds  * no pages associated with it.  This routine also assumes that
27371da177e4SLinus Torvalds  * the inode is already a part of the transaction.
27381da177e4SLinus Torvalds  *
27391da177e4SLinus Torvalds  * The on-disk copy of the inode will have been added to the list
27401da177e4SLinus Torvalds  * of unlinked inodes in the AGI. We need to remove the inode from
27411da177e4SLinus Torvalds  * that list atomically with respect to freeing it here.
27421da177e4SLinus Torvalds  */
27431da177e4SLinus Torvalds int
27441da177e4SLinus Torvalds xfs_ifree(
27450e0417f3SBrian Foster 	struct xfs_trans	*tp,
27460e0417f3SBrian Foster 	struct xfs_inode	*ip)
27471da177e4SLinus Torvalds {
27481da177e4SLinus Torvalds 	int			error;
274909b56604SBrian Foster 	struct xfs_icluster	xic = { 0 };
27501da177e4SLinus Torvalds 
2751579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
275254d7b5c1SDave Chinner 	ASSERT(VFS_I(ip)->i_nlink == 0);
27531da177e4SLinus Torvalds 	ASSERT(ip->i_d.di_nextents == 0);
27541da177e4SLinus Torvalds 	ASSERT(ip->i_d.di_anextents == 0);
2755c19b3b05SDave Chinner 	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
27561da177e4SLinus Torvalds 	ASSERT(ip->i_d.di_nblocks == 0);
27571da177e4SLinus Torvalds 
27581da177e4SLinus Torvalds 	/*
27591da177e4SLinus Torvalds 	 * Pull the on-disk inode from the AGI unlinked list.
27601da177e4SLinus Torvalds 	 */
27611da177e4SLinus Torvalds 	error = xfs_iunlink_remove(tp, ip);
27621baaed8fSDave Chinner 	if (error)
27631da177e4SLinus Torvalds 		return error;
27641da177e4SLinus Torvalds 
27650e0417f3SBrian Foster 	error = xfs_difree(tp, ip->i_ino, &xic);
27661baaed8fSDave Chinner 	if (error)
27671da177e4SLinus Torvalds 		return error;
27681baaed8fSDave Chinner 
276998c4f78dSDarrick J. Wong 	xfs_ifree_local_data(ip, XFS_DATA_FORK);
277098c4f78dSDarrick J. Wong 	xfs_ifree_local_data(ip, XFS_ATTR_FORK);
277198c4f78dSDarrick J. Wong 
2772c19b3b05SDave Chinner 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
27731da177e4SLinus Torvalds 	ip->i_d.di_flags = 0;
2774beaae8cdSDarrick J. Wong 	ip->i_d.di_flags2 = 0;
27751da177e4SLinus Torvalds 	ip->i_d.di_dmevmask = 0;
27761da177e4SLinus Torvalds 	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
27771da177e4SLinus Torvalds 	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
27781da177e4SLinus Torvalds 	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2779dc1baa71SEric Sandeen 
2780dc1baa71SEric Sandeen 	/* Don't attempt to replay owner changes for a deleted inode */
2781dc1baa71SEric Sandeen 	ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER);
2782dc1baa71SEric Sandeen 
27831da177e4SLinus Torvalds 	/*
27841da177e4SLinus Torvalds 	 * Bump the generation count so no one will be confused
27851da177e4SLinus Torvalds 	 * by reincarnations of this inode.
27861da177e4SLinus Torvalds 	 */
27879e9a2674SDave Chinner 	VFS_I(ip)->i_generation++;
27881da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
27891da177e4SLinus Torvalds 
279009b56604SBrian Foster 	if (xic.deleted)
279109b56604SBrian Foster 		error = xfs_ifree_cluster(ip, tp, &xic);
27921da177e4SLinus Torvalds 
27932a30f36dSChandra Seetharaman 	return error;
27941da177e4SLinus Torvalds }
27951da177e4SLinus Torvalds 
27961da177e4SLinus Torvalds /*
279760ec6783SChristoph Hellwig  * This is called to unpin an inode.  The caller must have the inode locked
279860ec6783SChristoph Hellwig  * in at least shared mode so that the buffer cannot be subsequently pinned
279960ec6783SChristoph Hellwig  * once someone is waiting for it to be unpinned.
28001da177e4SLinus Torvalds  */
280160ec6783SChristoph Hellwig static void
2802f392e631SChristoph Hellwig xfs_iunpin(
280360ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
2804a3f74ffbSDavid Chinner {
2805579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2806a3f74ffbSDavid Chinner 
28074aaf15d1SDave Chinner 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
28084aaf15d1SDave Chinner 
2809a3f74ffbSDavid Chinner 	/* Give the log a push to start the unpinning I/O */
2810656de4ffSChristoph Hellwig 	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
2811a14a348bSChristoph Hellwig 
2812a3f74ffbSDavid Chinner }
2813a3f74ffbSDavid Chinner 
2814f392e631SChristoph Hellwig static void
2815f392e631SChristoph Hellwig __xfs_iunpin_wait(
2816f392e631SChristoph Hellwig 	struct xfs_inode	*ip)
2817f392e631SChristoph Hellwig {
2818f392e631SChristoph Hellwig 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2819f392e631SChristoph Hellwig 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2820f392e631SChristoph Hellwig 
2821f392e631SChristoph Hellwig 	xfs_iunpin(ip);
2822f392e631SChristoph Hellwig 
2823f392e631SChristoph Hellwig 	do {
282421417136SIngo Molnar 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2825f392e631SChristoph Hellwig 		if (xfs_ipincount(ip))
2826f392e631SChristoph Hellwig 			io_schedule();
2827f392e631SChristoph Hellwig 	} while (xfs_ipincount(ip));
282821417136SIngo Molnar 	finish_wait(wq, &wait.wq_entry);
2829f392e631SChristoph Hellwig }
2830f392e631SChristoph Hellwig 
2831777df5afSDave Chinner void
28321da177e4SLinus Torvalds xfs_iunpin_wait(
283360ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
28341da177e4SLinus Torvalds {
2835f392e631SChristoph Hellwig 	if (xfs_ipincount(ip))
2836f392e631SChristoph Hellwig 		__xfs_iunpin_wait(ip);
28371da177e4SLinus Torvalds }
28381da177e4SLinus Torvalds 
283927320369SDave Chinner /*
284027320369SDave Chinner  * Removing an inode from the namespace involves removing the directory entry
284127320369SDave Chinner  * and dropping the link count on the inode. Removing the directory entry can
284227320369SDave Chinner  * result in locking an AGF (directory blocks were freed) and removing a link
284327320369SDave Chinner  * count can result in placing the inode on an unlinked list which results in
284427320369SDave Chinner  * locking an AGI.
284527320369SDave Chinner  *
284627320369SDave Chinner  * The big problem here is that we have an ordering constraint on AGF and AGI
284727320369SDave Chinner  * locking - inode allocation locks the AGI, then can allocate a new extent for
284827320369SDave Chinner  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
284927320369SDave Chinner  * removes the inode from the unlinked list, requiring that we lock the AGI
285027320369SDave Chinner  * first, and then freeing the inode can result in an inode chunk being freed
285127320369SDave Chinner  * and hence freeing disk space requiring that we lock an AGF.
285227320369SDave Chinner  *
285327320369SDave Chinner  * Hence the ordering that is imposed by other parts of the code is AGI before
285427320369SDave Chinner  * AGF. This means we cannot remove the directory entry before we drop the inode
285527320369SDave Chinner  * reference count and put it on the unlinked list as this results in a lock
285627320369SDave Chinner  * order of AGF then AGI, and this can deadlock against inode allocation and
285727320369SDave Chinner  * freeing. Therefore we must drop the link counts before we remove the
285827320369SDave Chinner  * directory entry.
285927320369SDave Chinner  *
286027320369SDave Chinner  * This is still safe from a transactional point of view - it is not until we
2861310a75a3SDarrick J. Wong  * get to xfs_defer_finish() that we have the possibility of multiple
286227320369SDave Chinner  * transactions in this operation. Hence as long as we remove the directory
286327320369SDave Chinner  * entry and drop the link count in the first transaction of the remove
286427320369SDave Chinner  * operation, there are no transactional constraints on the ordering here.
286527320369SDave Chinner  */
2866c24b5dfaSDave Chinner int
2867c24b5dfaSDave Chinner xfs_remove(
2868c24b5dfaSDave Chinner 	xfs_inode_t             *dp,
2869c24b5dfaSDave Chinner 	struct xfs_name		*name,
2870c24b5dfaSDave Chinner 	xfs_inode_t		*ip)
2871c24b5dfaSDave Chinner {
2872c24b5dfaSDave Chinner 	xfs_mount_t		*mp = dp->i_mount;
2873c24b5dfaSDave Chinner 	xfs_trans_t             *tp = NULL;
2874c19b3b05SDave Chinner 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2875c24b5dfaSDave Chinner 	int                     error = 0;
2876c24b5dfaSDave Chinner 	uint			resblks;
2877c24b5dfaSDave Chinner 
2878c24b5dfaSDave Chinner 	trace_xfs_remove(dp, name);
2879c24b5dfaSDave Chinner 
2880c24b5dfaSDave Chinner 	if (XFS_FORCED_SHUTDOWN(mp))
28812451337dSDave Chinner 		return -EIO;
2882c24b5dfaSDave Chinner 
2883c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(dp);
2884c24b5dfaSDave Chinner 	if (error)
2885c24b5dfaSDave Chinner 		goto std_return;
2886c24b5dfaSDave Chinner 
2887c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
2888c24b5dfaSDave Chinner 	if (error)
2889c24b5dfaSDave Chinner 		goto std_return;
2890c24b5dfaSDave Chinner 
2891c24b5dfaSDave Chinner 	/*
2892c24b5dfaSDave Chinner 	 * We try to get the real space reservation first,
2893c24b5dfaSDave Chinner 	 * allowing for directory btree deletion(s) implying
2894c24b5dfaSDave Chinner 	 * possible bmap insert(s).  If we can't get the space
2895c24b5dfaSDave Chinner 	 * reservation then we use 0 instead, and avoid the bmap
2896c24b5dfaSDave Chinner 	 * btree insert(s) in the directory code by, if the bmap
2897c24b5dfaSDave Chinner 	 * insert tries to happen, instead trimming the LAST
2898c24b5dfaSDave Chinner 	 * block from the directory.
2899c24b5dfaSDave Chinner 	 */
2900c24b5dfaSDave Chinner 	resblks = XFS_REMOVE_SPACE_RES(mp);
2901253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
29022451337dSDave Chinner 	if (error == -ENOSPC) {
2903c24b5dfaSDave Chinner 		resblks = 0;
2904253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2905253f4911SChristoph Hellwig 				&tp);
2906c24b5dfaSDave Chinner 	}
2907c24b5dfaSDave Chinner 	if (error) {
29082451337dSDave Chinner 		ASSERT(error != -ENOSPC);
2909253f4911SChristoph Hellwig 		goto std_return;
2910c24b5dfaSDave Chinner 	}
2911c24b5dfaSDave Chinner 
29127c2d238aSDarrick J. Wong 	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2913c24b5dfaSDave Chinner 
291465523218SChristoph Hellwig 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2915c24b5dfaSDave Chinner 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2916c24b5dfaSDave Chinner 
2917c24b5dfaSDave Chinner 	/*
2918c24b5dfaSDave Chinner 	 * If we're removing a directory perform some additional validation.
2919c24b5dfaSDave Chinner 	 */
2920c24b5dfaSDave Chinner 	if (is_dir) {
292154d7b5c1SDave Chinner 		ASSERT(VFS_I(ip)->i_nlink >= 2);
292254d7b5c1SDave Chinner 		if (VFS_I(ip)->i_nlink != 2) {
29232451337dSDave Chinner 			error = -ENOTEMPTY;
2924c24b5dfaSDave Chinner 			goto out_trans_cancel;
2925c24b5dfaSDave Chinner 		}
2926c24b5dfaSDave Chinner 		if (!xfs_dir_isempty(ip)) {
29272451337dSDave Chinner 			error = -ENOTEMPTY;
2928c24b5dfaSDave Chinner 			goto out_trans_cancel;
2929c24b5dfaSDave Chinner 		}
2930c24b5dfaSDave Chinner 
293127320369SDave Chinner 		/* Drop the link from ip's "..".  */
2932c24b5dfaSDave Chinner 		error = xfs_droplink(tp, dp);
2933c24b5dfaSDave Chinner 		if (error)
293427320369SDave Chinner 			goto out_trans_cancel;
2935c24b5dfaSDave Chinner 
293627320369SDave Chinner 		/* Drop the "." link from ip to self.  */
2937c24b5dfaSDave Chinner 		error = xfs_droplink(tp, ip);
2938c24b5dfaSDave Chinner 		if (error)
293927320369SDave Chinner 			goto out_trans_cancel;
2940c24b5dfaSDave Chinner 	} else {
2941c24b5dfaSDave Chinner 		/*
2942c24b5dfaSDave Chinner 		 * When removing a non-directory we need to log the parent
2943c24b5dfaSDave Chinner 		 * inode here.  For a directory this is done implicitly
2944c24b5dfaSDave Chinner 		 * by the xfs_droplink call for the ".." entry.
2945c24b5dfaSDave Chinner 		 */
2946c24b5dfaSDave Chinner 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2947c24b5dfaSDave Chinner 	}
294827320369SDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2949c24b5dfaSDave Chinner 
295027320369SDave Chinner 	/* Drop the link from dp to ip. */
2951c24b5dfaSDave Chinner 	error = xfs_droplink(tp, ip);
2952c24b5dfaSDave Chinner 	if (error)
295327320369SDave Chinner 		goto out_trans_cancel;
2954c24b5dfaSDave Chinner 
2955381eee69SBrian Foster 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
295627320369SDave Chinner 	if (error) {
29572451337dSDave Chinner 		ASSERT(error != -ENOENT);
2958c8eac49eSBrian Foster 		goto out_trans_cancel;
295927320369SDave Chinner 	}
296027320369SDave Chinner 
2961c24b5dfaSDave Chinner 	/*
2962c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
2963c24b5dfaSDave Chinner 	 * remove transaction goes to disk before returning to
2964c24b5dfaSDave Chinner 	 * the user.
2965c24b5dfaSDave Chinner 	 */
2966c24b5dfaSDave Chinner 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2967c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
2968c24b5dfaSDave Chinner 
296970393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
2970c24b5dfaSDave Chinner 	if (error)
2971c24b5dfaSDave Chinner 		goto std_return;
2972c24b5dfaSDave Chinner 
29732cd2ef6aSChristoph Hellwig 	if (is_dir && xfs_inode_is_filestream(ip))
2974c24b5dfaSDave Chinner 		xfs_filestream_deassociate(ip);
2975c24b5dfaSDave Chinner 
2976c24b5dfaSDave Chinner 	return 0;
2977c24b5dfaSDave Chinner 
2978c24b5dfaSDave Chinner  out_trans_cancel:
29794906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
2980c24b5dfaSDave Chinner  std_return:
2981c24b5dfaSDave Chinner 	return error;
2982c24b5dfaSDave Chinner }
2983c24b5dfaSDave Chinner 
2984f6bba201SDave Chinner /*
2985f6bba201SDave Chinner  * Enter all inodes for a rename transaction into a sorted array.
2986f6bba201SDave Chinner  */
298795afcf5cSDave Chinner #define __XFS_SORT_INODES	5
2988f6bba201SDave Chinner STATIC void
2989f6bba201SDave Chinner xfs_sort_for_rename(
299095afcf5cSDave Chinner 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
299195afcf5cSDave Chinner 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
299295afcf5cSDave Chinner 	struct xfs_inode	*ip1,	/* in: inode of old entry */
299395afcf5cSDave Chinner 	struct xfs_inode	*ip2,	/* in: inode of new entry */
299495afcf5cSDave Chinner 	struct xfs_inode	*wip,	/* in: whiteout inode */
299595afcf5cSDave Chinner 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
299695afcf5cSDave Chinner 	int			*num_inodes)  /* in/out: inodes in array */
2997f6bba201SDave Chinner {
2998f6bba201SDave Chinner 	int			i, j;
2999f6bba201SDave Chinner 
300095afcf5cSDave Chinner 	ASSERT(*num_inodes == __XFS_SORT_INODES);
300195afcf5cSDave Chinner 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
300295afcf5cSDave Chinner 
3003f6bba201SDave Chinner 	/*
3004f6bba201SDave Chinner 	 * i_tab contains a list of pointers to inodes.  We initialize
3005f6bba201SDave Chinner 	 * the table here & we'll sort it.  We will then use it to
3006f6bba201SDave Chinner 	 * order the acquisition of the inode locks.
3007f6bba201SDave Chinner 	 *
3008f6bba201SDave Chinner 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
3009f6bba201SDave Chinner 	 */
301095afcf5cSDave Chinner 	i = 0;
301195afcf5cSDave Chinner 	i_tab[i++] = dp1;
301295afcf5cSDave Chinner 	i_tab[i++] = dp2;
301395afcf5cSDave Chinner 	i_tab[i++] = ip1;
301495afcf5cSDave Chinner 	if (ip2)
301595afcf5cSDave Chinner 		i_tab[i++] = ip2;
301695afcf5cSDave Chinner 	if (wip)
301795afcf5cSDave Chinner 		i_tab[i++] = wip;
301895afcf5cSDave Chinner 	*num_inodes = i;
3019f6bba201SDave Chinner 
3020f6bba201SDave Chinner 	/*
3021f6bba201SDave Chinner 	 * Sort the elements via bubble sort.  (Remember, there are at
302295afcf5cSDave Chinner 	 * most 5 elements to sort, so this is adequate.)
3023f6bba201SDave Chinner 	 */
3024f6bba201SDave Chinner 	for (i = 0; i < *num_inodes; i++) {
3025f6bba201SDave Chinner 		for (j = 1; j < *num_inodes; j++) {
3026f6bba201SDave Chinner 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
302795afcf5cSDave Chinner 				struct xfs_inode *temp = i_tab[j];
3028f6bba201SDave Chinner 				i_tab[j] = i_tab[j-1];
3029f6bba201SDave Chinner 				i_tab[j-1] = temp;
3030f6bba201SDave Chinner 			}
3031f6bba201SDave Chinner 		}
3032f6bba201SDave Chinner 	}
3033f6bba201SDave Chinner }
3034f6bba201SDave Chinner 
3035310606b0SDave Chinner static int
3036310606b0SDave Chinner xfs_finish_rename(
3037c9cfdb38SBrian Foster 	struct xfs_trans	*tp)
3038310606b0SDave Chinner {
3039310606b0SDave Chinner 	/*
3040310606b0SDave Chinner 	 * If this is a synchronous mount, make sure that the rename transaction
3041310606b0SDave Chinner 	 * goes to disk before returning to the user.
3042310606b0SDave Chinner 	 */
3043310606b0SDave Chinner 	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
3044310606b0SDave Chinner 		xfs_trans_set_sync(tp);
3045310606b0SDave Chinner 
304670393313SChristoph Hellwig 	return xfs_trans_commit(tp);
3047310606b0SDave Chinner }
3048310606b0SDave Chinner 
3049f6bba201SDave Chinner /*
3050d31a1825SCarlos Maiolino  * xfs_cross_rename()
3051d31a1825SCarlos Maiolino  *
3052d31a1825SCarlos Maiolino  * responsible for handling RENAME_EXCHANGE flag in renameat2() sytemcall
3053d31a1825SCarlos Maiolino  */
3054d31a1825SCarlos Maiolino STATIC int
3055d31a1825SCarlos Maiolino xfs_cross_rename(
3056d31a1825SCarlos Maiolino 	struct xfs_trans	*tp,
3057d31a1825SCarlos Maiolino 	struct xfs_inode	*dp1,
3058d31a1825SCarlos Maiolino 	struct xfs_name		*name1,
3059d31a1825SCarlos Maiolino 	struct xfs_inode	*ip1,
3060d31a1825SCarlos Maiolino 	struct xfs_inode	*dp2,
3061d31a1825SCarlos Maiolino 	struct xfs_name		*name2,
3062d31a1825SCarlos Maiolino 	struct xfs_inode	*ip2,
3063d31a1825SCarlos Maiolino 	int			spaceres)
3064d31a1825SCarlos Maiolino {
3065d31a1825SCarlos Maiolino 	int		error = 0;
3066d31a1825SCarlos Maiolino 	int		ip1_flags = 0;
3067d31a1825SCarlos Maiolino 	int		ip2_flags = 0;
3068d31a1825SCarlos Maiolino 	int		dp2_flags = 0;
3069d31a1825SCarlos Maiolino 
3070d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in first parent */
3071381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
3072d31a1825SCarlos Maiolino 	if (error)
3073eeacd321SDave Chinner 		goto out_trans_abort;
3074d31a1825SCarlos Maiolino 
3075d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in second parent */
3076381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
3077d31a1825SCarlos Maiolino 	if (error)
3078eeacd321SDave Chinner 		goto out_trans_abort;
3079d31a1825SCarlos Maiolino 
3080d31a1825SCarlos Maiolino 	/*
3081d31a1825SCarlos Maiolino 	 * If we're renaming one or more directories across different parents,
3082d31a1825SCarlos Maiolino 	 * update the respective ".." entries (and link counts) to match the new
3083d31a1825SCarlos Maiolino 	 * parents.
3084d31a1825SCarlos Maiolino 	 */
3085d31a1825SCarlos Maiolino 	if (dp1 != dp2) {
3086d31a1825SCarlos Maiolino 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3087d31a1825SCarlos Maiolino 
3088c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
3089d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
3090381eee69SBrian Foster 						dp1->i_ino, spaceres);
3091d31a1825SCarlos Maiolino 			if (error)
3092eeacd321SDave Chinner 				goto out_trans_abort;
3093d31a1825SCarlos Maiolino 
3094d31a1825SCarlos Maiolino 			/* transfer ip2 ".." reference to dp1 */
3095c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
3096d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp2);
3097d31a1825SCarlos Maiolino 				if (error)
3098eeacd321SDave Chinner 					goto out_trans_abort;
309991083269SEric Sandeen 				xfs_bumplink(tp, dp1);
3100d31a1825SCarlos Maiolino 			}
3101d31a1825SCarlos Maiolino 
3102d31a1825SCarlos Maiolino 			/*
3103d31a1825SCarlos Maiolino 			 * Although ip1 isn't changed here, userspace needs
3104d31a1825SCarlos Maiolino 			 * to be warned about the change, so that applications
3105d31a1825SCarlos Maiolino 			 * relying on it (like backup ones), will properly
3106d31a1825SCarlos Maiolino 			 * notify the change
3107d31a1825SCarlos Maiolino 			 */
3108d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_CHG;
3109d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3110d31a1825SCarlos Maiolino 		}
3111d31a1825SCarlos Maiolino 
3112c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
3113d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
3114381eee69SBrian Foster 						dp2->i_ino, spaceres);
3115d31a1825SCarlos Maiolino 			if (error)
3116eeacd321SDave Chinner 				goto out_trans_abort;
3117d31a1825SCarlos Maiolino 
3118d31a1825SCarlos Maiolino 			/* transfer ip1 ".." reference to dp2 */
3119c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
3120d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp1);
3121d31a1825SCarlos Maiolino 				if (error)
3122eeacd321SDave Chinner 					goto out_trans_abort;
312391083269SEric Sandeen 				xfs_bumplink(tp, dp2);
3124d31a1825SCarlos Maiolino 			}
3125d31a1825SCarlos Maiolino 
3126d31a1825SCarlos Maiolino 			/*
3127d31a1825SCarlos Maiolino 			 * Although ip2 isn't changed here, userspace needs
3128d31a1825SCarlos Maiolino 			 * to be warned about the change, so that applications
3129d31a1825SCarlos Maiolino 			 * relying on it (like backup ones), will properly
3130d31a1825SCarlos Maiolino 			 * notify the change
3131d31a1825SCarlos Maiolino 			 */
3132d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3133d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_CHG;
3134d31a1825SCarlos Maiolino 		}
3135d31a1825SCarlos Maiolino 	}
3136d31a1825SCarlos Maiolino 
3137d31a1825SCarlos Maiolino 	if (ip1_flags) {
3138d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
3139d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3140d31a1825SCarlos Maiolino 	}
3141d31a1825SCarlos Maiolino 	if (ip2_flags) {
3142d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
3143d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3144d31a1825SCarlos Maiolino 	}
3145d31a1825SCarlos Maiolino 	if (dp2_flags) {
3146d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
3147d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3148d31a1825SCarlos Maiolino 	}
3149d31a1825SCarlos Maiolino 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3150d31a1825SCarlos Maiolino 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3151c9cfdb38SBrian Foster 	return xfs_finish_rename(tp);
3152eeacd321SDave Chinner 
3153eeacd321SDave Chinner out_trans_abort:
31544906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
3155d31a1825SCarlos Maiolino 	return error;
3156d31a1825SCarlos Maiolino }
3157d31a1825SCarlos Maiolino 
3158d31a1825SCarlos Maiolino /*
31597dcf5c3eSDave Chinner  * xfs_rename_alloc_whiteout()
31607dcf5c3eSDave Chinner  *
31617dcf5c3eSDave Chinner  * Return a referenced, unlinked, unlocked inode that that can be used as a
31627dcf5c3eSDave Chinner  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
31637dcf5c3eSDave Chinner  * crash between allocating the inode and linking it into the rename transaction
31647dcf5c3eSDave Chinner  * recovery will free the inode and we won't leak it.
31657dcf5c3eSDave Chinner  */
31667dcf5c3eSDave Chinner static int
31677dcf5c3eSDave Chinner xfs_rename_alloc_whiteout(
31687dcf5c3eSDave Chinner 	struct xfs_inode	*dp,
31697dcf5c3eSDave Chinner 	struct xfs_inode	**wip)
31707dcf5c3eSDave Chinner {
31717dcf5c3eSDave Chinner 	struct xfs_inode	*tmpfile;
31727dcf5c3eSDave Chinner 	int			error;
31737dcf5c3eSDave Chinner 
3174a1f69417SEric Sandeen 	error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
31757dcf5c3eSDave Chinner 	if (error)
31767dcf5c3eSDave Chinner 		return error;
31777dcf5c3eSDave Chinner 
317822419ac9SBrian Foster 	/*
317922419ac9SBrian Foster 	 * Prepare the tmpfile inode as if it were created through the VFS.
3180c4a6bf7fSDarrick J. Wong 	 * Complete the inode setup and flag it as linkable.  nlink is already
3181c4a6bf7fSDarrick J. Wong 	 * zero, so we can skip the drop_nlink.
318222419ac9SBrian Foster 	 */
31832b3d1d41SChristoph Hellwig 	xfs_setup_iops(tmpfile);
31847dcf5c3eSDave Chinner 	xfs_finish_inode_setup(tmpfile);
31857dcf5c3eSDave Chinner 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
31867dcf5c3eSDave Chinner 
31877dcf5c3eSDave Chinner 	*wip = tmpfile;
31887dcf5c3eSDave Chinner 	return 0;
31897dcf5c3eSDave Chinner }
31907dcf5c3eSDave Chinner 
31917dcf5c3eSDave Chinner /*
3192f6bba201SDave Chinner  * xfs_rename
3193f6bba201SDave Chinner  */
3194f6bba201SDave Chinner int
3195f6bba201SDave Chinner xfs_rename(
31967dcf5c3eSDave Chinner 	struct xfs_inode	*src_dp,
3197f6bba201SDave Chinner 	struct xfs_name		*src_name,
31987dcf5c3eSDave Chinner 	struct xfs_inode	*src_ip,
31997dcf5c3eSDave Chinner 	struct xfs_inode	*target_dp,
3200f6bba201SDave Chinner 	struct xfs_name		*target_name,
32017dcf5c3eSDave Chinner 	struct xfs_inode	*target_ip,
3202d31a1825SCarlos Maiolino 	unsigned int		flags)
3203f6bba201SDave Chinner {
32047dcf5c3eSDave Chinner 	struct xfs_mount	*mp = src_dp->i_mount;
32057dcf5c3eSDave Chinner 	struct xfs_trans	*tp;
32067dcf5c3eSDave Chinner 	struct xfs_inode	*wip = NULL;		/* whiteout inode */
32077dcf5c3eSDave Chinner 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
320893597ae8Skaixuxia 	struct xfs_buf		*agibp;
320995afcf5cSDave Chinner 	int			num_inodes = __XFS_SORT_INODES;
32102b93681fSDave Chinner 	bool			new_parent = (src_dp != target_dp);
3211c19b3b05SDave Chinner 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3212f6bba201SDave Chinner 	int			spaceres;
32137dcf5c3eSDave Chinner 	int			error;
3214f6bba201SDave Chinner 
3215f6bba201SDave Chinner 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3216f6bba201SDave Chinner 
3217eeacd321SDave Chinner 	if ((flags & RENAME_EXCHANGE) && !target_ip)
3218eeacd321SDave Chinner 		return -EINVAL;
3219f6bba201SDave Chinner 
32207dcf5c3eSDave Chinner 	/*
32217dcf5c3eSDave Chinner 	 * If we are doing a whiteout operation, allocate the whiteout inode
32227dcf5c3eSDave Chinner 	 * we will be placing at the target and ensure the type is set
32237dcf5c3eSDave Chinner 	 * appropriately.
32247dcf5c3eSDave Chinner 	 */
32257dcf5c3eSDave Chinner 	if (flags & RENAME_WHITEOUT) {
32267dcf5c3eSDave Chinner 		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
32277dcf5c3eSDave Chinner 		error = xfs_rename_alloc_whiteout(target_dp, &wip);
32287dcf5c3eSDave Chinner 		if (error)
32297dcf5c3eSDave Chinner 			return error;
3230f6bba201SDave Chinner 
32317dcf5c3eSDave Chinner 		/* setup target dirent info as whiteout */
32327dcf5c3eSDave Chinner 		src_name->type = XFS_DIR3_FT_CHRDEV;
32337dcf5c3eSDave Chinner 	}
32347dcf5c3eSDave Chinner 
32357dcf5c3eSDave Chinner 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3236f6bba201SDave Chinner 				inodes, &num_inodes);
3237f6bba201SDave Chinner 
3238f6bba201SDave Chinner 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3239253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
32402451337dSDave Chinner 	if (error == -ENOSPC) {
3241f6bba201SDave Chinner 		spaceres = 0;
3242253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3243253f4911SChristoph Hellwig 				&tp);
3244f6bba201SDave Chinner 	}
3245445883e8SDave Chinner 	if (error)
3246253f4911SChristoph Hellwig 		goto out_release_wip;
3247f6bba201SDave Chinner 
3248f6bba201SDave Chinner 	/*
3249f6bba201SDave Chinner 	 * Attach the dquots to the inodes
3250f6bba201SDave Chinner 	 */
3251f6bba201SDave Chinner 	error = xfs_qm_vop_rename_dqattach(inodes);
3252445883e8SDave Chinner 	if (error)
3253445883e8SDave Chinner 		goto out_trans_cancel;
3254f6bba201SDave Chinner 
3255f6bba201SDave Chinner 	/*
3256f6bba201SDave Chinner 	 * Lock all the participating inodes. Depending upon whether
3257f6bba201SDave Chinner 	 * the target_name exists in the target directory, and
3258f6bba201SDave Chinner 	 * whether the target directory is the same as the source
3259f6bba201SDave Chinner 	 * directory, we can lock from 2 to 4 inodes.
3260f6bba201SDave Chinner 	 */
3261f6bba201SDave Chinner 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3262f6bba201SDave Chinner 
3263f6bba201SDave Chinner 	/*
3264f6bba201SDave Chinner 	 * Join all the inodes to the transaction. From this point on,
3265f6bba201SDave Chinner 	 * we can rely on either trans_commit or trans_cancel to unlock
3266f6bba201SDave Chinner 	 * them.
3267f6bba201SDave Chinner 	 */
326865523218SChristoph Hellwig 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3269f6bba201SDave Chinner 	if (new_parent)
327065523218SChristoph Hellwig 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3271f6bba201SDave Chinner 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3272f6bba201SDave Chinner 	if (target_ip)
3273f6bba201SDave Chinner 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
32747dcf5c3eSDave Chinner 	if (wip)
32757dcf5c3eSDave Chinner 		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3276f6bba201SDave Chinner 
3277f6bba201SDave Chinner 	/*
3278f6bba201SDave Chinner 	 * If we are using project inheritance, we only allow renames
3279f6bba201SDave Chinner 	 * into our tree when the project IDs are the same; else the
3280f6bba201SDave Chinner 	 * tree quota mechanism would be circumvented.
3281f6bba201SDave Chinner 	 */
3282f6bba201SDave Chinner 	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3283de7a866fSChristoph Hellwig 		     target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
32842451337dSDave Chinner 		error = -EXDEV;
3285445883e8SDave Chinner 		goto out_trans_cancel;
3286f6bba201SDave Chinner 	}
3287f6bba201SDave Chinner 
3288eeacd321SDave Chinner 	/* RENAME_EXCHANGE is unique from here on. */
3289eeacd321SDave Chinner 	if (flags & RENAME_EXCHANGE)
3290eeacd321SDave Chinner 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3291d31a1825SCarlos Maiolino 					target_dp, target_name, target_ip,
3292f16dea54SBrian Foster 					spaceres);
3293d31a1825SCarlos Maiolino 
3294d31a1825SCarlos Maiolino 	/*
3295bc56ad8cSkaixuxia 	 * Check for expected errors before we dirty the transaction
3296bc56ad8cSkaixuxia 	 * so we can return an error without a transaction abort.
3297f6bba201SDave Chinner 	 */
3298f6bba201SDave Chinner 	if (target_ip == NULL) {
3299f6bba201SDave Chinner 		/*
3300f6bba201SDave Chinner 		 * If there's no space reservation, check the entry will
3301f6bba201SDave Chinner 		 * fit before actually inserting it.
3302f6bba201SDave Chinner 		 */
330394f3cad5SEric Sandeen 		if (!spaceres) {
330494f3cad5SEric Sandeen 			error = xfs_dir_canenter(tp, target_dp, target_name);
3305f6bba201SDave Chinner 			if (error)
3306445883e8SDave Chinner 				goto out_trans_cancel;
330794f3cad5SEric Sandeen 		}
3308bc56ad8cSkaixuxia 	} else {
3309bc56ad8cSkaixuxia 		/*
3310bc56ad8cSkaixuxia 		 * If target exists and it's a directory, check that whether
3311bc56ad8cSkaixuxia 		 * it can be destroyed.
3312bc56ad8cSkaixuxia 		 */
3313bc56ad8cSkaixuxia 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3314bc56ad8cSkaixuxia 		    (!xfs_dir_isempty(target_ip) ||
3315bc56ad8cSkaixuxia 		     (VFS_I(target_ip)->i_nlink > 2))) {
3316bc56ad8cSkaixuxia 			error = -EEXIST;
3317bc56ad8cSkaixuxia 			goto out_trans_cancel;
3318bc56ad8cSkaixuxia 		}
3319bc56ad8cSkaixuxia 	}
3320bc56ad8cSkaixuxia 
3321bc56ad8cSkaixuxia 	/*
3322bc56ad8cSkaixuxia 	 * Directory entry creation below may acquire the AGF. Remove
3323bc56ad8cSkaixuxia 	 * the whiteout from the unlinked list first to preserve correct
3324bc56ad8cSkaixuxia 	 * AGI/AGF locking order. This dirties the transaction so failures
3325bc56ad8cSkaixuxia 	 * after this point will abort and log recovery will clean up the
3326bc56ad8cSkaixuxia 	 * mess.
3327bc56ad8cSkaixuxia 	 *
3328bc56ad8cSkaixuxia 	 * For whiteouts, we need to bump the link count on the whiteout
3329bc56ad8cSkaixuxia 	 * inode. After this point, we have a real link, clear the tmpfile
3330bc56ad8cSkaixuxia 	 * state flag from the inode so it doesn't accidentally get misused
3331bc56ad8cSkaixuxia 	 * in future.
3332bc56ad8cSkaixuxia 	 */
3333bc56ad8cSkaixuxia 	if (wip) {
3334bc56ad8cSkaixuxia 		ASSERT(VFS_I(wip)->i_nlink == 0);
3335bc56ad8cSkaixuxia 		error = xfs_iunlink_remove(tp, wip);
3336bc56ad8cSkaixuxia 		if (error)
3337bc56ad8cSkaixuxia 			goto out_trans_cancel;
3338bc56ad8cSkaixuxia 
3339bc56ad8cSkaixuxia 		xfs_bumplink(tp, wip);
3340bc56ad8cSkaixuxia 		VFS_I(wip)->i_state &= ~I_LINKABLE;
3341bc56ad8cSkaixuxia 	}
3342bc56ad8cSkaixuxia 
3343bc56ad8cSkaixuxia 	/*
3344bc56ad8cSkaixuxia 	 * Set up the target.
3345bc56ad8cSkaixuxia 	 */
3346bc56ad8cSkaixuxia 	if (target_ip == NULL) {
3347f6bba201SDave Chinner 		/*
3348f6bba201SDave Chinner 		 * If target does not exist and the rename crosses
3349f6bba201SDave Chinner 		 * directories, adjust the target directory link count
3350f6bba201SDave Chinner 		 * to account for the ".." reference from the new entry.
3351f6bba201SDave Chinner 		 */
3352f6bba201SDave Chinner 		error = xfs_dir_createname(tp, target_dp, target_name,
3353381eee69SBrian Foster 					   src_ip->i_ino, spaceres);
3354f6bba201SDave Chinner 		if (error)
3355c8eac49eSBrian Foster 			goto out_trans_cancel;
3356f6bba201SDave Chinner 
3357f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
3358f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3359f6bba201SDave Chinner 
3360f6bba201SDave Chinner 		if (new_parent && src_is_directory) {
336191083269SEric Sandeen 			xfs_bumplink(tp, target_dp);
3362f6bba201SDave Chinner 		}
3363f6bba201SDave Chinner 	} else { /* target_ip != NULL */
3364f6bba201SDave Chinner 		/*
3365f6bba201SDave Chinner 		 * Link the source inode under the target name.
3366f6bba201SDave Chinner 		 * If the source inode is a directory and we are moving
3367f6bba201SDave Chinner 		 * it across directories, its ".." entry will be
3368f6bba201SDave Chinner 		 * inconsistent until we replace that down below.
3369f6bba201SDave Chinner 		 *
3370f6bba201SDave Chinner 		 * In case there is already an entry with the same
3371f6bba201SDave Chinner 		 * name at the destination directory, remove it first.
3372f6bba201SDave Chinner 		 */
337393597ae8Skaixuxia 
337493597ae8Skaixuxia 		/*
337593597ae8Skaixuxia 		 * Check whether the replace operation will need to allocate
337693597ae8Skaixuxia 		 * blocks.  This happens when the shortform directory lacks
337793597ae8Skaixuxia 		 * space and we have to convert it to a block format directory.
337893597ae8Skaixuxia 		 * When more blocks are necessary, we must lock the AGI first
337993597ae8Skaixuxia 		 * to preserve locking order (AGI -> AGF).
338093597ae8Skaixuxia 		 */
338193597ae8Skaixuxia 		if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
338293597ae8Skaixuxia 			error = xfs_read_agi(mp, tp,
338393597ae8Skaixuxia 					XFS_INO_TO_AGNO(mp, target_ip->i_ino),
338493597ae8Skaixuxia 					&agibp);
338593597ae8Skaixuxia 			if (error)
338693597ae8Skaixuxia 				goto out_trans_cancel;
338793597ae8Skaixuxia 		}
338893597ae8Skaixuxia 
3389f6bba201SDave Chinner 		error = xfs_dir_replace(tp, target_dp, target_name,
3390381eee69SBrian Foster 					src_ip->i_ino, spaceres);
3391f6bba201SDave Chinner 		if (error)
3392c8eac49eSBrian Foster 			goto out_trans_cancel;
3393f6bba201SDave Chinner 
3394f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
3395f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3396f6bba201SDave Chinner 
3397f6bba201SDave Chinner 		/*
3398f6bba201SDave Chinner 		 * Decrement the link count on the target since the target
3399f6bba201SDave Chinner 		 * dir no longer points to it.
3400f6bba201SDave Chinner 		 */
3401f6bba201SDave Chinner 		error = xfs_droplink(tp, target_ip);
3402f6bba201SDave Chinner 		if (error)
3403c8eac49eSBrian Foster 			goto out_trans_cancel;
3404f6bba201SDave Chinner 
3405f6bba201SDave Chinner 		if (src_is_directory) {
3406f6bba201SDave Chinner 			/*
3407f6bba201SDave Chinner 			 * Drop the link from the old "." entry.
3408f6bba201SDave Chinner 			 */
3409f6bba201SDave Chinner 			error = xfs_droplink(tp, target_ip);
3410f6bba201SDave Chinner 			if (error)
3411c8eac49eSBrian Foster 				goto out_trans_cancel;
3412f6bba201SDave Chinner 		}
3413f6bba201SDave Chinner 	} /* target_ip != NULL */
3414f6bba201SDave Chinner 
3415f6bba201SDave Chinner 	/*
3416f6bba201SDave Chinner 	 * Remove the source.
3417f6bba201SDave Chinner 	 */
3418f6bba201SDave Chinner 	if (new_parent && src_is_directory) {
3419f6bba201SDave Chinner 		/*
3420f6bba201SDave Chinner 		 * Rewrite the ".." entry to point to the new
3421f6bba201SDave Chinner 		 * directory.
3422f6bba201SDave Chinner 		 */
3423f6bba201SDave Chinner 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3424381eee69SBrian Foster 					target_dp->i_ino, spaceres);
34252451337dSDave Chinner 		ASSERT(error != -EEXIST);
3426f6bba201SDave Chinner 		if (error)
3427c8eac49eSBrian Foster 			goto out_trans_cancel;
3428f6bba201SDave Chinner 	}
3429f6bba201SDave Chinner 
3430f6bba201SDave Chinner 	/*
3431f6bba201SDave Chinner 	 * We always want to hit the ctime on the source inode.
3432f6bba201SDave Chinner 	 *
3433f6bba201SDave Chinner 	 * This isn't strictly required by the standards since the source
3434f6bba201SDave Chinner 	 * inode isn't really being changed, but old unix file systems did
3435f6bba201SDave Chinner 	 * it and some incremental backup programs won't work without it.
3436f6bba201SDave Chinner 	 */
3437f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3438f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3439f6bba201SDave Chinner 
3440f6bba201SDave Chinner 	/*
3441f6bba201SDave Chinner 	 * Adjust the link count on src_dp.  This is necessary when
3442f6bba201SDave Chinner 	 * renaming a directory, either within one parent when
3443f6bba201SDave Chinner 	 * the target existed, or across two parent directories.
3444f6bba201SDave Chinner 	 */
3445f6bba201SDave Chinner 	if (src_is_directory && (new_parent || target_ip != NULL)) {
3446f6bba201SDave Chinner 
3447f6bba201SDave Chinner 		/*
3448f6bba201SDave Chinner 		 * Decrement link count on src_directory since the
3449f6bba201SDave Chinner 		 * entry that's moved no longer points to it.
3450f6bba201SDave Chinner 		 */
3451f6bba201SDave Chinner 		error = xfs_droplink(tp, src_dp);
3452f6bba201SDave Chinner 		if (error)
3453c8eac49eSBrian Foster 			goto out_trans_cancel;
3454f6bba201SDave Chinner 	}
3455f6bba201SDave Chinner 
34567dcf5c3eSDave Chinner 	/*
34577dcf5c3eSDave Chinner 	 * For whiteouts, we only need to update the source dirent with the
34587dcf5c3eSDave Chinner 	 * inode number of the whiteout inode rather than removing it
34597dcf5c3eSDave Chinner 	 * altogether.
34607dcf5c3eSDave Chinner 	 */
34617dcf5c3eSDave Chinner 	if (wip) {
34627dcf5c3eSDave Chinner 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3463381eee69SBrian Foster 					spaceres);
34647dcf5c3eSDave Chinner 	} else
3465f6bba201SDave Chinner 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3466381eee69SBrian Foster 					   spaceres);
3467f6bba201SDave Chinner 	if (error)
3468c8eac49eSBrian Foster 		goto out_trans_cancel;
3469f6bba201SDave Chinner 
3470f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3471f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3472f6bba201SDave Chinner 	if (new_parent)
3473f6bba201SDave Chinner 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3474f6bba201SDave Chinner 
3475c9cfdb38SBrian Foster 	error = xfs_finish_rename(tp);
34767dcf5c3eSDave Chinner 	if (wip)
347744a8736bSDarrick J. Wong 		xfs_irele(wip);
34787dcf5c3eSDave Chinner 	return error;
3479f6bba201SDave Chinner 
3480445883e8SDave Chinner out_trans_cancel:
34814906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
3482253f4911SChristoph Hellwig out_release_wip:
34837dcf5c3eSDave Chinner 	if (wip)
348444a8736bSDarrick J. Wong 		xfs_irele(wip);
3485f6bba201SDave Chinner 	return error;
3486f6bba201SDave Chinner }
3487f6bba201SDave Chinner 
3488bad55843SDavid Chinner STATIC int
3489bad55843SDavid Chinner xfs_iflush_cluster(
349019429363SDave Chinner 	struct xfs_inode	*ip,
349119429363SDave Chinner 	struct xfs_buf		*bp)
3492bad55843SDavid Chinner {
349319429363SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
34945017e97dSDave Chinner 	struct xfs_perag	*pag;
3495bad55843SDavid Chinner 	unsigned long		first_index, mask;
349619429363SDave Chinner 	int			cilist_size;
349719429363SDave Chinner 	struct xfs_inode	**cilist;
349819429363SDave Chinner 	struct xfs_inode	*cip;
3499ef325959SDarrick J. Wong 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
3500bad55843SDavid Chinner 	int			nr_found;
3501bad55843SDavid Chinner 	int			clcount = 0;
3502bad55843SDavid Chinner 	int			i;
3503bad55843SDavid Chinner 
35045017e97dSDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
3505bad55843SDavid Chinner 
35064b4d98ccSDarrick J. Wong 	cilist_size = igeo->inodes_per_cluster * sizeof(struct xfs_inode *);
350719429363SDave Chinner 	cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
350819429363SDave Chinner 	if (!cilist)
350944b56e0aSDave Chinner 		goto out_put;
3510bad55843SDavid Chinner 
35114b4d98ccSDarrick J. Wong 	mask = ~(igeo->inodes_per_cluster - 1);
3512bad55843SDavid Chinner 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
35131a3e8f3dSDave Chinner 	rcu_read_lock();
3514bad55843SDavid Chinner 	/* really need a gang lookup range call here */
351519429363SDave Chinner 	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
35164b4d98ccSDarrick J. Wong 					first_index, igeo->inodes_per_cluster);
3517bad55843SDavid Chinner 	if (nr_found == 0)
3518bad55843SDavid Chinner 		goto out_free;
3519bad55843SDavid Chinner 
3520bad55843SDavid Chinner 	for (i = 0; i < nr_found; i++) {
352119429363SDave Chinner 		cip = cilist[i];
352219429363SDave Chinner 		if (cip == ip)
3523bad55843SDavid Chinner 			continue;
35241a3e8f3dSDave Chinner 
35251a3e8f3dSDave Chinner 		/*
35261a3e8f3dSDave Chinner 		 * because this is an RCU protected lookup, we could find a
35271a3e8f3dSDave Chinner 		 * recently freed or even reallocated inode during the lookup.
35281a3e8f3dSDave Chinner 		 * We need to check under the i_flags_lock for a valid inode
35291a3e8f3dSDave Chinner 		 * here. Skip it if it is not valid or the wrong inode.
35301a3e8f3dSDave Chinner 		 */
353119429363SDave Chinner 		spin_lock(&cip->i_flags_lock);
353219429363SDave Chinner 		if (!cip->i_ino ||
353319429363SDave Chinner 		    __xfs_iflags_test(cip, XFS_ISTALE)) {
353419429363SDave Chinner 			spin_unlock(&cip->i_flags_lock);
35351a3e8f3dSDave Chinner 			continue;
35361a3e8f3dSDave Chinner 		}
35375a90e53eSDave Chinner 
35385a90e53eSDave Chinner 		/*
35395a90e53eSDave Chinner 		 * Once we fall off the end of the cluster, no point checking
35405a90e53eSDave Chinner 		 * any more inodes in the list because they will also all be
35415a90e53eSDave Chinner 		 * outside the cluster.
35425a90e53eSDave Chinner 		 */
354319429363SDave Chinner 		if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
354419429363SDave Chinner 			spin_unlock(&cip->i_flags_lock);
35455a90e53eSDave Chinner 			break;
35465a90e53eSDave Chinner 		}
354719429363SDave Chinner 		spin_unlock(&cip->i_flags_lock);
35481a3e8f3dSDave Chinner 
3549bad55843SDavid Chinner 		/*
3550bad55843SDavid Chinner 		 * Do an un-protected check to see if the inode is dirty and
3551bad55843SDavid Chinner 		 * is a candidate for flushing.  These checks will be repeated
3552bad55843SDavid Chinner 		 * later after the appropriate locks are acquired.
3553bad55843SDavid Chinner 		 */
355419429363SDave Chinner 		if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
3555bad55843SDavid Chinner 			continue;
3556bad55843SDavid Chinner 
3557bad55843SDavid Chinner 		/*
3558bad55843SDavid Chinner 		 * Try to get locks.  If any are unavailable or it is pinned,
3559bad55843SDavid Chinner 		 * then this inode cannot be flushed and is skipped.
3560bad55843SDavid Chinner 		 */
3561bad55843SDavid Chinner 
356219429363SDave Chinner 		if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
3563bad55843SDavid Chinner 			continue;
356419429363SDave Chinner 		if (!xfs_iflock_nowait(cip)) {
356519429363SDave Chinner 			xfs_iunlock(cip, XFS_ILOCK_SHARED);
3566bad55843SDavid Chinner 			continue;
3567bad55843SDavid Chinner 		}
356819429363SDave Chinner 		if (xfs_ipincount(cip)) {
356919429363SDave Chinner 			xfs_ifunlock(cip);
357019429363SDave Chinner 			xfs_iunlock(cip, XFS_ILOCK_SHARED);
3571bad55843SDavid Chinner 			continue;
3572bad55843SDavid Chinner 		}
3573bad55843SDavid Chinner 
35748a17d7ddSDave Chinner 
35758a17d7ddSDave Chinner 		/*
35768a17d7ddSDave Chinner 		 * Check the inode number again, just to be certain we are not
35778a17d7ddSDave Chinner 		 * racing with freeing in xfs_reclaim_inode(). See the comments
35788a17d7ddSDave Chinner 		 * in that function for more information as to why the initial
35798a17d7ddSDave Chinner 		 * check is not sufficient.
35808a17d7ddSDave Chinner 		 */
358119429363SDave Chinner 		if (!cip->i_ino) {
358219429363SDave Chinner 			xfs_ifunlock(cip);
358319429363SDave Chinner 			xfs_iunlock(cip, XFS_ILOCK_SHARED);
3584bad55843SDavid Chinner 			continue;
3585bad55843SDavid Chinner 		}
3586bad55843SDavid Chinner 
3587bad55843SDavid Chinner 		/*
3588bad55843SDavid Chinner 		 * arriving here means that this inode can be flushed.  First
3589bad55843SDavid Chinner 		 * re-check that it's dirty before flushing.
3590bad55843SDavid Chinner 		 */
359119429363SDave Chinner 		if (!xfs_inode_clean(cip)) {
3592bad55843SDavid Chinner 			int	error;
359319429363SDave Chinner 			error = xfs_iflush_int(cip, bp);
3594bad55843SDavid Chinner 			if (error) {
359519429363SDave Chinner 				xfs_iunlock(cip, XFS_ILOCK_SHARED);
3596bad55843SDavid Chinner 				goto cluster_corrupt_out;
3597bad55843SDavid Chinner 			}
3598bad55843SDavid Chinner 			clcount++;
3599bad55843SDavid Chinner 		} else {
360019429363SDave Chinner 			xfs_ifunlock(cip);
3601bad55843SDavid Chinner 		}
360219429363SDave Chinner 		xfs_iunlock(cip, XFS_ILOCK_SHARED);
3603bad55843SDavid Chinner 	}
3604bad55843SDavid Chinner 
3605bad55843SDavid Chinner 	if (clcount) {
3606ff6d6af2SBill O'Donnell 		XFS_STATS_INC(mp, xs_icluster_flushcnt);
3607ff6d6af2SBill O'Donnell 		XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3608bad55843SDavid Chinner 	}
3609bad55843SDavid Chinner 
3610bad55843SDavid Chinner out_free:
36111a3e8f3dSDave Chinner 	rcu_read_unlock();
361219429363SDave Chinner 	kmem_free(cilist);
361344b56e0aSDave Chinner out_put:
361444b56e0aSDave Chinner 	xfs_perag_put(pag);
3615bad55843SDavid Chinner 	return 0;
3616bad55843SDavid Chinner 
3617bad55843SDavid Chinner 
3618bad55843SDavid Chinner cluster_corrupt_out:
3619bad55843SDavid Chinner 	/*
3620bad55843SDavid Chinner 	 * Corruption detected in the clustering loop.  Invalidate the
3621bad55843SDavid Chinner 	 * inode buffer and shut down the filesystem.
3622bad55843SDavid Chinner 	 */
36231a3e8f3dSDave Chinner 	rcu_read_unlock();
3624bad55843SDavid Chinner 
3625bad55843SDavid Chinner 	/*
3626e53946dbSDave Chinner 	 * We'll always have an inode attached to the buffer for completion
3627e53946dbSDave Chinner 	 * process by the time we are called from xfs_iflush(). Hence we have
3628e53946dbSDave Chinner 	 * always need to do IO completion processing to abort the inodes
3629e53946dbSDave Chinner 	 * attached to the buffer.  handle them just like the shutdown case in
3630e53946dbSDave Chinner 	 * xfs_buf_submit().
3631bad55843SDavid Chinner 	 */
3632e53946dbSDave Chinner 	ASSERT(bp->b_iodone);
363322fedd80SBrian Foster 	bp->b_flags |= XBF_ASYNC;
3634b0388bf1SDave Chinner 	bp->b_flags &= ~XBF_DONE;
3635c867cb61SChristoph Hellwig 	xfs_buf_stale(bp);
36362451337dSDave Chinner 	xfs_buf_ioerror(bp, -EIO);
3637e8aaba9aSDave Chinner 	xfs_buf_ioend(bp);
3638bad55843SDavid Chinner 
363922fedd80SBrian Foster 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
364022fedd80SBrian Foster 
3641e53946dbSDave Chinner 	/* abort the corrupt inode, as it was not attached to the buffer */
364219429363SDave Chinner 	xfs_iflush_abort(cip, false);
364319429363SDave Chinner 	kmem_free(cilist);
364444b56e0aSDave Chinner 	xfs_perag_put(pag);
36452451337dSDave Chinner 	return -EFSCORRUPTED;
3646bad55843SDavid Chinner }
3647bad55843SDavid Chinner 
36481da177e4SLinus Torvalds /*
36494c46819aSChristoph Hellwig  * Flush dirty inode metadata into the backing buffer.
36504c46819aSChristoph Hellwig  *
36514c46819aSChristoph Hellwig  * The caller must have the inode lock and the inode flush lock held.  The
36524c46819aSChristoph Hellwig  * inode lock will still be held upon return to the caller, and the inode
36534c46819aSChristoph Hellwig  * flush lock will be released after the inode has reached the disk.
36544c46819aSChristoph Hellwig  *
36554c46819aSChristoph Hellwig  * The caller must write out the buffer returned in *bpp and release it.
36561da177e4SLinus Torvalds  */
36571da177e4SLinus Torvalds int
36581da177e4SLinus Torvalds xfs_iflush(
36594c46819aSChristoph Hellwig 	struct xfs_inode	*ip,
36604c46819aSChristoph Hellwig 	struct xfs_buf		**bpp)
36611da177e4SLinus Torvalds {
36624c46819aSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
3663b1438f47SDave Chinner 	struct xfs_buf		*bp = NULL;
36644c46819aSChristoph Hellwig 	struct xfs_dinode	*dip;
36651da177e4SLinus Torvalds 	int			error;
36661da177e4SLinus Torvalds 
3667ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_iflush_count);
36681da177e4SLinus Torvalds 
3669579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3670474fce06SChristoph Hellwig 	ASSERT(xfs_isiflocked(ip));
36711da177e4SLinus Torvalds 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
36728096b1ebSChristoph Hellwig 	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
36731da177e4SLinus Torvalds 
36744c46819aSChristoph Hellwig 	*bpp = NULL;
36751da177e4SLinus Torvalds 
36761da177e4SLinus Torvalds 	xfs_iunpin_wait(ip);
36771da177e4SLinus Torvalds 
36781da177e4SLinus Torvalds 	/*
36794b6a4688SDave Chinner 	 * For stale inodes we cannot rely on the backing buffer remaining
36804b6a4688SDave Chinner 	 * stale in cache for the remaining life of the stale inode and so
3681475ee413SChristoph Hellwig 	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
36824b6a4688SDave Chinner 	 * inodes below. We have to check this after ensuring the inode is
36834b6a4688SDave Chinner 	 * unpinned so that it is safe to reclaim the stale inode after the
36844b6a4688SDave Chinner 	 * flush call.
36854b6a4688SDave Chinner 	 */
36864b6a4688SDave Chinner 	if (xfs_iflags_test(ip, XFS_ISTALE)) {
36874b6a4688SDave Chinner 		xfs_ifunlock(ip);
36884b6a4688SDave Chinner 		return 0;
36894b6a4688SDave Chinner 	}
36904b6a4688SDave Chinner 
36914b6a4688SDave Chinner 	/*
36921da177e4SLinus Torvalds 	 * This may have been unpinned because the filesystem is shutting
36931da177e4SLinus Torvalds 	 * down forcibly. If that's the case we must not write this inode
369432ce90a4SChristoph Hellwig 	 * to disk, because the log record didn't make it to disk.
369532ce90a4SChristoph Hellwig 	 *
369632ce90a4SChristoph Hellwig 	 * We also have to remove the log item from the AIL in this case,
369732ce90a4SChristoph Hellwig 	 * as we wait for an empty AIL as part of the unmount process.
36981da177e4SLinus Torvalds 	 */
36991da177e4SLinus Torvalds 	if (XFS_FORCED_SHUTDOWN(mp)) {
37002451337dSDave Chinner 		error = -EIO;
370132ce90a4SChristoph Hellwig 		goto abort_out;
37021da177e4SLinus Torvalds 	}
37031da177e4SLinus Torvalds 
37041da177e4SLinus Torvalds 	/*
3705b1438f47SDave Chinner 	 * Get the buffer containing the on-disk inode. We are doing a try-lock
3706b1438f47SDave Chinner 	 * operation here, so we may get  an EAGAIN error. In that case, we
3707b1438f47SDave Chinner 	 * simply want to return with the inode still dirty.
3708b1438f47SDave Chinner 	 *
3709b1438f47SDave Chinner 	 * If we get any other error, we effectively have a corruption situation
3710b1438f47SDave Chinner 	 * and we cannot flush the inode, so we treat it the same as failing
3711b1438f47SDave Chinner 	 * xfs_iflush_int().
3712a3f74ffbSDavid Chinner 	 */
3713475ee413SChristoph Hellwig 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3714475ee413SChristoph Hellwig 			       0);
3715b1438f47SDave Chinner 	if (error == -EAGAIN) {
3716a3f74ffbSDavid Chinner 		xfs_ifunlock(ip);
3717a3f74ffbSDavid Chinner 		return error;
3718a3f74ffbSDavid Chinner 	}
3719b1438f47SDave Chinner 	if (error)
3720b1438f47SDave Chinner 		goto corrupt_out;
3721a3f74ffbSDavid Chinner 
3722a3f74ffbSDavid Chinner 	/*
37231da177e4SLinus Torvalds 	 * First flush out the inode that xfs_iflush was called with.
37241da177e4SLinus Torvalds 	 */
37251da177e4SLinus Torvalds 	error = xfs_iflush_int(ip, bp);
3726bad55843SDavid Chinner 	if (error)
37271da177e4SLinus Torvalds 		goto corrupt_out;
37281da177e4SLinus Torvalds 
37291da177e4SLinus Torvalds 	/*
3730a3f74ffbSDavid Chinner 	 * If the buffer is pinned then push on the log now so we won't
3731a3f74ffbSDavid Chinner 	 * get stuck waiting in the write for too long.
3732a3f74ffbSDavid Chinner 	 */
3733811e64c7SChandra Seetharaman 	if (xfs_buf_ispinned(bp))
3734a14a348bSChristoph Hellwig 		xfs_log_force(mp, 0);
3735a3f74ffbSDavid Chinner 
3736a3f74ffbSDavid Chinner 	/*
3737e53946dbSDave Chinner 	 * inode clustering: try to gather other inodes into this write
3738e53946dbSDave Chinner 	 *
3739e53946dbSDave Chinner 	 * Note: Any error during clustering will result in the filesystem
3740e53946dbSDave Chinner 	 * being shut down and completion callbacks run on the cluster buffer.
3741e53946dbSDave Chinner 	 * As we have already flushed and attached this inode to the buffer,
3742e53946dbSDave Chinner 	 * it has already been aborted and released by xfs_iflush_cluster() and
3743e53946dbSDave Chinner 	 * so we have no further error handling to do here.
37441da177e4SLinus Torvalds 	 */
3745bad55843SDavid Chinner 	error = xfs_iflush_cluster(ip, bp);
3746bad55843SDavid Chinner 	if (error)
3747e53946dbSDave Chinner 		return error;
37481da177e4SLinus Torvalds 
37494c46819aSChristoph Hellwig 	*bpp = bp;
37504c46819aSChristoph Hellwig 	return 0;
37511da177e4SLinus Torvalds 
37521da177e4SLinus Torvalds corrupt_out:
3753b1438f47SDave Chinner 	if (bp)
37541da177e4SLinus Torvalds 		xfs_buf_relse(bp);
37557d04a335SNathan Scott 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
375632ce90a4SChristoph Hellwig abort_out:
3757e53946dbSDave Chinner 	/* abort the corrupt inode, as it was not attached to the buffer */
375804913fddSDave Chinner 	xfs_iflush_abort(ip, false);
375932ce90a4SChristoph Hellwig 	return error;
37601da177e4SLinus Torvalds }
37611da177e4SLinus Torvalds 
37629cfb9b47SDarrick J. Wong /*
37639cfb9b47SDarrick J. Wong  * If there are inline format data / attr forks attached to this inode,
37649cfb9b47SDarrick J. Wong  * make sure they're not corrupt.
37659cfb9b47SDarrick J. Wong  */
37669cfb9b47SDarrick J. Wong bool
37679cfb9b47SDarrick J. Wong xfs_inode_verify_forks(
37689cfb9b47SDarrick J. Wong 	struct xfs_inode	*ip)
37699cfb9b47SDarrick J. Wong {
377022431bf3SDarrick J. Wong 	struct xfs_ifork	*ifp;
37719cfb9b47SDarrick J. Wong 	xfs_failaddr_t		fa;
37729cfb9b47SDarrick J. Wong 
37739cfb9b47SDarrick J. Wong 	fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
37749cfb9b47SDarrick J. Wong 	if (fa) {
377522431bf3SDarrick J. Wong 		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
377622431bf3SDarrick J. Wong 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
377722431bf3SDarrick J. Wong 				ifp->if_u1.if_data, ifp->if_bytes, fa);
37789cfb9b47SDarrick J. Wong 		return false;
37799cfb9b47SDarrick J. Wong 	}
37809cfb9b47SDarrick J. Wong 
37819cfb9b47SDarrick J. Wong 	fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
37829cfb9b47SDarrick J. Wong 	if (fa) {
378322431bf3SDarrick J. Wong 		ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
378422431bf3SDarrick J. Wong 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
378522431bf3SDarrick J. Wong 				ifp ? ifp->if_u1.if_data : NULL,
378622431bf3SDarrick J. Wong 				ifp ? ifp->if_bytes : 0, fa);
37879cfb9b47SDarrick J. Wong 		return false;
37889cfb9b47SDarrick J. Wong 	}
37899cfb9b47SDarrick J. Wong 	return true;
37909cfb9b47SDarrick J. Wong }
37919cfb9b47SDarrick J. Wong 
37921da177e4SLinus Torvalds STATIC int
37931da177e4SLinus Torvalds xfs_iflush_int(
379493848a99SChristoph Hellwig 	struct xfs_inode	*ip,
379593848a99SChristoph Hellwig 	struct xfs_buf		*bp)
37961da177e4SLinus Torvalds {
379793848a99SChristoph Hellwig 	struct xfs_inode_log_item *iip = ip->i_itemp;
379893848a99SChristoph Hellwig 	struct xfs_dinode	*dip;
379993848a99SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
38001da177e4SLinus Torvalds 
3801579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3802474fce06SChristoph Hellwig 	ASSERT(xfs_isiflocked(ip));
38031da177e4SLinus Torvalds 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
38048096b1ebSChristoph Hellwig 	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
380593848a99SChristoph Hellwig 	ASSERT(iip != NULL && iip->ili_fields != 0);
38061da177e4SLinus Torvalds 
38071da177e4SLinus Torvalds 	/* set *dip = inode's place in the buffer */
380888ee2df7SChristoph Hellwig 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
38091da177e4SLinus Torvalds 
381069ef921bSChristoph Hellwig 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
38119e24cfd0SDarrick J. Wong 			       mp, XFS_ERRTAG_IFLUSH_1)) {
38126a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3813c9690043SDarrick J. Wong 			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
38146a19d939SDave Chinner 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
38151da177e4SLinus Torvalds 		goto corrupt_out;
38161da177e4SLinus Torvalds 	}
3817c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode)) {
38181da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
38191da177e4SLinus Torvalds 		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
38201da177e4SLinus Torvalds 		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
38219e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_3)) {
38226a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3823c9690043SDarrick J. Wong 				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
38246a19d939SDave Chinner 				__func__, ip->i_ino, ip);
38251da177e4SLinus Torvalds 			goto corrupt_out;
38261da177e4SLinus Torvalds 		}
3827c19b3b05SDave Chinner 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
38281da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
38291da177e4SLinus Torvalds 		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
38301da177e4SLinus Torvalds 		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
38311da177e4SLinus Torvalds 		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
38329e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_4)) {
38336a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3834c9690043SDarrick J. Wong 				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
38356a19d939SDave Chinner 				__func__, ip->i_ino, ip);
38361da177e4SLinus Torvalds 			goto corrupt_out;
38371da177e4SLinus Torvalds 		}
38381da177e4SLinus Torvalds 	}
38391da177e4SLinus Torvalds 	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
38409e24cfd0SDarrick J. Wong 				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
38416a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
38426a19d939SDave Chinner 			"%s: detected corrupt incore inode %Lu, "
3843c9690043SDarrick J. Wong 			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
38446a19d939SDave Chinner 			__func__, ip->i_ino,
38451da177e4SLinus Torvalds 			ip->i_d.di_nextents + ip->i_d.di_anextents,
38466a19d939SDave Chinner 			ip->i_d.di_nblocks, ip);
38471da177e4SLinus Torvalds 		goto corrupt_out;
38481da177e4SLinus Torvalds 	}
38491da177e4SLinus Torvalds 	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
38509e24cfd0SDarrick J. Wong 				mp, XFS_ERRTAG_IFLUSH_6)) {
38516a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3852c9690043SDarrick J. Wong 			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
38536a19d939SDave Chinner 			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
38541da177e4SLinus Torvalds 		goto corrupt_out;
38551da177e4SLinus Torvalds 	}
3856e60896d8SDave Chinner 
38571da177e4SLinus Torvalds 	/*
3858263997a6SDave Chinner 	 * Inode item log recovery for v2 inodes are dependent on the
3859e60896d8SDave Chinner 	 * di_flushiter count for correct sequencing. We bump the flush
3860e60896d8SDave Chinner 	 * iteration count so we can detect flushes which postdate a log record
3861e60896d8SDave Chinner 	 * during recovery. This is redundant as we now log every change and
3862e60896d8SDave Chinner 	 * hence this can't happen but we need to still do it to ensure
3863e60896d8SDave Chinner 	 * backwards compatibility with old kernels that predate logging all
3864e60896d8SDave Chinner 	 * inode changes.
38651da177e4SLinus Torvalds 	 */
38666471e9c5SChristoph Hellwig 	if (!xfs_sb_version_has_v3inode(&mp->m_sb))
38671da177e4SLinus Torvalds 		ip->i_d.di_flushiter++;
38681da177e4SLinus Torvalds 
38699cfb9b47SDarrick J. Wong 	/* Check the inline fork data before we write out. */
38709cfb9b47SDarrick J. Wong 	if (!xfs_inode_verify_forks(ip))
3871005c5db8SDarrick J. Wong 		goto corrupt_out;
3872005c5db8SDarrick J. Wong 
38731da177e4SLinus Torvalds 	/*
38743987848cSDave Chinner 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
38753987848cSDave Chinner 	 * copy out the core of the inode, because if the inode is dirty at all
38763987848cSDave Chinner 	 * the core must be.
38771da177e4SLinus Torvalds 	 */
387893f958f9SDave Chinner 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
38791da177e4SLinus Torvalds 
38801da177e4SLinus Torvalds 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
38811da177e4SLinus Torvalds 	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
38821da177e4SLinus Torvalds 		ip->i_d.di_flushiter = 0;
38831da177e4SLinus Torvalds 
3884005c5db8SDarrick J. Wong 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3885005c5db8SDarrick J. Wong 	if (XFS_IFORK_Q(ip))
3886005c5db8SDarrick J. Wong 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
38871da177e4SLinus Torvalds 	xfs_inobp_check(mp, bp);
38881da177e4SLinus Torvalds 
38891da177e4SLinus Torvalds 	/*
3890f5d8d5c4SChristoph Hellwig 	 * We've recorded everything logged in the inode, so we'd like to clear
3891f5d8d5c4SChristoph Hellwig 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3892f5d8d5c4SChristoph Hellwig 	 * However, we can't stop logging all this information until the data
3893f5d8d5c4SChristoph Hellwig 	 * we've copied into the disk buffer is written to disk.  If we did we
3894f5d8d5c4SChristoph Hellwig 	 * might overwrite the copy of the inode in the log with all the data
3895f5d8d5c4SChristoph Hellwig 	 * after re-logging only part of it, and in the face of a crash we
3896f5d8d5c4SChristoph Hellwig 	 * wouldn't have all the data we need to recover.
38971da177e4SLinus Torvalds 	 *
3898f5d8d5c4SChristoph Hellwig 	 * What we do is move the bits to the ili_last_fields field.  When
3899f5d8d5c4SChristoph Hellwig 	 * logging the inode, these bits are moved back to the ili_fields field.
3900f5d8d5c4SChristoph Hellwig 	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3901f5d8d5c4SChristoph Hellwig 	 * know that the information those bits represent is permanently on
3902f5d8d5c4SChristoph Hellwig 	 * disk.  As long as the flush completes before the inode is logged
3903f5d8d5c4SChristoph Hellwig 	 * again, then both ili_fields and ili_last_fields will be cleared.
39041da177e4SLinus Torvalds 	 *
3905f5d8d5c4SChristoph Hellwig 	 * We can play with the ili_fields bits here, because the inode lock
3906f5d8d5c4SChristoph Hellwig 	 * must be held exclusively in order to set bits there and the flush
3907f5d8d5c4SChristoph Hellwig 	 * lock protects the ili_last_fields bits.  Set ili_logged so the flush
3908f5d8d5c4SChristoph Hellwig 	 * done routine can tell whether or not to look in the AIL.  Also, store
3909f5d8d5c4SChristoph Hellwig 	 * the current LSN of the inode so that we can tell whether the item has
3910f5d8d5c4SChristoph Hellwig 	 * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
3911f5d8d5c4SChristoph Hellwig 	 * need the AIL lock, because it is a 64 bit value that cannot be read
3912f5d8d5c4SChristoph Hellwig 	 * atomically.
39131da177e4SLinus Torvalds 	 */
3914f5d8d5c4SChristoph Hellwig 	iip->ili_last_fields = iip->ili_fields;
3915f5d8d5c4SChristoph Hellwig 	iip->ili_fields = 0;
3916fc0561ceSDave Chinner 	iip->ili_fsync_fields = 0;
39171da177e4SLinus Torvalds 	iip->ili_logged = 1;
39181da177e4SLinus Torvalds 
39197b2e2a31SDavid Chinner 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
39207b2e2a31SDavid Chinner 				&iip->ili_item.li_lsn);
39211da177e4SLinus Torvalds 
39221da177e4SLinus Torvalds 	/*
39231da177e4SLinus Torvalds 	 * Attach the function xfs_iflush_done to the inode's
39241da177e4SLinus Torvalds 	 * buffer.  This will remove the inode from the AIL
39251da177e4SLinus Torvalds 	 * and unlock the inode's flush lock when the inode is
39261da177e4SLinus Torvalds 	 * completely written to disk.
39271da177e4SLinus Torvalds 	 */
3928ca30b2a7SChristoph Hellwig 	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
39291da177e4SLinus Torvalds 
393093848a99SChristoph Hellwig 	/* generate the checksum. */
393193848a99SChristoph Hellwig 	xfs_dinode_calc_crc(mp, dip);
393293848a99SChristoph Hellwig 
3933643c8c05SCarlos Maiolino 	ASSERT(!list_empty(&bp->b_li_list));
3934cb669ca5SChristoph Hellwig 	ASSERT(bp->b_iodone != NULL);
39351da177e4SLinus Torvalds 	return 0;
39361da177e4SLinus Torvalds 
39371da177e4SLinus Torvalds corrupt_out:
39382451337dSDave Chinner 	return -EFSCORRUPTED;
39391da177e4SLinus Torvalds }
394044a8736bSDarrick J. Wong 
394144a8736bSDarrick J. Wong /* Release an inode. */
394244a8736bSDarrick J. Wong void
394344a8736bSDarrick J. Wong xfs_irele(
394444a8736bSDarrick J. Wong 	struct xfs_inode	*ip)
394544a8736bSDarrick J. Wong {
394644a8736bSDarrick J. Wong 	trace_xfs_irele(ip, _RET_IP_);
394744a8736bSDarrick J. Wong 	iput(VFS_I(ip));
394844a8736bSDarrick J. Wong }
3949