xref: /openbmc/linux/fs/xfs/xfs_inode.c (revision b33ce57d3e61020328582ce6d7dbae1d694ac496)
10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
33e57ecf6SOlaf Weber  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
47b718769SNathan Scott  * All Rights Reserved.
51da177e4SLinus Torvalds  */
6f0e28280SJeff Layton #include <linux/iversion.h>
740ebd81dSRobert P. J. Day 
81da177e4SLinus Torvalds #include "xfs.h"
9a844f451SNathan Scott #include "xfs_fs.h"
1070a9883cSDave Chinner #include "xfs_shared.h"
11239880efSDave Chinner #include "xfs_format.h"
12239880efSDave Chinner #include "xfs_log_format.h"
13239880efSDave Chinner #include "xfs_trans_resv.h"
141da177e4SLinus Torvalds #include "xfs_sb.h"
151da177e4SLinus Torvalds #include "xfs_mount.h"
163ab78df2SDarrick J. Wong #include "xfs_defer.h"
17a4fbe6abSDave Chinner #include "xfs_inode.h"
18c24b5dfaSDave Chinner #include "xfs_dir2.h"
19c24b5dfaSDave Chinner #include "xfs_attr.h"
20239880efSDave Chinner #include "xfs_trans_space.h"
21239880efSDave Chinner #include "xfs_trans.h"
221da177e4SLinus Torvalds #include "xfs_buf_item.h"
23a844f451SNathan Scott #include "xfs_inode_item.h"
24a844f451SNathan Scott #include "xfs_ialloc.h"
25a844f451SNathan Scott #include "xfs_bmap.h"
2668988114SDave Chinner #include "xfs_bmap_util.h"
27e9e899a2SDarrick J. Wong #include "xfs_errortag.h"
281da177e4SLinus Torvalds #include "xfs_error.h"
291da177e4SLinus Torvalds #include "xfs_quota.h"
302a82b8beSDavid Chinner #include "xfs_filestream.h"
310b1b213fSChristoph Hellwig #include "xfs_trace.h"
3233479e05SDave Chinner #include "xfs_icache.h"
33c24b5dfaSDave Chinner #include "xfs_symlink.h"
34239880efSDave Chinner #include "xfs_trans_priv.h"
35239880efSDave Chinner #include "xfs_log.h"
36a4fbe6abSDave Chinner #include "xfs_bmap_btree.h"
37aa8968f2SDarrick J. Wong #include "xfs_reflink.h"
381da177e4SLinus Torvalds 
391da177e4SLinus Torvalds kmem_zone_t *xfs_inode_zone;
401da177e4SLinus Torvalds 
411da177e4SLinus Torvalds /*
428f04c47aSChristoph Hellwig  * Used in xfs_itruncate_extents().  This is the maximum number of extents
431da177e4SLinus Torvalds  * freed from a file in a single transaction.
441da177e4SLinus Torvalds  */
451da177e4SLinus Torvalds #define	XFS_ITRUNC_MAX_EXTENTS	2
461da177e4SLinus Torvalds 
4754d7b5c1SDave Chinner STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
4854d7b5c1SDave Chinner STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
49ab297431SZhi Yong Wu 
502a0ec1d9SDave Chinner /*
512a0ec1d9SDave Chinner  * Helper function to extract the extent size hint from an inode.
522a0ec1d9SDave Chinner  */
532a0ec1d9SDave Chinner xfs_extlen_t
542a0ec1d9SDave Chinner xfs_get_extsz_hint(
552a0ec1d9SDave Chinner 	struct xfs_inode	*ip)
562a0ec1d9SDave Chinner {
57bdb2ed2dSChristoph Hellwig 	/*
58bdb2ed2dSChristoph Hellwig 	 * No point in aligning allocations if we need to COW to actually
59bdb2ed2dSChristoph Hellwig 	 * write to them.
60bdb2ed2dSChristoph Hellwig 	 */
61bdb2ed2dSChristoph Hellwig 	if (xfs_is_always_cow_inode(ip))
62bdb2ed2dSChristoph Hellwig 		return 0;
63031474c2SChristoph Hellwig 	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
64031474c2SChristoph Hellwig 		return ip->i_extsize;
652a0ec1d9SDave Chinner 	if (XFS_IS_REALTIME_INODE(ip))
662a0ec1d9SDave Chinner 		return ip->i_mount->m_sb.sb_rextsize;
672a0ec1d9SDave Chinner 	return 0;
682a0ec1d9SDave Chinner }
692a0ec1d9SDave Chinner 
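/*
 * Illustrative sketch, not part of the original file: a caller that wants to
 * align an allocation request to the extent size hint might round the length
 * up like this.  The helper name xfs_example_round_to_extsz_hint() is made up
 * for illustration only.
 */
static inline xfs_extlen_t
xfs_example_round_to_extsz_hint(
	struct xfs_inode	*ip,
	xfs_extlen_t		len)
{
	xfs_extlen_t		hint = xfs_get_extsz_hint(ip);

	/* A zero hint (e.g. always-COW inodes) means no alignment. */
	if (hint)
		len = roundup(len, hint);
	return len;
}
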
70fa96acadSDave Chinner /*
71f7ca3522SDarrick J. Wong  * Helper function to extract CoW extent size hint from inode.
72f7ca3522SDarrick J. Wong  * Between the extent size hint and the CoW extent size hint, we
73e153aa79SDarrick J. Wong  * return the greater of the two.  If the value is zero (automatic),
74e153aa79SDarrick J. Wong  * use the default size.
75f7ca3522SDarrick J. Wong  */
76f7ca3522SDarrick J. Wong xfs_extlen_t
77f7ca3522SDarrick J. Wong xfs_get_cowextsz_hint(
78f7ca3522SDarrick J. Wong 	struct xfs_inode	*ip)
79f7ca3522SDarrick J. Wong {
80f7ca3522SDarrick J. Wong 	xfs_extlen_t		a, b;
81f7ca3522SDarrick J. Wong 
82f7ca3522SDarrick J. Wong 	a = 0;
83f7ca3522SDarrick J. Wong 	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
84*b33ce57dSChristoph Hellwig 		a = ip->i_cowextsize;
85f7ca3522SDarrick J. Wong 	b = xfs_get_extsz_hint(ip);
86f7ca3522SDarrick J. Wong 
87e153aa79SDarrick J. Wong 	a = max(a, b);
88e153aa79SDarrick J. Wong 	if (a == 0)
89e153aa79SDarrick J. Wong 		return XFS_DEFAULT_COWEXTSZ_HINT;
90f7ca3522SDarrick J. Wong 	return a;
91f7ca3522SDarrick J. Wong }
92f7ca3522SDarrick J. Wong 
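/*
 * Worked example (illustrative, not part of the original file): an inode with
 * a 16 block extent size hint and a 64 block CoW extent size hint gets a CoW
 * hint of 64 blocks; if both hints are zero, XFS_DEFAULT_COWEXTSZ_HINT is
 * used instead.
 */
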
93f7ca3522SDarrick J. Wong /*
94efa70be1SChristoph Hellwig  * These two are wrapper routines around the xfs_ilock() routine used to
95efa70be1SChristoph Hellwig  * centralize some grungy code.  They are used in places that wish to lock the
96efa70be1SChristoph Hellwig  * inode solely for reading the extents.  The reason these places can't just
97efa70be1SChristoph Hellwig  * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
98efa70be1SChristoph Hellwig  * bringing in of the extents from disk for a file in b-tree format.  If the
99efa70be1SChristoph Hellwig  * inode is in b-tree format, then we need to lock the inode exclusively until
100efa70be1SChristoph Hellwig  * the extents are read in.  Locking it exclusively all the time would limit
101efa70be1SChristoph Hellwig  * our parallelism unnecessarily, though.  What we do instead is check to see
102efa70be1SChristoph Hellwig  * if the extents have been read in yet, and only lock the inode exclusively
103efa70be1SChristoph Hellwig  * if they have not.
104fa96acadSDave Chinner  *
105efa70be1SChristoph Hellwig  * The functions return a value which should be given to the corresponding
10601f4f327SChristoph Hellwig  * xfs_iunlock() call.
107fa96acadSDave Chinner  */
108fa96acadSDave Chinner uint
109309ecac8SChristoph Hellwig xfs_ilock_data_map_shared(
110309ecac8SChristoph Hellwig 	struct xfs_inode	*ip)
111fa96acadSDave Chinner {
112309ecac8SChristoph Hellwig 	uint			lock_mode = XFS_ILOCK_SHARED;
113fa96acadSDave Chinner 
114f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE &&
115309ecac8SChristoph Hellwig 	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
116fa96acadSDave Chinner 		lock_mode = XFS_ILOCK_EXCL;
117fa96acadSDave Chinner 	xfs_ilock(ip, lock_mode);
118fa96acadSDave Chinner 	return lock_mode;
119fa96acadSDave Chinner }
120fa96acadSDave Chinner 
121efa70be1SChristoph Hellwig uint
122efa70be1SChristoph Hellwig xfs_ilock_attr_map_shared(
123efa70be1SChristoph Hellwig 	struct xfs_inode	*ip)
124fa96acadSDave Chinner {
125efa70be1SChristoph Hellwig 	uint			lock_mode = XFS_ILOCK_SHARED;
126efa70be1SChristoph Hellwig 
127f7e67b20SChristoph Hellwig 	if (ip->i_afp &&
128f7e67b20SChristoph Hellwig 	    ip->i_afp->if_format == XFS_DINODE_FMT_BTREE &&
129efa70be1SChristoph Hellwig 	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
130efa70be1SChristoph Hellwig 		lock_mode = XFS_ILOCK_EXCL;
131efa70be1SChristoph Hellwig 	xfs_ilock(ip, lock_mode);
132efa70be1SChristoph Hellwig 	return lock_mode;
133fa96acadSDave Chinner }
134fa96acadSDave Chinner 
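/*
 * Illustrative sketch, not part of the original file: a caller that only
 * needs to walk the data fork extents keeps the lock mode returned above and
 * hands it back to xfs_iunlock().  The function name is made up for
 * illustration only.
 */
static inline void
xfs_example_walk_data_extents(
	struct xfs_inode	*ip)
{
	uint			lock_mode;

	lock_mode = xfs_ilock_data_map_shared(ip);
	/* ... read ip->i_df extent records here ... */
	xfs_iunlock(ip, lock_mode);
}
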
135fa96acadSDave Chinner /*
13665523218SChristoph Hellwig  * In addition to i_rwsem in the VFS inode, the xfs inode contains two
13765523218SChristoph Hellwig  * multi-reader locks: i_mmap_lock and i_lock.  This routine allows
13865523218SChristoph Hellwig  * various combinations of the locks to be obtained.
139fa96acadSDave Chinner  *
140653c60b6SDave Chinner  * The 3 locks should always be ordered so that the IO lock is obtained first,
141653c60b6SDave Chinner  * the mmap lock second and the ilock last in order to prevent deadlock.
142fa96acadSDave Chinner  *
143653c60b6SDave Chinner  * Basic locking order:
144653c60b6SDave Chinner  *
14565523218SChristoph Hellwig  * i_rwsem -> i_mmap_lock -> page_lock -> i_lock
146653c60b6SDave Chinner  *
147c1e8d7c6SMichel Lespinasse  * mmap_lock locking order:
148653c60b6SDave Chinner  *
149c1e8d7c6SMichel Lespinasse  * i_rwsem -> page lock -> mmap_lock
150c1e8d7c6SMichel Lespinasse  * mmap_lock -> i_mmap_lock -> page_lock
151653c60b6SDave Chinner  *
152c1e8d7c6SMichel Lespinasse  * The difference in mmap_lock locking order means that we cannot hold the
153653c60b6SDave Chinner  * i_mmap_lock over syscall-based read(2)/write(2) IO. These IO paths can
154c1e8d7c6SMichel Lespinasse  * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
155653c60b6SDave Chinner  * in get_user_pages() to map the user pages into the kernel address space for
15665523218SChristoph Hellwig  * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
157c1e8d7c6SMichel Lespinasse  * page faults already hold the mmap_lock.
158653c60b6SDave Chinner  *
159653c60b6SDave Chinner  * Hence to serialise fully against both syscall and mmap based IO, we need to
16065523218SChristoph Hellwig  * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
161653c60b6SDave Chinner  * taken in places where we need to invalidate the page cache in a race
162653c60b6SDave Chinner  * free manner (e.g. truncate, hole punch and other extent manipulation
163653c60b6SDave Chinner  * functions).
164fa96acadSDave Chinner  */
165fa96acadSDave Chinner void
166fa96acadSDave Chinner xfs_ilock(
167fa96acadSDave Chinner 	xfs_inode_t		*ip,
168fa96acadSDave Chinner 	uint			lock_flags)
169fa96acadSDave Chinner {
170fa96acadSDave Chinner 	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
171fa96acadSDave Chinner 
172fa96acadSDave Chinner 	/*
173fa96acadSDave Chinner 	 * You can't set both SHARED and EXCL for the same lock, and only the
174fa96acadSDave Chinner 	 * XFS_IOLOCK, XFS_MMAPLOCK and XFS_ILOCK SHARED and EXCL variants are
175fa96acadSDave Chinner 	 * valid values to set in lock_flags.
176fa96acadSDave Chinner 	 */
177fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
178fa96acadSDave Chinner 	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
179653c60b6SDave Chinner 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
180653c60b6SDave Chinner 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
181fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
182fa96acadSDave Chinner 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
1830952c818SDave Chinner 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
184fa96acadSDave Chinner 
18565523218SChristoph Hellwig 	if (lock_flags & XFS_IOLOCK_EXCL) {
18665523218SChristoph Hellwig 		down_write_nested(&VFS_I(ip)->i_rwsem,
18765523218SChristoph Hellwig 				  XFS_IOLOCK_DEP(lock_flags));
18865523218SChristoph Hellwig 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
18965523218SChristoph Hellwig 		down_read_nested(&VFS_I(ip)->i_rwsem,
19065523218SChristoph Hellwig 				 XFS_IOLOCK_DEP(lock_flags));
19165523218SChristoph Hellwig 	}
192fa96acadSDave Chinner 
193653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
194653c60b6SDave Chinner 		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
195653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
196653c60b6SDave Chinner 		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
197653c60b6SDave Chinner 
198fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
199fa96acadSDave Chinner 		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
200fa96acadSDave Chinner 	else if (lock_flags & XFS_ILOCK_SHARED)
201fa96acadSDave Chinner 		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
202fa96acadSDave Chinner }
203fa96acadSDave Chinner 
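/*
 * Illustrative sketch, not part of the original file: taking the IO lock and
 * the inode lock together in a single call, then dropping both with the same
 * flags.  The function name is made up for illustration only.
 */
static inline void
xfs_example_lock_for_update(
	struct xfs_inode	*ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	/* ... modify the inode while holding both locks ... */
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
}
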
204fa96acadSDave Chinner /*
205fa96acadSDave Chinner  * This is just like xfs_ilock(), except that the caller
206fa96acadSDave Chinner  * is guaranteed not to sleep.  It returns 1 if it gets
207fa96acadSDave Chinner  * the requested locks and 0 otherwise.  If the IO lock is
208fa96acadSDave Chinner  * obtained but the inode lock cannot be, then the IO lock
209fa96acadSDave Chinner  * is dropped before returning.
210fa96acadSDave Chinner  *
211fa96acadSDave Chinner  * ip -- the inode being locked
212fa96acadSDave Chinner  * lock_flags -- this parameter indicates the inode's locks
213fa96acadSDave Chinner  *       to be locked.  See the comment for xfs_ilock() for a list
214fa96acadSDave Chinner  *	 of valid values.
215fa96acadSDave Chinner  */
216fa96acadSDave Chinner int
217fa96acadSDave Chinner xfs_ilock_nowait(
218fa96acadSDave Chinner 	xfs_inode_t		*ip,
219fa96acadSDave Chinner 	uint			lock_flags)
220fa96acadSDave Chinner {
221fa96acadSDave Chinner 	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
222fa96acadSDave Chinner 
223fa96acadSDave Chinner 	/*
224fa96acadSDave Chinner 	 * You can't set both SHARED and EXCL for the same lock, and only the
225fa96acadSDave Chinner 	 * XFS_IOLOCK, XFS_MMAPLOCK and XFS_ILOCK SHARED and EXCL variants are
226fa96acadSDave Chinner 	 * valid values to set in lock_flags.
227fa96acadSDave Chinner 	 */
228fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
229fa96acadSDave Chinner 	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
230653c60b6SDave Chinner 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
231653c60b6SDave Chinner 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
232fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
233fa96acadSDave Chinner 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
2340952c818SDave Chinner 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
235fa96acadSDave Chinner 
236fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL) {
23765523218SChristoph Hellwig 		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
238fa96acadSDave Chinner 			goto out;
239fa96acadSDave Chinner 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
24065523218SChristoph Hellwig 		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
241fa96acadSDave Chinner 			goto out;
242fa96acadSDave Chinner 	}
243653c60b6SDave Chinner 
244653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
245653c60b6SDave Chinner 		if (!mrtryupdate(&ip->i_mmaplock))
246653c60b6SDave Chinner 			goto out_undo_iolock;
247653c60b6SDave Chinner 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
248653c60b6SDave Chinner 		if (!mrtryaccess(&ip->i_mmaplock))
249653c60b6SDave Chinner 			goto out_undo_iolock;
250653c60b6SDave Chinner 	}
251653c60b6SDave Chinner 
252fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL) {
253fa96acadSDave Chinner 		if (!mrtryupdate(&ip->i_lock))
254653c60b6SDave Chinner 			goto out_undo_mmaplock;
255fa96acadSDave Chinner 	} else if (lock_flags & XFS_ILOCK_SHARED) {
256fa96acadSDave Chinner 		if (!mrtryaccess(&ip->i_lock))
257653c60b6SDave Chinner 			goto out_undo_mmaplock;
258fa96acadSDave Chinner 	}
259fa96acadSDave Chinner 	return 1;
260fa96acadSDave Chinner 
261653c60b6SDave Chinner out_undo_mmaplock:
262653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
263653c60b6SDave Chinner 		mrunlock_excl(&ip->i_mmaplock);
264653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
265653c60b6SDave Chinner 		mrunlock_shared(&ip->i_mmaplock);
266fa96acadSDave Chinner out_undo_iolock:
267fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
26865523218SChristoph Hellwig 		up_write(&VFS_I(ip)->i_rwsem);
269fa96acadSDave Chinner 	else if (lock_flags & XFS_IOLOCK_SHARED)
27065523218SChristoph Hellwig 		up_read(&VFS_I(ip)->i_rwsem);
271fa96acadSDave Chinner out:
272fa96acadSDave Chinner 	return 0;
273fa96acadSDave Chinner }
274fa96acadSDave Chinner 
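/*
 * Illustrative sketch, not part of the original file: a trylock-style caller
 * backs off when xfs_ilock_nowait() fails rather than sleeping, much like the
 * AIL deadlock avoidance in xfs_lock_inodes() below.  The function name and
 * the -EAGAIN convention are made up for illustration only.
 */
static inline int
xfs_example_trylock_ilock(
	struct xfs_inode	*ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return -EAGAIN;
	/* ... do work under the exclusive ilock ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}
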
275fa96acadSDave Chinner /*
276fa96acadSDave Chinner  * xfs_iunlock() is used to drop the inode locks acquired with
277fa96acadSDave Chinner  * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
278fa96acadSDave Chinner  * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
279fa96acadSDave Chinner  * that we know which locks to drop.
280fa96acadSDave Chinner  *
281fa96acadSDave Chinner  * ip -- the inode being unlocked
282fa96acadSDave Chinner  * lock_flags -- this parameter indicates the inode's locks
283fa96acadSDave Chinner  *       to be unlocked.  See the comment for xfs_ilock() for a list
284fa96acadSDave Chinner  *	 of valid values for this parameter.
285fa96acadSDave Chinner  *
286fa96acadSDave Chinner  */
287fa96acadSDave Chinner void
288fa96acadSDave Chinner xfs_iunlock(
289fa96acadSDave Chinner 	xfs_inode_t		*ip,
290fa96acadSDave Chinner 	uint			lock_flags)
291fa96acadSDave Chinner {
292fa96acadSDave Chinner 	/*
293fa96acadSDave Chinner 	 * You can't set both SHARED and EXCL for the same lock, and only the
294fa96acadSDave Chinner 	 * XFS_IOLOCK, XFS_MMAPLOCK and XFS_ILOCK SHARED and EXCL variants are
295fa96acadSDave Chinner 	 * valid values to set in lock_flags.
296fa96acadSDave Chinner 	 */
297fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
298fa96acadSDave Chinner 	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
299653c60b6SDave Chinner 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
300653c60b6SDave Chinner 	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
301fa96acadSDave Chinner 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
302fa96acadSDave Chinner 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
3030952c818SDave Chinner 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
304fa96acadSDave Chinner 	ASSERT(lock_flags != 0);
305fa96acadSDave Chinner 
306fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
30765523218SChristoph Hellwig 		up_write(&VFS_I(ip)->i_rwsem);
308fa96acadSDave Chinner 	else if (lock_flags & XFS_IOLOCK_SHARED)
30965523218SChristoph Hellwig 		up_read(&VFS_I(ip)->i_rwsem);
310fa96acadSDave Chinner 
311653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
312653c60b6SDave Chinner 		mrunlock_excl(&ip->i_mmaplock);
313653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
314653c60b6SDave Chinner 		mrunlock_shared(&ip->i_mmaplock);
315653c60b6SDave Chinner 
316fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
317fa96acadSDave Chinner 		mrunlock_excl(&ip->i_lock);
318fa96acadSDave Chinner 	else if (lock_flags & XFS_ILOCK_SHARED)
319fa96acadSDave Chinner 		mrunlock_shared(&ip->i_lock);
320fa96acadSDave Chinner 
321fa96acadSDave Chinner 	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
322fa96acadSDave Chinner }
323fa96acadSDave Chinner 
324fa96acadSDave Chinner /*
325fa96acadSDave Chinner  * Give up write locks.  The I/O lock cannot be held nested
326fa96acadSDave Chinner  * if it is being demoted.
327fa96acadSDave Chinner  */
328fa96acadSDave Chinner void
329fa96acadSDave Chinner xfs_ilock_demote(
330fa96acadSDave Chinner 	xfs_inode_t		*ip,
331fa96acadSDave Chinner 	uint			lock_flags)
332fa96acadSDave Chinner {
333653c60b6SDave Chinner 	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
334653c60b6SDave Chinner 	ASSERT((lock_flags &
335653c60b6SDave Chinner 		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
336fa96acadSDave Chinner 
337fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
338fa96acadSDave Chinner 		mrdemote(&ip->i_lock);
339653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
340653c60b6SDave Chinner 		mrdemote(&ip->i_mmaplock);
341fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
34265523218SChristoph Hellwig 		downgrade_write(&VFS_I(ip)->i_rwsem);
343fa96acadSDave Chinner 
344fa96acadSDave Chinner 	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
345fa96acadSDave Chinner }
346fa96acadSDave Chinner 
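/*
 * Illustrative sketch, not part of the original file: take the ilock
 * exclusively for a setup phase, demote it so readers can proceed, and
 * finally drop the now-shared lock.  The function name is made up for
 * illustration only.
 */
static inline void
xfs_example_demote_ilock(
	struct xfs_inode	*ip)
{
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/* ... exclusive-phase work ... */
	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
	/* ... shared-phase work ... */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}
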
347742ae1e3SDave Chinner #if defined(DEBUG) || defined(XFS_WARN)
348fa96acadSDave Chinner int
349fa96acadSDave Chinner xfs_isilocked(
350fa96acadSDave Chinner 	xfs_inode_t		*ip,
351fa96acadSDave Chinner 	uint			lock_flags)
352fa96acadSDave Chinner {
353fa96acadSDave Chinner 	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
354fa96acadSDave Chinner 		if (!(lock_flags & XFS_ILOCK_SHARED))
355fa96acadSDave Chinner 			return !!ip->i_lock.mr_writer;
356fa96acadSDave Chinner 		return rwsem_is_locked(&ip->i_lock.mr_lock);
357fa96acadSDave Chinner 	}
358fa96acadSDave Chinner 
359653c60b6SDave Chinner 	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
360653c60b6SDave Chinner 		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
361653c60b6SDave Chinner 			return !!ip->i_mmaplock.mr_writer;
362653c60b6SDave Chinner 		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
363653c60b6SDave Chinner 	}
364653c60b6SDave Chinner 
365fa96acadSDave Chinner 	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
366fa96acadSDave Chinner 		if (!(lock_flags & XFS_IOLOCK_SHARED))
36765523218SChristoph Hellwig 			return !debug_locks ||
36865523218SChristoph Hellwig 				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
36965523218SChristoph Hellwig 		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
370fa96acadSDave Chinner 	}
371fa96acadSDave Chinner 
372fa96acadSDave Chinner 	ASSERT(0);
373fa96acadSDave Chinner 	return 0;
374fa96acadSDave Chinner }
375fa96acadSDave Chinner #endif
376fa96acadSDave Chinner 
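/*
 * Illustrative note, not part of the original file: xfs_isilocked() is meant
 * to be consumed from ASSERT() in DEBUG/XFS_WARN builds, e.g.:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */
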
377b6a9947eSDave Chinner /*
378b6a9947eSDave Chinner  * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
379b6a9947eSDave Chinner  * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
380b6a9947eSDave Chinner  * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
381b6a9947eSDave Chinner  * errors and warnings.
382b6a9947eSDave Chinner  */
383b6a9947eSDave Chinner #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
3843403ccc0SDave Chinner static bool
3853403ccc0SDave Chinner xfs_lockdep_subclass_ok(
3863403ccc0SDave Chinner 	int subclass)
3873403ccc0SDave Chinner {
3883403ccc0SDave Chinner 	return subclass < MAX_LOCKDEP_SUBCLASSES;
3893403ccc0SDave Chinner }
3903403ccc0SDave Chinner #else
3913403ccc0SDave Chinner #define xfs_lockdep_subclass_ok(subclass)	(true)
3923403ccc0SDave Chinner #endif
3933403ccc0SDave Chinner 
394c24b5dfaSDave Chinner /*
395653c60b6SDave Chinner  * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
3960952c818SDave Chinner  * value. This can be called for any type of inode lock combination, including
3970952c818SDave Chinner  * parent locking. Care must be taken to ensure we don't overrun the subclass
3980952c818SDave Chinner  * storage fields in the class mask we build.
399c24b5dfaSDave Chinner  */
400c24b5dfaSDave Chinner static inline int
401c24b5dfaSDave Chinner xfs_lock_inumorder(int lock_mode, int subclass)
402c24b5dfaSDave Chinner {
4030952c818SDave Chinner 	int	class = 0;
4040952c818SDave Chinner 
4050952c818SDave Chinner 	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
4060952c818SDave Chinner 			      XFS_ILOCK_RTSUM)));
4073403ccc0SDave Chinner 	ASSERT(xfs_lockdep_subclass_ok(subclass));
4080952c818SDave Chinner 
409653c60b6SDave Chinner 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
4100952c818SDave Chinner 		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
4110952c818SDave Chinner 		class += subclass << XFS_IOLOCK_SHIFT;
412653c60b6SDave Chinner 	}
413653c60b6SDave Chinner 
414653c60b6SDave Chinner 	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
4150952c818SDave Chinner 		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
4160952c818SDave Chinner 		class += subclass << XFS_MMAPLOCK_SHIFT;
417653c60b6SDave Chinner 	}
418653c60b6SDave Chinner 
4190952c818SDave Chinner 	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
4200952c818SDave Chinner 		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
4210952c818SDave Chinner 		class += subclass << XFS_ILOCK_SHIFT;
4220952c818SDave Chinner 	}
423c24b5dfaSDave Chinner 
4240952c818SDave Chinner 	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
425c24b5dfaSDave Chinner }
426c24b5dfaSDave Chinner 
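/*
 * Worked example (illustrative, not part of the original file): for the third
 * inode of a set locked with XFS_ILOCK_EXCL, xfs_lock_inumorder(XFS_ILOCK_EXCL, 2)
 * returns XFS_ILOCK_EXCL with lockdep subclass 2 shifted into the
 * XFS_ILOCK_SHIFT bits of the subclass mask.
 */
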
427c24b5dfaSDave Chinner /*
42895afcf5cSDave Chinner  * The following routine will lock n inodes in exclusive mode.  We assume the
42995afcf5cSDave Chinner  * caller calls us with the inodes in i_ino order.
430c24b5dfaSDave Chinner  *
43195afcf5cSDave Chinner  * We need to detect deadlock where an inode that we lock is in the AIL and we
43295afcf5cSDave Chinner  * start waiting for another inode that is locked by a thread in a long running
43395afcf5cSDave Chinner  * transaction (such as truncate). This can result in deadlock since the long
43495afcf5cSDave Chinner  * running trans might need to wait for the inode we just locked in order to
43595afcf5cSDave Chinner  * push the tail and free space in the log.
4360952c818SDave Chinner  *
4370952c818SDave Chinner  * xfs_lock_inodes() can only be used to lock one type of lock at a time -
4380952c818SDave Chinner  * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
4390952c818SDave Chinner  * lock more than one at a time, lockdep will report false positives saying we
4400952c818SDave Chinner  * have violated locking orders.
441c24b5dfaSDave Chinner  */
4420d5a75e9SEric Sandeen static void
443c24b5dfaSDave Chinner xfs_lock_inodes(
444efe2330fSChristoph Hellwig 	struct xfs_inode	**ips,
445c24b5dfaSDave Chinner 	int			inodes,
446c24b5dfaSDave Chinner 	uint			lock_mode)
447c24b5dfaSDave Chinner {
448c24b5dfaSDave Chinner 	int			attempts = 0, i, j, try_lock;
449efe2330fSChristoph Hellwig 	struct xfs_log_item	*lp;
450c24b5dfaSDave Chinner 
4510952c818SDave Chinner 	/*
4520952c818SDave Chinner 	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
4530952c818SDave Chinner 	 * support an arbitrary depth of locking here, but absolute limits on
454b63da6c8SRandy Dunlap 	 * inodes depend on the type of locking and the limits placed by
4550952c818SDave Chinner 	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
4560952c818SDave Chinner 	 * the asserts.
4570952c818SDave Chinner 	 */
45895afcf5cSDave Chinner 	ASSERT(ips && inodes >= 2 && inodes <= 5);
4590952c818SDave Chinner 	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
4600952c818SDave Chinner 			    XFS_ILOCK_EXCL));
4610952c818SDave Chinner 	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
4620952c818SDave Chinner 			      XFS_ILOCK_SHARED)));
4630952c818SDave Chinner 	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
4640952c818SDave Chinner 		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
4650952c818SDave Chinner 	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
4660952c818SDave Chinner 		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
4670952c818SDave Chinner 
4680952c818SDave Chinner 	if (lock_mode & XFS_IOLOCK_EXCL) {
4690952c818SDave Chinner 		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
4700952c818SDave Chinner 	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
4710952c818SDave Chinner 		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
472c24b5dfaSDave Chinner 
473c24b5dfaSDave Chinner 	try_lock = 0;
474c24b5dfaSDave Chinner 	i = 0;
475c24b5dfaSDave Chinner again:
476c24b5dfaSDave Chinner 	for (; i < inodes; i++) {
477c24b5dfaSDave Chinner 		ASSERT(ips[i]);
478c24b5dfaSDave Chinner 
479c24b5dfaSDave Chinner 		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
480c24b5dfaSDave Chinner 			continue;
481c24b5dfaSDave Chinner 
482c24b5dfaSDave Chinner 		/*
48395afcf5cSDave Chinner 		 * If try_lock is not set yet, make sure all locked inodes are
48495afcf5cSDave Chinner 		 * not in the AIL.  If any are, set try_lock to be used later.
485c24b5dfaSDave Chinner 		 */
486c24b5dfaSDave Chinner 		if (!try_lock) {
487c24b5dfaSDave Chinner 			for (j = (i - 1); j >= 0 && !try_lock; j--) {
488b3b14aacSChristoph Hellwig 				lp = &ips[j]->i_itemp->ili_item;
48922525c17SDave Chinner 				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
490c24b5dfaSDave Chinner 					try_lock++;
491c24b5dfaSDave Chinner 			}
492c24b5dfaSDave Chinner 		}
493c24b5dfaSDave Chinner 
494c24b5dfaSDave Chinner 		/*
495c24b5dfaSDave Chinner 		 * If any of the previous locks we have locked is in the AIL,
496c24b5dfaSDave Chinner 		 * we must TRY to get the second and subsequent locks. If
497c24b5dfaSDave Chinner 		 * we can't get any, we must release all we have
498c24b5dfaSDave Chinner 		 * and try again.
499c24b5dfaSDave Chinner 		 */
50095afcf5cSDave Chinner 		if (!try_lock) {
50195afcf5cSDave Chinner 			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
50295afcf5cSDave Chinner 			continue;
50395afcf5cSDave Chinner 		}
504c24b5dfaSDave Chinner 
50595afcf5cSDave Chinner 		/* try_lock means we have an inode locked that is in the AIL. */
506c24b5dfaSDave Chinner 		ASSERT(i != 0);
50795afcf5cSDave Chinner 		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
50895afcf5cSDave Chinner 			continue;
50995afcf5cSDave Chinner 
51095afcf5cSDave Chinner 		/*
51195afcf5cSDave Chinner 		 * Unlock all previous guys and try again.  xfs_iunlock will try
51295afcf5cSDave Chinner 		 * to push the tail if the inode is in the AIL.
51395afcf5cSDave Chinner 		 */
514c24b5dfaSDave Chinner 		attempts++;
515c24b5dfaSDave Chinner 		for (j = i - 1; j >= 0; j--) {
516c24b5dfaSDave Chinner 			/*
51795afcf5cSDave Chinner 			 * Check to see if we've already unlocked this one.  Not
51895afcf5cSDave Chinner 			 * the first one going back, and the inode ptr is the
51995afcf5cSDave Chinner 			 * same.
520c24b5dfaSDave Chinner 			 */
52195afcf5cSDave Chinner 			if (j != (i - 1) && ips[j] == ips[j + 1])
522c24b5dfaSDave Chinner 				continue;
523c24b5dfaSDave Chinner 
524c24b5dfaSDave Chinner 			xfs_iunlock(ips[j], lock_mode);
525c24b5dfaSDave Chinner 		}
526c24b5dfaSDave Chinner 
527c24b5dfaSDave Chinner 		if ((attempts % 5) == 0) {
528c24b5dfaSDave Chinner 			delay(1); /* Don't just spin the CPU */
529c24b5dfaSDave Chinner 		}
530c24b5dfaSDave Chinner 		i = 0;
531c24b5dfaSDave Chinner 		try_lock = 0;
532c24b5dfaSDave Chinner 		goto again;
533c24b5dfaSDave Chinner 	}
534c24b5dfaSDave Chinner }
535c24b5dfaSDave Chinner 
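/*
 * Illustrative sketch, not part of the original file: rename-style callers
 * sort the inodes by i_ino (duplicate pointers are tolerated) and then lock
 * them all in one go, e.g.:
 *
 *	xfs_lock_inodes(ips, num_inodes, XFS_ILOCK_EXCL);
 */
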
536c24b5dfaSDave Chinner /*
537653c60b6SDave Chinner  * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
5387c2d238aSDarrick J. Wong  * the mmaplock or the ilock, but not more than one type at a time. If we lock
5397c2d238aSDarrick J. Wong  * more than one at a time, lockdep will report false positives saying we have
5407c2d238aSDarrick J. Wong  * violated locking orders.  The iolock must be double-locked separately since
5417c2d238aSDarrick J. Wong  * we use i_rwsem for that.  We now support taking one lock EXCL and the other
5427c2d238aSDarrick J. Wong  * SHARED.
543c24b5dfaSDave Chinner  */
544c24b5dfaSDave Chinner void
545c24b5dfaSDave Chinner xfs_lock_two_inodes(
5467c2d238aSDarrick J. Wong 	struct xfs_inode	*ip0,
5477c2d238aSDarrick J. Wong 	uint			ip0_mode,
5487c2d238aSDarrick J. Wong 	struct xfs_inode	*ip1,
5497c2d238aSDarrick J. Wong 	uint			ip1_mode)
550c24b5dfaSDave Chinner {
5517c2d238aSDarrick J. Wong 	struct xfs_inode	*temp;
5527c2d238aSDarrick J. Wong 	uint			mode_temp;
553c24b5dfaSDave Chinner 	int			attempts = 0;
554efe2330fSChristoph Hellwig 	struct xfs_log_item	*lp;
555c24b5dfaSDave Chinner 
5567c2d238aSDarrick J. Wong 	ASSERT(hweight32(ip0_mode) == 1);
5577c2d238aSDarrick J. Wong 	ASSERT(hweight32(ip1_mode) == 1);
5587c2d238aSDarrick J. Wong 	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
5597c2d238aSDarrick J. Wong 	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
5607c2d238aSDarrick J. Wong 	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
5617c2d238aSDarrick J. Wong 	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
5627c2d238aSDarrick J. Wong 	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
5637c2d238aSDarrick J. Wong 	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
5647c2d238aSDarrick J. Wong 	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
5657c2d238aSDarrick J. Wong 	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
5667c2d238aSDarrick J. Wong 	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
5677c2d238aSDarrick J. Wong 	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
568653c60b6SDave Chinner 
569c24b5dfaSDave Chinner 	ASSERT(ip0->i_ino != ip1->i_ino);
570c24b5dfaSDave Chinner 
571c24b5dfaSDave Chinner 	if (ip0->i_ino > ip1->i_ino) {
572c24b5dfaSDave Chinner 		temp = ip0;
573c24b5dfaSDave Chinner 		ip0 = ip1;
574c24b5dfaSDave Chinner 		ip1 = temp;
5757c2d238aSDarrick J. Wong 		mode_temp = ip0_mode;
5767c2d238aSDarrick J. Wong 		ip0_mode = ip1_mode;
5777c2d238aSDarrick J. Wong 		ip1_mode = mode_temp;
578c24b5dfaSDave Chinner 	}
579c24b5dfaSDave Chinner 
580c24b5dfaSDave Chinner  again:
5817c2d238aSDarrick J. Wong 	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
582c24b5dfaSDave Chinner 
583c24b5dfaSDave Chinner 	/*
584c24b5dfaSDave Chinner 	 * If the first lock we have locked is in the AIL, we must TRY to get
585c24b5dfaSDave Chinner 	 * the second lock. If we can't get it, we must release the first one
586c24b5dfaSDave Chinner 	 * and try again.
587c24b5dfaSDave Chinner 	 */
588b3b14aacSChristoph Hellwig 	lp = &ip0->i_itemp->ili_item;
58922525c17SDave Chinner 	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
5907c2d238aSDarrick J. Wong 		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
5917c2d238aSDarrick J. Wong 			xfs_iunlock(ip0, ip0_mode);
592c24b5dfaSDave Chinner 			if ((++attempts % 5) == 0)
593c24b5dfaSDave Chinner 				delay(1); /* Don't just spin the CPU */
594c24b5dfaSDave Chinner 			goto again;
595c24b5dfaSDave Chinner 		}
596c24b5dfaSDave Chinner 	} else {
5977c2d238aSDarrick J. Wong 		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
598c24b5dfaSDave Chinner 	}
599c24b5dfaSDave Chinner }
600c24b5dfaSDave Chinner 
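/*
 * Illustrative sketch, not part of the original file: a link()-style operation
 * might lock the source inode and the target directory together, e.g.:
 *
 *	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
 */
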
6011da177e4SLinus Torvalds STATIC uint
6021da177e4SLinus Torvalds _xfs_dic2xflags(
603c8ce540dSDarrick J. Wong 	uint16_t		di_flags,
60458f88ca2SDave Chinner 	uint64_t		di_flags2,
60558f88ca2SDave Chinner 	bool			has_attr)
6061da177e4SLinus Torvalds {
6071da177e4SLinus Torvalds 	uint			flags = 0;
6081da177e4SLinus Torvalds 
6091da177e4SLinus Torvalds 	if (di_flags & XFS_DIFLAG_ANY) {
6101da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_REALTIME)
611e7b89481SDave Chinner 			flags |= FS_XFLAG_REALTIME;
6121da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_PREALLOC)
613e7b89481SDave Chinner 			flags |= FS_XFLAG_PREALLOC;
6141da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_IMMUTABLE)
615e7b89481SDave Chinner 			flags |= FS_XFLAG_IMMUTABLE;
6161da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_APPEND)
617e7b89481SDave Chinner 			flags |= FS_XFLAG_APPEND;
6181da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_SYNC)
619e7b89481SDave Chinner 			flags |= FS_XFLAG_SYNC;
6201da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_NOATIME)
621e7b89481SDave Chinner 			flags |= FS_XFLAG_NOATIME;
6221da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_NODUMP)
623e7b89481SDave Chinner 			flags |= FS_XFLAG_NODUMP;
6241da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_RTINHERIT)
625e7b89481SDave Chinner 			flags |= FS_XFLAG_RTINHERIT;
6261da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_PROJINHERIT)
627e7b89481SDave Chinner 			flags |= FS_XFLAG_PROJINHERIT;
6281da177e4SLinus Torvalds 		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
629e7b89481SDave Chinner 			flags |= FS_XFLAG_NOSYMLINKS;
630dd9f438eSNathan Scott 		if (di_flags & XFS_DIFLAG_EXTSIZE)
631e7b89481SDave Chinner 			flags |= FS_XFLAG_EXTSIZE;
632dd9f438eSNathan Scott 		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
633e7b89481SDave Chinner 			flags |= FS_XFLAG_EXTSZINHERIT;
634d3446eacSBarry Naujok 		if (di_flags & XFS_DIFLAG_NODEFRAG)
635e7b89481SDave Chinner 			flags |= FS_XFLAG_NODEFRAG;
6362a82b8beSDavid Chinner 		if (di_flags & XFS_DIFLAG_FILESTREAM)
637e7b89481SDave Chinner 			flags |= FS_XFLAG_FILESTREAM;
6381da177e4SLinus Torvalds 	}
6391da177e4SLinus Torvalds 
64058f88ca2SDave Chinner 	if (di_flags2 & XFS_DIFLAG2_ANY) {
64158f88ca2SDave Chinner 		if (di_flags2 & XFS_DIFLAG2_DAX)
64258f88ca2SDave Chinner 			flags |= FS_XFLAG_DAX;
643f7ca3522SDarrick J. Wong 		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
644f7ca3522SDarrick J. Wong 			flags |= FS_XFLAG_COWEXTSIZE;
64558f88ca2SDave Chinner 	}
64658f88ca2SDave Chinner 
64758f88ca2SDave Chinner 	if (has_attr)
64858f88ca2SDave Chinner 		flags |= FS_XFLAG_HASATTR;
64958f88ca2SDave Chinner 
6501da177e4SLinus Torvalds 	return flags;
6511da177e4SLinus Torvalds }
6521da177e4SLinus Torvalds 
6531da177e4SLinus Torvalds uint
6541da177e4SLinus Torvalds xfs_ip2xflags(
65558f88ca2SDave Chinner 	struct xfs_inode	*ip)
6561da177e4SLinus Torvalds {
65758f88ca2SDave Chinner 	struct xfs_icdinode	*dic = &ip->i_d;
6581da177e4SLinus Torvalds 
65958f88ca2SDave Chinner 	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
6601da177e4SLinus Torvalds }
6611da177e4SLinus Torvalds 
6621da177e4SLinus Torvalds /*
663c24b5dfaSDave Chinner  * Looks up an inode from "name". If ci_name is not NULL, then a CI match
664c24b5dfaSDave Chinner  * is allowed, otherwise it has to be an exact match. If a CI match is found,
665c24b5dfaSDave Chinner  * ci_name->name will point to the actual name (caller must free) or
666c24b5dfaSDave Chinner  * will be set to NULL if an exact match is found.
667c24b5dfaSDave Chinner  */
668c24b5dfaSDave Chinner int
669c24b5dfaSDave Chinner xfs_lookup(
670c24b5dfaSDave Chinner 	xfs_inode_t		*dp,
671c24b5dfaSDave Chinner 	struct xfs_name		*name,
672c24b5dfaSDave Chinner 	xfs_inode_t		**ipp,
673c24b5dfaSDave Chinner 	struct xfs_name		*ci_name)
674c24b5dfaSDave Chinner {
675c24b5dfaSDave Chinner 	xfs_ino_t		inum;
676c24b5dfaSDave Chinner 	int			error;
677c24b5dfaSDave Chinner 
678c24b5dfaSDave Chinner 	trace_xfs_lookup(dp, name);
679c24b5dfaSDave Chinner 
680c24b5dfaSDave Chinner 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
6812451337dSDave Chinner 		return -EIO;
682c24b5dfaSDave Chinner 
683c24b5dfaSDave Chinner 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
684c24b5dfaSDave Chinner 	if (error)
685dbad7c99SDave Chinner 		goto out_unlock;
686c24b5dfaSDave Chinner 
687c24b5dfaSDave Chinner 	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
688c24b5dfaSDave Chinner 	if (error)
689c24b5dfaSDave Chinner 		goto out_free_name;
690c24b5dfaSDave Chinner 
691c24b5dfaSDave Chinner 	return 0;
692c24b5dfaSDave Chinner 
693c24b5dfaSDave Chinner out_free_name:
694c24b5dfaSDave Chinner 	if (ci_name)
695c24b5dfaSDave Chinner 		kmem_free(ci_name->name);
696dbad7c99SDave Chinner out_unlock:
697c24b5dfaSDave Chinner 	*ipp = NULL;
698c24b5dfaSDave Chinner 	return error;
699c24b5dfaSDave Chinner }
700c24b5dfaSDave Chinner 
7018a569d71SDarrick J. Wong /* Propagate di_flags from a parent inode to a child inode. */
7028a569d71SDarrick J. Wong static void
7038a569d71SDarrick J. Wong xfs_inode_inherit_flags(
7048a569d71SDarrick J. Wong 	struct xfs_inode	*ip,
7058a569d71SDarrick J. Wong 	const struct xfs_inode	*pip)
7068a569d71SDarrick J. Wong {
7078a569d71SDarrick J. Wong 	unsigned int		di_flags = 0;
7088a569d71SDarrick J. Wong 	umode_t			mode = VFS_I(ip)->i_mode;
7098a569d71SDarrick J. Wong 
7108a569d71SDarrick J. Wong 	if (S_ISDIR(mode)) {
7118a569d71SDarrick J. Wong 		if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
7128a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_RTINHERIT;
7138a569d71SDarrick J. Wong 		if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
7148a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
715031474c2SChristoph Hellwig 			ip->i_extsize = pip->i_extsize;
7168a569d71SDarrick J. Wong 		}
7178a569d71SDarrick J. Wong 		if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
7188a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_PROJINHERIT;
7198a569d71SDarrick J. Wong 	} else if (S_ISREG(mode)) {
720d4f2c14cSDarrick J. Wong 		if ((pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) &&
721d4f2c14cSDarrick J. Wong 		    xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
7228a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_REALTIME;
7238a569d71SDarrick J. Wong 		if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
7248a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_EXTSIZE;
725031474c2SChristoph Hellwig 			ip->i_extsize = pip->i_extsize;
7268a569d71SDarrick J. Wong 		}
7278a569d71SDarrick J. Wong 	}
7288a569d71SDarrick J. Wong 	if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
7298a569d71SDarrick J. Wong 	    xfs_inherit_noatime)
7308a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NOATIME;
7318a569d71SDarrick J. Wong 	if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
7328a569d71SDarrick J. Wong 	    xfs_inherit_nodump)
7338a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NODUMP;
7348a569d71SDarrick J. Wong 	if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
7358a569d71SDarrick J. Wong 	    xfs_inherit_sync)
7368a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_SYNC;
7378a569d71SDarrick J. Wong 	if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
7388a569d71SDarrick J. Wong 	    xfs_inherit_nosymlinks)
7398a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NOSYMLINKS;
7408a569d71SDarrick J. Wong 	if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
7418a569d71SDarrick J. Wong 	    xfs_inherit_nodefrag)
7428a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NODEFRAG;
7438a569d71SDarrick J. Wong 	if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
7448a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_FILESTREAM;
7458a569d71SDarrick J. Wong 
7468a569d71SDarrick J. Wong 	ip->i_d.di_flags |= di_flags;
7478a569d71SDarrick J. Wong }
7488a569d71SDarrick J. Wong 
7498a569d71SDarrick J. Wong /* Propagate di_flags2 from a parent inode to a child inode. */
7508a569d71SDarrick J. Wong static void
7518a569d71SDarrick J. Wong xfs_inode_inherit_flags2(
7528a569d71SDarrick J. Wong 	struct xfs_inode	*ip,
7538a569d71SDarrick J. Wong 	const struct xfs_inode	*pip)
7548a569d71SDarrick J. Wong {
7558a569d71SDarrick J. Wong 	if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
7568a569d71SDarrick J. Wong 		ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
757*b33ce57dSChristoph Hellwig 		ip->i_cowextsize = pip->i_cowextsize;
7588a569d71SDarrick J. Wong 	}
7598a569d71SDarrick J. Wong 	if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
7608a569d71SDarrick J. Wong 		ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
7618a569d71SDarrick J. Wong }
7628a569d71SDarrick J. Wong 
763c24b5dfaSDave Chinner /*
7641abcf261SDave Chinner  * Initialise a newly allocated inode and return the in-core inode to the
7651abcf261SDave Chinner  * caller locked exclusively.
7661da177e4SLinus Torvalds  */
7670d5a75e9SEric Sandeen static int
7681abcf261SDave Chinner xfs_init_new_inode(
769f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
7701abcf261SDave Chinner 	struct xfs_trans	*tp,
7711abcf261SDave Chinner 	struct xfs_inode	*pip,
7721abcf261SDave Chinner 	xfs_ino_t		ino,
773576b1d67SAl Viro 	umode_t			mode,
77431b084aeSNathan Scott 	xfs_nlink_t		nlink,
77566f36464SChristoph Hellwig 	dev_t			rdev,
7766743099cSArkadiusz Mi?kiewicz 	prid_t			prid,
777e6a688c3SDave Chinner 	bool			init_xattrs,
7781abcf261SDave Chinner 	struct xfs_inode	**ipp)
7791da177e4SLinus Torvalds {
78001ea173eSChristoph Hellwig 	struct inode		*dir = pip ? VFS_I(pip) : NULL;
78193848a99SChristoph Hellwig 	struct xfs_mount	*mp = tp->t_mountp;
7821abcf261SDave Chinner 	struct xfs_inode	*ip;
7831abcf261SDave Chinner 	unsigned int		flags;
7841da177e4SLinus Torvalds 	int			error;
78595582b00SDeepa Dinamani 	struct timespec64	tv;
7863987848cSDave Chinner 	struct inode		*inode;
7871da177e4SLinus Torvalds 
7881da177e4SLinus Torvalds 	/*
7898b26984dSDave Chinner 	 * Protect against obviously corrupt allocation btree records. Later
7908b26984dSDave Chinner 	 * xfs_iget checks will catch re-allocation of other active in-memory
7918b26984dSDave Chinner 	 * and on-disk inodes. If we don't catch reallocating the parent inode
7928b26984dSDave Chinner 	 * here we will deadlock in xfs_iget() so we have to do these checks
7938b26984dSDave Chinner 	 * first.
7948b26984dSDave Chinner 	 */
7958b26984dSDave Chinner 	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
7968b26984dSDave Chinner 		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
7978b26984dSDave Chinner 		return -EFSCORRUPTED;
7988b26984dSDave Chinner 	}
7998b26984dSDave Chinner 
8008b26984dSDave Chinner 	/*
8011abcf261SDave Chinner 	 * Get the in-core inode with the lock held exclusively to prevent
8021abcf261SDave Chinner 	 * others from looking at it until we're done.
8031da177e4SLinus Torvalds 	 */
8041abcf261SDave Chinner 	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
805bf904248SDavid Chinner 	if (error)
8061da177e4SLinus Torvalds 		return error;
8071abcf261SDave Chinner 
8081da177e4SLinus Torvalds 	ASSERT(ip != NULL);
8093987848cSDave Chinner 	inode = VFS_I(ip);
81054d7b5c1SDave Chinner 	set_nlink(inode, nlink);
81166f36464SChristoph Hellwig 	inode->i_rdev = rdev;
812ceaf603cSChristoph Hellwig 	ip->i_projid = prid;
8131da177e4SLinus Torvalds 
81401ea173eSChristoph Hellwig 	if (dir && !(dir->i_mode & S_ISGID) &&
81501ea173eSChristoph Hellwig 	    (mp->m_flags & XFS_MOUNT_GRPID)) {
8167d6beb71SLinus Torvalds 		inode->i_uid = fsuid_into_mnt(mnt_userns);
81701ea173eSChristoph Hellwig 		inode->i_gid = dir->i_gid;
81801ea173eSChristoph Hellwig 		inode->i_mode = mode;
8193d8f2821SChristoph Hellwig 	} else {
8207d6beb71SLinus Torvalds 		inode_init_owner(mnt_userns, inode, dir, mode);
8211da177e4SLinus Torvalds 	}
8221da177e4SLinus Torvalds 
8231da177e4SLinus Torvalds 	/*
8241da177e4SLinus Torvalds 	 * If the group ID of the new file does not match the effective group
8251da177e4SLinus Torvalds 	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
8261da177e4SLinus Torvalds 	 * (and only if the irix_sgid_inherit compatibility variable is set).
8271da177e4SLinus Torvalds 	 */
82854295159SChristoph Hellwig 	if (irix_sgid_inherit &&
829f736d93dSChristoph Hellwig 	    (inode->i_mode & S_ISGID) &&
830f736d93dSChristoph Hellwig 	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
831c19b3b05SDave Chinner 		inode->i_mode &= ~S_ISGID;
8321da177e4SLinus Torvalds 
83313d2c10bSChristoph Hellwig 	ip->i_disk_size = 0;
834daf83964SChristoph Hellwig 	ip->i_df.if_nextents = 0;
8356e73a545SChristoph Hellwig 	ASSERT(ip->i_nblocks == 0);
836dff35fd4SChristoph Hellwig 
837c2050a45SDeepa Dinamani 	tv = current_time(inode);
8383987848cSDave Chinner 	inode->i_mtime = tv;
8393987848cSDave Chinner 	inode->i_atime = tv;
8403987848cSDave Chinner 	inode->i_ctime = tv;
841dff35fd4SChristoph Hellwig 
842031474c2SChristoph Hellwig 	ip->i_extsize = 0;
8431da177e4SLinus Torvalds 	ip->i_d.di_flags = 0;
84493848a99SChristoph Hellwig 
8456471e9c5SChristoph Hellwig 	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
846f0e28280SJeff Layton 		inode_set_iversion(inode, 1);
847*b33ce57dSChristoph Hellwig 		ip->i_cowextsize = 0;
8488d2d878dSChristoph Hellwig 		ip->i_d.di_crtime = tv;
84993848a99SChristoph Hellwig 	}
85093848a99SChristoph Hellwig 
8511da177e4SLinus Torvalds 	flags = XFS_ILOG_CORE;
8521da177e4SLinus Torvalds 	switch (mode & S_IFMT) {
8531da177e4SLinus Torvalds 	case S_IFIFO:
8541da177e4SLinus Torvalds 	case S_IFCHR:
8551da177e4SLinus Torvalds 	case S_IFBLK:
8561da177e4SLinus Torvalds 	case S_IFSOCK:
857f7e67b20SChristoph Hellwig 		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
8581da177e4SLinus Torvalds 		ip->i_df.if_flags = 0;
8591da177e4SLinus Torvalds 		flags |= XFS_ILOG_DEV;
8601da177e4SLinus Torvalds 		break;
8611da177e4SLinus Torvalds 	case S_IFREG:
8621da177e4SLinus Torvalds 	case S_IFDIR:
8638a569d71SDarrick J. Wong 		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY))
8648a569d71SDarrick J. Wong 			xfs_inode_inherit_flags(ip, pip);
8658a569d71SDarrick J. Wong 		if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY))
8668a569d71SDarrick J. Wong 			xfs_inode_inherit_flags2(ip, pip);
8671da177e4SLinus Torvalds 		/* FALLTHROUGH */
8681da177e4SLinus Torvalds 	case S_IFLNK:
869f7e67b20SChristoph Hellwig 		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
8701da177e4SLinus Torvalds 		ip->i_df.if_flags = XFS_IFEXTENTS;
871fcacbc3fSChristoph Hellwig 		ip->i_df.if_bytes = 0;
8726bdcf26aSChristoph Hellwig 		ip->i_df.if_u1.if_root = NULL;
8731da177e4SLinus Torvalds 		break;
8741da177e4SLinus Torvalds 	default:
8751da177e4SLinus Torvalds 		ASSERT(0);
8761da177e4SLinus Torvalds 	}
8771da177e4SLinus Torvalds 
8781da177e4SLinus Torvalds 	/*
879e6a688c3SDave Chinner 	 * If we need to create attributes immediately after allocating the
880e6a688c3SDave Chinner 	 * inode, initialise an empty attribute fork right now. We use the
881e6a688c3SDave Chinner 	 * default fork offset for attributes here as we don't know exactly what
882e6a688c3SDave Chinner 	 * size or how many attributes we might be adding. We can do this
883e6a688c3SDave Chinner 	 * safely here because we know the data fork is completely empty and
884e6a688c3SDave Chinner 	 * this saves us from needing to run a separate transaction to set the
885e6a688c3SDave Chinner 	 * fork offset in the immediate future.
886e6a688c3SDave Chinner 	 */
887e6a688c3SDave Chinner 	if (init_xattrs) {
888e6a688c3SDave Chinner 		ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
889e6a688c3SDave Chinner 		ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
890e6a688c3SDave Chinner 	}
891e6a688c3SDave Chinner 
892e6a688c3SDave Chinner 	/*
8931da177e4SLinus Torvalds 	 * Log the new values stuffed into the inode.
8941da177e4SLinus Torvalds 	 */
895ddc3415aSChristoph Hellwig 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
8961da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, flags);
8971da177e4SLinus Torvalds 
89858c90473SDave Chinner 	/* now that we have an i_mode we can setup the inode structure */
89941be8bedSChristoph Hellwig 	xfs_setup_inode(ip);
9001da177e4SLinus Torvalds 
9011da177e4SLinus Torvalds 	*ipp = ip;
9021da177e4SLinus Torvalds 	return 0;
9031da177e4SLinus Torvalds }
9041da177e4SLinus Torvalds 
905e546cb79SDave Chinner /*
9061abcf261SDave Chinner  * Allocate a new inode from disk and return a pointer to the incore copy. This
9071abcf261SDave Chinner  * routine will internally commit the current transaction and allocate a new one
9081abcf261SDave Chinner  * if we need to allocate more on-disk free inodes to perform the requested
9091abcf261SDave Chinner  * operation.
910e546cb79SDave Chinner  *
9111abcf261SDave Chinner  * If we are allocating quota inodes, we do not have a parent inode to attach to
9121abcf261SDave Chinner  * or associate with (i.e. dp == NULL) because they are not linked into the
9131abcf261SDave Chinner  * directory structure - they are attached directly to the superblock - and so
9141abcf261SDave Chinner  * have no parent.
915e546cb79SDave Chinner  */
916e546cb79SDave Chinner int
917e546cb79SDave Chinner xfs_dir_ialloc(
918f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
9191abcf261SDave Chinner 	struct xfs_trans	**tpp,
9201abcf261SDave Chinner 	struct xfs_inode	*dp,
921e546cb79SDave Chinner 	umode_t			mode,
922e546cb79SDave Chinner 	xfs_nlink_t		nlink,
92366f36464SChristoph Hellwig 	dev_t			rdev,
9241abcf261SDave Chinner 	prid_t			prid,
925e6a688c3SDave Chinner 	bool			init_xattrs,
9261abcf261SDave Chinner 	struct xfs_inode	**ipp)
927e546cb79SDave Chinner {
9288d822dc3SDave Chinner 	struct xfs_buf		*agibp;
9291abcf261SDave Chinner 	xfs_ino_t		parent_ino = dp ? dp->i_ino : 0;
9301abcf261SDave Chinner 	xfs_ino_t		ino;
9311abcf261SDave Chinner 	int			error;
932e546cb79SDave Chinner 
9331abcf261SDave Chinner 	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);
934e546cb79SDave Chinner 
935e546cb79SDave Chinner 	/*
9361abcf261SDave Chinner 	 * Call the space management code to pick the on-disk inode to be
937f3bf6e0fSDave Chinner 	 * allocated.
938e546cb79SDave Chinner 	 */
9398d822dc3SDave Chinner 	error = xfs_dialloc_select_ag(tpp, parent_ino, mode, &agibp);
9401abcf261SDave Chinner 	if (error)
9411abcf261SDave Chinner 		return error;
942e546cb79SDave Chinner 
9438d822dc3SDave Chinner 	if (!agibp)
9441abcf261SDave Chinner 		return -ENOSPC;
945e546cb79SDave Chinner 
9468d822dc3SDave Chinner 	/* Allocate an inode from the selected AG */
9478d822dc3SDave Chinner 	error = xfs_dialloc_ag(*tpp, agibp, parent_ino, &ino);
9488d822dc3SDave Chinner 	if (error)
9498d822dc3SDave Chinner 		return error;
9508d822dc3SDave Chinner 	ASSERT(ino != NULLFSINO);
9518d822dc3SDave Chinner 
952f736d93dSChristoph Hellwig 	return xfs_init_new_inode(mnt_userns, *tpp, dp, ino, mode, nlink, rdev,
953e6a688c3SDave Chinner 				  prid, init_xattrs, ipp);
954e546cb79SDave Chinner }
955e546cb79SDave Chinner 
956e546cb79SDave Chinner /*
95754d7b5c1SDave Chinner  * Decrement the link count on an inode & log the change.  If this causes the
95854d7b5c1SDave Chinner  * link count to go to zero, move the inode to the AGI unlinked list so it can
95954d7b5c1SDave Chinner  * be freed when the last active reference goes away via xfs_inactive().
960e546cb79SDave Chinner  */
9610d5a75e9SEric Sandeen static int			/* error */
962e546cb79SDave Chinner xfs_droplink(
963e546cb79SDave Chinner 	xfs_trans_t *tp,
964e546cb79SDave Chinner 	xfs_inode_t *ip)
965e546cb79SDave Chinner {
966e546cb79SDave Chinner 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
967e546cb79SDave Chinner 
968e546cb79SDave Chinner 	drop_nlink(VFS_I(ip));
969e546cb79SDave Chinner 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
970e546cb79SDave Chinner 
97154d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink)
97254d7b5c1SDave Chinner 		return 0;
97354d7b5c1SDave Chinner 
97454d7b5c1SDave Chinner 	return xfs_iunlink(tp, ip);
975e546cb79SDave Chinner }
976e546cb79SDave Chinner 
977e546cb79SDave Chinner /*
978e546cb79SDave Chinner  * Increment the link count on an inode & log the change.
979e546cb79SDave Chinner  */
98091083269SEric Sandeen static void
981e546cb79SDave Chinner xfs_bumplink(
982e546cb79SDave Chinner 	xfs_trans_t *tp,
983e546cb79SDave Chinner 	xfs_inode_t *ip)
984e546cb79SDave Chinner {
985e546cb79SDave Chinner 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
986e546cb79SDave Chinner 
987e546cb79SDave Chinner 	inc_nlink(VFS_I(ip));
988e546cb79SDave Chinner 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
989e546cb79SDave Chinner }
990e546cb79SDave Chinner 
991c24b5dfaSDave Chinner int
992c24b5dfaSDave Chinner xfs_create(
993f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
994c24b5dfaSDave Chinner 	xfs_inode_t		*dp,
995c24b5dfaSDave Chinner 	struct xfs_name		*name,
996c24b5dfaSDave Chinner 	umode_t			mode,
99766f36464SChristoph Hellwig 	dev_t			rdev,
998e6a688c3SDave Chinner 	bool			init_xattrs,
999c24b5dfaSDave Chinner 	xfs_inode_t		**ipp)
1000c24b5dfaSDave Chinner {
1001c24b5dfaSDave Chinner 	int			is_dir = S_ISDIR(mode);
1002c24b5dfaSDave Chinner 	struct xfs_mount	*mp = dp->i_mount;
1003c24b5dfaSDave Chinner 	struct xfs_inode	*ip = NULL;
1004c24b5dfaSDave Chinner 	struct xfs_trans	*tp = NULL;
1005c24b5dfaSDave Chinner 	int			error;
1006c24b5dfaSDave Chinner 	bool                    unlock_dp_on_error = false;
1007c24b5dfaSDave Chinner 	prid_t			prid;
1008c24b5dfaSDave Chinner 	struct xfs_dquot	*udqp = NULL;
1009c24b5dfaSDave Chinner 	struct xfs_dquot	*gdqp = NULL;
1010c24b5dfaSDave Chinner 	struct xfs_dquot	*pdqp = NULL;
1011062647a8SBrian Foster 	struct xfs_trans_res	*tres;
1012c24b5dfaSDave Chinner 	uint			resblks;
1013c24b5dfaSDave Chinner 
1014c24b5dfaSDave Chinner 	trace_xfs_create(dp, name);
1015c24b5dfaSDave Chinner 
1016c24b5dfaSDave Chinner 	if (XFS_FORCED_SHUTDOWN(mp))
10172451337dSDave Chinner 		return -EIO;
1018c24b5dfaSDave Chinner 
1019163467d3SZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
1020c24b5dfaSDave Chinner 
1021c24b5dfaSDave Chinner 	/*
1022c24b5dfaSDave Chinner 	 * Make sure that we have allocated dquot(s) on disk.
1023c24b5dfaSDave Chinner 	 */
1024b5a08423SDarrick J. Wong 	error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),
1025b5a08423SDarrick J. Wong 			fsgid_into_mnt(mnt_userns), prid,
1026c24b5dfaSDave Chinner 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1027c24b5dfaSDave Chinner 			&udqp, &gdqp, &pdqp);
1028c24b5dfaSDave Chinner 	if (error)
1029c24b5dfaSDave Chinner 		return error;
1030c24b5dfaSDave Chinner 
1031c24b5dfaSDave Chinner 	if (is_dir) {
1032c24b5dfaSDave Chinner 		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1033062647a8SBrian Foster 		tres = &M_RES(mp)->tr_mkdir;
1034c24b5dfaSDave Chinner 	} else {
1035c24b5dfaSDave Chinner 		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1036062647a8SBrian Foster 		tres = &M_RES(mp)->tr_create;
1037c24b5dfaSDave Chinner 	}
1038c24b5dfaSDave Chinner 
1039c24b5dfaSDave Chinner 	/*
1040c24b5dfaSDave Chinner 	 * Initially assume that the file does not exist and
1041c24b5dfaSDave Chinner 	 * reserve the resources for that case.  If that is not
1042c24b5dfaSDave Chinner 	 * the case we'll drop the one we have and get a more
1043c24b5dfaSDave Chinner 	 * appropriate transaction later.
1044c24b5dfaSDave Chinner 	 */
1045f2f7b9ffSDarrick J. Wong 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1046f2f7b9ffSDarrick J. Wong 			&tp);
10472451337dSDave Chinner 	if (error == -ENOSPC) {
1048c24b5dfaSDave Chinner 		/* flush outstanding delalloc blocks and retry */
1049c24b5dfaSDave Chinner 		xfs_flush_inodes(mp);
1050f2f7b9ffSDarrick J. Wong 		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1051f2f7b9ffSDarrick J. Wong 				resblks, &tp);
1052c24b5dfaSDave Chinner 	}
10534906e215SChristoph Hellwig 	if (error)
1054f2f7b9ffSDarrick J. Wong 		goto out_release_dquots;
1055c24b5dfaSDave Chinner 
105665523218SChristoph Hellwig 	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1057c24b5dfaSDave Chinner 	unlock_dp_on_error = true;
1058c24b5dfaSDave Chinner 
1059f5d92749SChandan Babu R 	error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
1060f5d92749SChandan Babu R 			XFS_IEXT_DIR_MANIP_CNT(mp));
1061f5d92749SChandan Babu R 	if (error)
1062f5d92749SChandan Babu R 		goto out_trans_cancel;
1063f5d92749SChandan Babu R 
1064c24b5dfaSDave Chinner 	/*
1065c24b5dfaSDave Chinner 	 * A newly created regular or special file just has one directory
1066c24b5dfaSDave Chinner 	 * entry pointing to it, but a directory also has the "." entry
1067c24b5dfaSDave Chinner 	 * pointing to itself.
1068c24b5dfaSDave Chinner 	 */
1069f736d93dSChristoph Hellwig 	error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, is_dir ? 2 : 1, rdev,
1070e6a688c3SDave Chinner 			       prid, init_xattrs, &ip);
1071d6077aa3SJan Kara 	if (error)
1072c24b5dfaSDave Chinner 		goto out_trans_cancel;
1073c24b5dfaSDave Chinner 
1074c24b5dfaSDave Chinner 	/*
1075c24b5dfaSDave Chinner 	 * Now we join the directory inode to the transaction.  We do not do it
1076c24b5dfaSDave Chinner 	 * earlier because xfs_dir_ialloc might commit the previous transaction
1077c24b5dfaSDave Chinner 	 * (and release all the locks).  An error from here on will result in
1078c24b5dfaSDave Chinner 	 * the transaction cancel unlocking dp so don't do it explicitly in the
1079c24b5dfaSDave Chinner 	 * error path.
1080c24b5dfaSDave Chinner 	 */
108165523218SChristoph Hellwig 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1082c24b5dfaSDave Chinner 	unlock_dp_on_error = false;
1083c24b5dfaSDave Chinner 
1084381eee69SBrian Foster 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
108563337b63SKaixu Xia 					resblks - XFS_IALLOC_SPACE_RES(mp));
1086c24b5dfaSDave Chinner 	if (error) {
10872451337dSDave Chinner 		ASSERT(error != -ENOSPC);
10884906e215SChristoph Hellwig 		goto out_trans_cancel;
1089c24b5dfaSDave Chinner 	}
1090c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1091c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1092c24b5dfaSDave Chinner 
1093c24b5dfaSDave Chinner 	if (is_dir) {
1094c24b5dfaSDave Chinner 		error = xfs_dir_init(tp, ip, dp);
1095c24b5dfaSDave Chinner 		if (error)
1096c8eac49eSBrian Foster 			goto out_trans_cancel;
1097c24b5dfaSDave Chinner 
109891083269SEric Sandeen 		xfs_bumplink(tp, dp);
1099c24b5dfaSDave Chinner 	}
1100c24b5dfaSDave Chinner 
1101c24b5dfaSDave Chinner 	/*
1102c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1103c24b5dfaSDave Chinner 	 * create transaction goes to disk before returning to
1104c24b5dfaSDave Chinner 	 * the user.
1105c24b5dfaSDave Chinner 	 */
1106c24b5dfaSDave Chinner 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1107c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1108c24b5dfaSDave Chinner 
1109c24b5dfaSDave Chinner 	/*
1110c24b5dfaSDave Chinner 	 * Attach the dquot(s) to the inodes and modify them incore.
1111c24b5dfaSDave Chinner 	 * The ids of the inode couldn't have changed since the new
1112c24b5dfaSDave Chinner 	 * inode has been locked ever since it was created.
1113c24b5dfaSDave Chinner 	 */
1114c24b5dfaSDave Chinner 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1115c24b5dfaSDave Chinner 
111670393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1117c24b5dfaSDave Chinner 	if (error)
1118c24b5dfaSDave Chinner 		goto out_release_inode;
1119c24b5dfaSDave Chinner 
1120c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1121c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1122c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1123c24b5dfaSDave Chinner 
1124c24b5dfaSDave Chinner 	*ipp = ip;
1125c24b5dfaSDave Chinner 	return 0;
1126c24b5dfaSDave Chinner 
1127c24b5dfaSDave Chinner  out_trans_cancel:
11284906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1129c24b5dfaSDave Chinner  out_release_inode:
1130c24b5dfaSDave Chinner 	/*
113158c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
113258c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
113358c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
1134c24b5dfaSDave Chinner 	 */
113558c90473SDave Chinner 	if (ip) {
113658c90473SDave Chinner 		xfs_finish_inode_setup(ip);
113744a8736bSDarrick J. Wong 		xfs_irele(ip);
113858c90473SDave Chinner 	}
1139f2f7b9ffSDarrick J. Wong  out_release_dquots:
1140c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1141c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1142c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1143c24b5dfaSDave Chinner 
1144c24b5dfaSDave Chinner 	if (unlock_dp_on_error)
114565523218SChristoph Hellwig 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1146c24b5dfaSDave Chinner 	return error;
1147c24b5dfaSDave Chinner }
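
/*
 * Illustrative sketch only (not compiled): roughly how the VFS-facing
 * glue is expected to drive xfs_create().  The helper names around the
 * call are assumptions based on xfs_iops.c and may differ between kernel
 * versions; only xfs_create() and xfs_finish_inode_setup() come from this
 * file.
 *
 *	struct xfs_inode	*ip = NULL;
 *	struct xfs_name		name;
 *	int			error;
 *
 *	xfs_dentry_to_name(&name, dentry);		// assumed helper
 *	error = xfs_create(mnt_userns, XFS_I(dir), &name, mode, rdev,
 *			   false, &ip);
 *	if (error)
 *		return error;
 *	d_instantiate(dentry, VFS_I(ip));
 *	xfs_finish_inode_setup(ip);	// clears the XFS_INEW state
 */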
1148c24b5dfaSDave Chinner 
1149c24b5dfaSDave Chinner int
115099b6436bSZhi Yong Wu xfs_create_tmpfile(
1151f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
115299b6436bSZhi Yong Wu 	struct xfs_inode	*dp,
1153330033d6SBrian Foster 	umode_t			mode,
1154330033d6SBrian Foster 	struct xfs_inode	**ipp)
115599b6436bSZhi Yong Wu {
115699b6436bSZhi Yong Wu 	struct xfs_mount	*mp = dp->i_mount;
115799b6436bSZhi Yong Wu 	struct xfs_inode	*ip = NULL;
115899b6436bSZhi Yong Wu 	struct xfs_trans	*tp = NULL;
115999b6436bSZhi Yong Wu 	int			error;
116099b6436bSZhi Yong Wu 	prid_t                  prid;
116199b6436bSZhi Yong Wu 	struct xfs_dquot	*udqp = NULL;
116299b6436bSZhi Yong Wu 	struct xfs_dquot	*gdqp = NULL;
116399b6436bSZhi Yong Wu 	struct xfs_dquot	*pdqp = NULL;
116499b6436bSZhi Yong Wu 	struct xfs_trans_res	*tres;
116599b6436bSZhi Yong Wu 	uint			resblks;
116699b6436bSZhi Yong Wu 
116799b6436bSZhi Yong Wu 	if (XFS_FORCED_SHUTDOWN(mp))
11682451337dSDave Chinner 		return -EIO;
116999b6436bSZhi Yong Wu 
117099b6436bSZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
117199b6436bSZhi Yong Wu 
117299b6436bSZhi Yong Wu 	/*
117399b6436bSZhi Yong Wu 	 * Make sure that we have allocated dquot(s) on disk.
117499b6436bSZhi Yong Wu 	 */
1175b5a08423SDarrick J. Wong 	error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),
1176b5a08423SDarrick J. Wong 			fsgid_into_mnt(mnt_userns), prid,
117799b6436bSZhi Yong Wu 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
117899b6436bSZhi Yong Wu 			&udqp, &gdqp, &pdqp);
117999b6436bSZhi Yong Wu 	if (error)
118099b6436bSZhi Yong Wu 		return error;
118199b6436bSZhi Yong Wu 
118299b6436bSZhi Yong Wu 	resblks = XFS_IALLOC_SPACE_RES(mp);
118399b6436bSZhi Yong Wu 	tres = &M_RES(mp)->tr_create_tmpfile;
1184253f4911SChristoph Hellwig 
1185f2f7b9ffSDarrick J. Wong 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1186f2f7b9ffSDarrick J. Wong 			&tp);
11874906e215SChristoph Hellwig 	if (error)
1188f2f7b9ffSDarrick J. Wong 		goto out_release_dquots;
118999b6436bSZhi Yong Wu 
1190e6a688c3SDave Chinner 	error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, 0, 0, prid,
1191e6a688c3SDave Chinner 				false, &ip);
1192d6077aa3SJan Kara 	if (error)
119399b6436bSZhi Yong Wu 		goto out_trans_cancel;
119499b6436bSZhi Yong Wu 
119599b6436bSZhi Yong Wu 	if (mp->m_flags & XFS_MOUNT_WSYNC)
119699b6436bSZhi Yong Wu 		xfs_trans_set_sync(tp);
119799b6436bSZhi Yong Wu 
119899b6436bSZhi Yong Wu 	/*
119999b6436bSZhi Yong Wu 	 * Attach the dquot(s) to the inodes and modify them incore.
120099b6436bSZhi Yong Wu 	 * The ids of the inode couldn't have changed since the new
120199b6436bSZhi Yong Wu 	 * inode has been locked ever since it was created.
120299b6436bSZhi Yong Wu 	 */
120399b6436bSZhi Yong Wu 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
120499b6436bSZhi Yong Wu 
120599b6436bSZhi Yong Wu 	error = xfs_iunlink(tp, ip);
120699b6436bSZhi Yong Wu 	if (error)
12074906e215SChristoph Hellwig 		goto out_trans_cancel;
120899b6436bSZhi Yong Wu 
120970393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
121099b6436bSZhi Yong Wu 	if (error)
121199b6436bSZhi Yong Wu 		goto out_release_inode;
121299b6436bSZhi Yong Wu 
121399b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
121499b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
121599b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
121699b6436bSZhi Yong Wu 
1217330033d6SBrian Foster 	*ipp = ip;
121899b6436bSZhi Yong Wu 	return 0;
121999b6436bSZhi Yong Wu 
122099b6436bSZhi Yong Wu  out_trans_cancel:
12214906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
122299b6436bSZhi Yong Wu  out_release_inode:
122399b6436bSZhi Yong Wu 	/*
122458c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
122558c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
122658c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
122799b6436bSZhi Yong Wu 	 */
122858c90473SDave Chinner 	if (ip) {
122958c90473SDave Chinner 		xfs_finish_inode_setup(ip);
123044a8736bSDarrick J. Wong 		xfs_irele(ip);
123158c90473SDave Chinner 	}
1232f2f7b9ffSDarrick J. Wong  out_release_dquots:
123399b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
123499b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
123599b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
123699b6436bSZhi Yong Wu 
123799b6436bSZhi Yong Wu 	return error;
123899b6436bSZhi Yong Wu }
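
/*
 * Rough O_TMPFILE lifecycle sketch (illustration only): the inode is
 * created with nlink == 0 and parked on the AGI unlinked list by the
 * xfs_iunlink() call above.  From there it either dies quietly or gains
 * a name later:
 *
 *	xfs_create_tmpfile(mnt_userns, dp, mode, &ip);
 *	...
 *	// last reference dropped while nlink is still 0:
 *	//	xfs_inactive(ip) truncates and frees the inode; or
 *	// linkat()-style materialisation:
 *	//	xfs_link(tdp, ip, &name) pulls it off the unlinked list
 *	//	via xfs_iunlink_remove() and bumps the link count.
 */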
123999b6436bSZhi Yong Wu 
124099b6436bSZhi Yong Wu int
1241c24b5dfaSDave Chinner xfs_link(
1242c24b5dfaSDave Chinner 	xfs_inode_t		*tdp,
1243c24b5dfaSDave Chinner 	xfs_inode_t		*sip,
1244c24b5dfaSDave Chinner 	struct xfs_name		*target_name)
1245c24b5dfaSDave Chinner {
1246c24b5dfaSDave Chinner 	xfs_mount_t		*mp = tdp->i_mount;
1247c24b5dfaSDave Chinner 	xfs_trans_t		*tp;
1248c24b5dfaSDave Chinner 	int			error;
1249c24b5dfaSDave Chinner 	int			resblks;
1250c24b5dfaSDave Chinner 
1251c24b5dfaSDave Chinner 	trace_xfs_link(tdp, target_name);
1252c24b5dfaSDave Chinner 
1253c19b3b05SDave Chinner 	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1254c24b5dfaSDave Chinner 
1255c24b5dfaSDave Chinner 	if (XFS_FORCED_SHUTDOWN(mp))
12562451337dSDave Chinner 		return -EIO;
1257c24b5dfaSDave Chinner 
1258c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(sip);
1259c24b5dfaSDave Chinner 	if (error)
1260c24b5dfaSDave Chinner 		goto std_return;
1261c24b5dfaSDave Chinner 
1262c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(tdp);
1263c24b5dfaSDave Chinner 	if (error)
1264c24b5dfaSDave Chinner 		goto std_return;
1265c24b5dfaSDave Chinner 
1266c24b5dfaSDave Chinner 	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1267253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
12682451337dSDave Chinner 	if (error == -ENOSPC) {
1269c24b5dfaSDave Chinner 		resblks = 0;
1270253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1271c24b5dfaSDave Chinner 	}
12724906e215SChristoph Hellwig 	if (error)
1273253f4911SChristoph Hellwig 		goto std_return;
1274c24b5dfaSDave Chinner 
12757c2d238aSDarrick J. Wong 	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1276c24b5dfaSDave Chinner 
1277c24b5dfaSDave Chinner 	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
127865523218SChristoph Hellwig 	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1279c24b5dfaSDave Chinner 
1280f5d92749SChandan Babu R 	error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
1281f5d92749SChandan Babu R 			XFS_IEXT_DIR_MANIP_CNT(mp));
1282f5d92749SChandan Babu R 	if (error)
1283f5d92749SChandan Babu R 		goto error_return;
1284f5d92749SChandan Babu R 
1285c24b5dfaSDave Chinner 	/*
1286c24b5dfaSDave Chinner 	 * If we are using project inheritance, we only allow hard link
1287c24b5dfaSDave Chinner 	 * creation in our tree when the project IDs are the same; else
1288c24b5dfaSDave Chinner 	 * the tree quota mechanism could be circumvented.
1289c24b5dfaSDave Chinner 	 */
1290c24b5dfaSDave Chinner 	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1291ceaf603cSChristoph Hellwig 		     tdp->i_projid != sip->i_projid)) {
12922451337dSDave Chinner 		error = -EXDEV;
1293c24b5dfaSDave Chinner 		goto error_return;
1294c24b5dfaSDave Chinner 	}
1295c24b5dfaSDave Chinner 
129694f3cad5SEric Sandeen 	if (!resblks) {
129794f3cad5SEric Sandeen 		error = xfs_dir_canenter(tp, tdp, target_name);
1298c24b5dfaSDave Chinner 		if (error)
1299c24b5dfaSDave Chinner 			goto error_return;
130094f3cad5SEric Sandeen 	}
1301c24b5dfaSDave Chinner 
130254d7b5c1SDave Chinner 	/*
130354d7b5c1SDave Chinner 	 * Handle initial link state of O_TMPFILE inode
130454d7b5c1SDave Chinner 	 */
130554d7b5c1SDave Chinner 	if (VFS_I(sip)->i_nlink == 0) {
1306ab297431SZhi Yong Wu 		error = xfs_iunlink_remove(tp, sip);
1307ab297431SZhi Yong Wu 		if (error)
13084906e215SChristoph Hellwig 			goto error_return;
1309ab297431SZhi Yong Wu 	}
1310ab297431SZhi Yong Wu 
1311c24b5dfaSDave Chinner 	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1312381eee69SBrian Foster 				   resblks);
1313c24b5dfaSDave Chinner 	if (error)
13144906e215SChristoph Hellwig 		goto error_return;
1315c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1316c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1317c24b5dfaSDave Chinner 
131891083269SEric Sandeen 	xfs_bumplink(tp, sip);
1319c24b5dfaSDave Chinner 
1320c24b5dfaSDave Chinner 	/*
1321c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1322c24b5dfaSDave Chinner 	 * link transaction goes to disk before returning to
1323c24b5dfaSDave Chinner 	 * the user.
1324c24b5dfaSDave Chinner 	 */
1325f6106efaSEric Sandeen 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1326c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1327c24b5dfaSDave Chinner 
132870393313SChristoph Hellwig 	return xfs_trans_commit(tp);
1329c24b5dfaSDave Chinner 
1330c24b5dfaSDave Chinner  error_return:
13314906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1332c24b5dfaSDave Chinner  std_return:
1333c24b5dfaSDave Chinner 	return error;
1334c24b5dfaSDave Chinner }
1335c24b5dfaSDave Chinner 
1336363e59baSDarrick J. Wong /* Clear the reflink flag and the cowblocks tag if possible. */
1337363e59baSDarrick J. Wong static void
1338363e59baSDarrick J. Wong xfs_itruncate_clear_reflink_flags(
1339363e59baSDarrick J. Wong 	struct xfs_inode	*ip)
1340363e59baSDarrick J. Wong {
1341363e59baSDarrick J. Wong 	struct xfs_ifork	*dfork;
1342363e59baSDarrick J. Wong 	struct xfs_ifork	*cfork;
1343363e59baSDarrick J. Wong 
1344363e59baSDarrick J. Wong 	if (!xfs_is_reflink_inode(ip))
1345363e59baSDarrick J. Wong 		return;
1346363e59baSDarrick J. Wong 	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1347363e59baSDarrick J. Wong 	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1348363e59baSDarrick J. Wong 	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1349363e59baSDarrick J. Wong 		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1350363e59baSDarrick J. Wong 	if (cfork->if_bytes == 0)
1351363e59baSDarrick J. Wong 		xfs_inode_clear_cowblocks_tag(ip);
1352363e59baSDarrick J. Wong }
1353363e59baSDarrick J. Wong 
13541da177e4SLinus Torvalds /*
13558f04c47aSChristoph Hellwig  * Free up the underlying blocks past new_size.  The new size must be smaller
13568f04c47aSChristoph Hellwig  * than the current size.  This routine can be used both for the attribute and
13578f04c47aSChristoph Hellwig  * data fork, and does not modify the inode size, which is left to the caller.
13581da177e4SLinus Torvalds  *
1359f6485057SDavid Chinner  * The transaction passed to this routine must have made a permanent log
1360f6485057SDavid Chinner  * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1361f6485057SDavid Chinner  * given transaction and start new ones, so make sure everything involved in
1362f6485057SDavid Chinner  * the transaction is tidy before calling here.  Some transaction will be
1363f6485057SDavid Chinner  * returned to the caller to be committed.  The incoming transaction must
1364f6485057SDavid Chinner  * already include the inode, and both inode locks must be held exclusively.
1365f6485057SDavid Chinner  * The inode must also be "held" within the transaction.  On return the inode
1366f6485057SDavid Chinner  * will be "held" within the returned transaction.  This routine does NOT
1367f6485057SDavid Chinner  * require any disk space to be reserved for it within the transaction.
13681da177e4SLinus Torvalds  *
1369f6485057SDavid Chinner  * If we get an error, we must return with the inode locked and linked into the
1370f6485057SDavid Chinner  * current transaction. This keeps things simple for the higher level code,
1371f6485057SDavid Chinner  * because it always knows that the inode is locked and held in the transaction
1372f6485057SDavid Chinner  * that returns to it whether errors occur or not.  We don't mark the inode
1373f6485057SDavid Chinner  * dirty on error so that transactions can be easily aborted if possible.
13741da177e4SLinus Torvalds  */
13751da177e4SLinus Torvalds int
13764e529339SBrian Foster xfs_itruncate_extents_flags(
13778f04c47aSChristoph Hellwig 	struct xfs_trans	**tpp,
13788f04c47aSChristoph Hellwig 	struct xfs_inode	*ip,
13798f04c47aSChristoph Hellwig 	int			whichfork,
138013b86fc3SBrian Foster 	xfs_fsize_t		new_size,
13814e529339SBrian Foster 	int			flags)
13821da177e4SLinus Torvalds {
13838f04c47aSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
13848f04c47aSChristoph Hellwig 	struct xfs_trans	*tp = *tpp;
13851da177e4SLinus Torvalds 	xfs_fileoff_t		first_unmap_block;
13868f04c47aSChristoph Hellwig 	xfs_filblks_t		unmap_len;
13878f04c47aSChristoph Hellwig 	int			error = 0;
13881da177e4SLinus Torvalds 
13890b56185bSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
13900b56185bSChristoph Hellwig 	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
13910b56185bSChristoph Hellwig 	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1392ce7ae151SChristoph Hellwig 	ASSERT(new_size <= XFS_ISIZE(ip));
13938f04c47aSChristoph Hellwig 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
13941da177e4SLinus Torvalds 	ASSERT(ip->i_itemp != NULL);
1395898621d5SChristoph Hellwig 	ASSERT(ip->i_itemp->ili_lock_flags == 0);
13961da177e4SLinus Torvalds 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
13971da177e4SLinus Torvalds 
1398673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_start(ip, new_size);
1399673e8e59SChristoph Hellwig 
14004e529339SBrian Foster 	flags |= xfs_bmapi_aflag(whichfork);
140113b86fc3SBrian Foster 
14021da177e4SLinus Torvalds 	/*
14031da177e4SLinus Torvalds 	 * Since it is possible for space to become allocated beyond
14041da177e4SLinus Torvalds 	 * the end of the file (in a crash where the space is allocated
14051da177e4SLinus Torvalds 	 * but the inode size is not yet updated), simply remove any
14061da177e4SLinus Torvalds 	 * blocks which show up between the new EOF and the maximum
14074bbb04abSDarrick J. Wong 	 * possible file size.
14084bbb04abSDarrick J. Wong 	 *
14094bbb04abSDarrick J. Wong 	 * We have to free all the blocks to the bmbt maximum offset, even if
14104bbb04abSDarrick J. Wong 	 * the page cache can't scale that far.
14111da177e4SLinus Torvalds 	 */
14128f04c47aSChristoph Hellwig 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
141333005fd0SDarrick J. Wong 	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
14144bbb04abSDarrick J. Wong 		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
14158f04c47aSChristoph Hellwig 		return 0;
14164bbb04abSDarrick J. Wong 	}
14178f04c47aSChristoph Hellwig 
14184bbb04abSDarrick J. Wong 	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
14194bbb04abSDarrick J. Wong 	while (unmap_len > 0) {
142002dff7bfSBrian Foster 		ASSERT(tp->t_firstblock == NULLFSBLOCK);
14214bbb04abSDarrick J. Wong 		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
14224bbb04abSDarrick J. Wong 				flags, XFS_ITRUNC_MAX_EXTENTS);
14238f04c47aSChristoph Hellwig 		if (error)
1424d5a2e289SBrian Foster 			goto out;
14251da177e4SLinus Torvalds 
14266dd379c7SBrian Foster 		/* free the just unmapped extents */
14279e28a242SBrian Foster 		error = xfs_defer_finish(&tp);
14288f04c47aSChristoph Hellwig 		if (error)
14299b1f4e98SBrian Foster 			goto out;
14301da177e4SLinus Torvalds 	}
14318f04c47aSChristoph Hellwig 
14324919d42aSDarrick J. Wong 	if (whichfork == XFS_DATA_FORK) {
1433aa8968f2SDarrick J. Wong 		/* Remove all pending CoW reservations. */
14344919d42aSDarrick J. Wong 		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
14354bbb04abSDarrick J. Wong 				first_unmap_block, XFS_MAX_FILEOFF, true);
1436aa8968f2SDarrick J. Wong 		if (error)
1437aa8968f2SDarrick J. Wong 			goto out;
1438aa8968f2SDarrick J. Wong 
1439363e59baSDarrick J. Wong 		xfs_itruncate_clear_reflink_flags(ip);
14404919d42aSDarrick J. Wong 	}
1441aa8968f2SDarrick J. Wong 
1442673e8e59SChristoph Hellwig 	/*
1443673e8e59SChristoph Hellwig 	 * Always re-log the inode so that our permanent transaction can keep
1444673e8e59SChristoph Hellwig 	 * on rolling it forward in the log.
1445673e8e59SChristoph Hellwig 	 */
1446673e8e59SChristoph Hellwig 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1447673e8e59SChristoph Hellwig 
1448673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_end(ip, new_size);
1449673e8e59SChristoph Hellwig 
14508f04c47aSChristoph Hellwig out:
14518f04c47aSChristoph Hellwig 	*tpp = tp;
14528f04c47aSChristoph Hellwig 	return error;
14538f04c47aSChristoph Hellwig }
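
/*
 * Minimal usage sketch (illustration only) of the contract described in
 * the comment above; xfs_inactive_truncate() below is the in-file
 * example.  A caller broadly looks like:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	...
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	...	// tp may have been rolled, but ip is still joined and locked
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */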
14548f04c47aSChristoph Hellwig 
1455c24b5dfaSDave Chinner int
1456c24b5dfaSDave Chinner xfs_release(
1457c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1458c24b5dfaSDave Chinner {
1459c24b5dfaSDave Chinner 	xfs_mount_t	*mp = ip->i_mount;
1460c24b5dfaSDave Chinner 	int		error;
1461c24b5dfaSDave Chinner 
1462c19b3b05SDave Chinner 	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1463c24b5dfaSDave Chinner 		return 0;
1464c24b5dfaSDave Chinner 
1465c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
1466c24b5dfaSDave Chinner 	if (mp->m_flags & XFS_MOUNT_RDONLY)
1467c24b5dfaSDave Chinner 		return 0;
1468c24b5dfaSDave Chinner 
1469c24b5dfaSDave Chinner 	if (!XFS_FORCED_SHUTDOWN(mp)) {
1470c24b5dfaSDave Chinner 		int truncated;
1471c24b5dfaSDave Chinner 
1472c24b5dfaSDave Chinner 		/*
1473c24b5dfaSDave Chinner 		 * If we previously truncated this file and removed old data
1474c24b5dfaSDave Chinner 		 * in the process, we want to initiate "early" writeout on
1475c24b5dfaSDave Chinner 		 * the last close.  This is an attempt to combat the notorious
1476c24b5dfaSDave Chinner 		 * NULL files problem which is particularly noticeable from a
1477c24b5dfaSDave Chinner 		 * truncate down, buffered (re-)write (delalloc), followed by
1478c24b5dfaSDave Chinner 		 * a crash.  What we are effectively doing here is
1479c24b5dfaSDave Chinner 		 * significantly reducing the time window where we'd otherwise
1480c24b5dfaSDave Chinner 		 * be exposed to that problem.
1481c24b5dfaSDave Chinner 		 */
1482c24b5dfaSDave Chinner 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1483c24b5dfaSDave Chinner 		if (truncated) {
1484c24b5dfaSDave Chinner 			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1485eac152b4SDave Chinner 			if (ip->i_delayed_blks > 0) {
14862451337dSDave Chinner 				error = filemap_flush(VFS_I(ip)->i_mapping);
1487c24b5dfaSDave Chinner 				if (error)
1488c24b5dfaSDave Chinner 					return error;
1489c24b5dfaSDave Chinner 			}
1490c24b5dfaSDave Chinner 		}
1491c24b5dfaSDave Chinner 	}
1492c24b5dfaSDave Chinner 
149354d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink == 0)
1494c24b5dfaSDave Chinner 		return 0;
1495c24b5dfaSDave Chinner 
1496c24b5dfaSDave Chinner 	if (xfs_can_free_eofblocks(ip, false)) {
1497c24b5dfaSDave Chinner 
1498c24b5dfaSDave Chinner 		/*
1499a36b9261SBrian Foster 		 * If the inode is being opened, written and closed
1500a36b9261SBrian Foster 		 * frequently and we have delayed allocation blocks outstanding
1501a36b9261SBrian Foster 		 * (e.g. streaming writes from the NFS server), truncating the
1502a36b9261SBrian Foster 		 * blocks past EOF will cause fragmentation to occur.
1503a36b9261SBrian Foster 		 *
1504a36b9261SBrian Foster 		 * In this case don't do the truncation, but we have to be
1505a36b9261SBrian Foster 		 * careful how we detect this case. Blocks beyond EOF show up as
1506a36b9261SBrian Foster 		 * i_delayed_blks even when the inode is clean, so we need to
1507a36b9261SBrian Foster 		 * truncate them away first before checking for a dirty release.
1508a36b9261SBrian Foster 		 * Hence on the first dirty close we will still remove the
1509a36b9261SBrian Foster 		 * speculative allocation, but after that we will leave it in
1510a36b9261SBrian Foster 		 * place.
1511a36b9261SBrian Foster 		 */
1512a36b9261SBrian Foster 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1513a36b9261SBrian Foster 			return 0;
1514a36b9261SBrian Foster 		/*
1515c24b5dfaSDave Chinner 		 * If we can't get the iolock just skip truncating the blocks
1516c1e8d7c6SMichel Lespinasse 		 * past EOF because we could deadlock with the mmap_lock
1517c24b5dfaSDave Chinner 		 * otherwise. We'll get another chance to drop them once the
1518c24b5dfaSDave Chinner 		 * last reference to the inode is dropped, so we'll never leak
1519c24b5dfaSDave Chinner 		 * blocks permanently.
1520c24b5dfaSDave Chinner 		 */
1521a36b9261SBrian Foster 		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1522a36b9261SBrian Foster 			error = xfs_free_eofblocks(ip);
1523a36b9261SBrian Foster 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1524a36b9261SBrian Foster 			if (error)
1525c24b5dfaSDave Chinner 				return error;
1526a36b9261SBrian Foster 		}
1527c24b5dfaSDave Chinner 
1528c24b5dfaSDave Chinner 		/* delalloc blocks after truncation means it really is dirty */
1529c24b5dfaSDave Chinner 		if (ip->i_delayed_blks)
1530c24b5dfaSDave Chinner 			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1531c24b5dfaSDave Chinner 	}
1532c24b5dfaSDave Chinner 	return 0;
1533c24b5dfaSDave Chinner }
1534c24b5dfaSDave Chinner 
1535c24b5dfaSDave Chinner /*
1536f7be2d7fSBrian Foster  * xfs_inactive_truncate
1537f7be2d7fSBrian Foster  *
1538f7be2d7fSBrian Foster  * Called to perform a truncate when an inode becomes unlinked.
1539f7be2d7fSBrian Foster  */
1540f7be2d7fSBrian Foster STATIC int
1541f7be2d7fSBrian Foster xfs_inactive_truncate(
1542f7be2d7fSBrian Foster 	struct xfs_inode *ip)
1543f7be2d7fSBrian Foster {
1544f7be2d7fSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
1545f7be2d7fSBrian Foster 	struct xfs_trans	*tp;
1546f7be2d7fSBrian Foster 	int			error;
1547f7be2d7fSBrian Foster 
1548253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1549f7be2d7fSBrian Foster 	if (error) {
1550f7be2d7fSBrian Foster 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
1551f7be2d7fSBrian Foster 		return error;
1552f7be2d7fSBrian Foster 	}
1553f7be2d7fSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1554f7be2d7fSBrian Foster 	xfs_trans_ijoin(tp, ip, 0);
1555f7be2d7fSBrian Foster 
1556f7be2d7fSBrian Foster 	/*
1557f7be2d7fSBrian Foster 	 * Log the inode size first to prevent stale data exposure in the event
1558f7be2d7fSBrian Foster 	 * of a system crash before the truncate completes. See the related
155969bca807SJan Kara 	 * comment in xfs_vn_setattr_size() for details.
1560f7be2d7fSBrian Foster 	 */
156113d2c10bSChristoph Hellwig 	ip->i_disk_size = 0;
1562f7be2d7fSBrian Foster 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1563f7be2d7fSBrian Foster 
1564f7be2d7fSBrian Foster 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1565f7be2d7fSBrian Foster 	if (error)
1566f7be2d7fSBrian Foster 		goto error_trans_cancel;
1567f7be2d7fSBrian Foster 
1568daf83964SChristoph Hellwig 	ASSERT(ip->i_df.if_nextents == 0);
1569f7be2d7fSBrian Foster 
157070393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1571f7be2d7fSBrian Foster 	if (error)
1572f7be2d7fSBrian Foster 		goto error_unlock;
1573f7be2d7fSBrian Foster 
1574f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1575f7be2d7fSBrian Foster 	return 0;
1576f7be2d7fSBrian Foster 
1577f7be2d7fSBrian Foster error_trans_cancel:
15784906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1579f7be2d7fSBrian Foster error_unlock:
1580f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1581f7be2d7fSBrian Foster 	return error;
1582f7be2d7fSBrian Foster }
1583f7be2d7fSBrian Foster 
1584f7be2d7fSBrian Foster /*
158588877d2bSBrian Foster  * xfs_inactive_ifree()
158688877d2bSBrian Foster  *
158788877d2bSBrian Foster  * Perform the inode free when an inode is unlinked.
158888877d2bSBrian Foster  */
158988877d2bSBrian Foster STATIC int
159088877d2bSBrian Foster xfs_inactive_ifree(
159188877d2bSBrian Foster 	struct xfs_inode *ip)
159288877d2bSBrian Foster {
159388877d2bSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
159488877d2bSBrian Foster 	struct xfs_trans	*tp;
159588877d2bSBrian Foster 	int			error;
159688877d2bSBrian Foster 
15979d43b180SBrian Foster 	/*
159876d771b4SChristoph Hellwig 	 * We try to use a per-AG reservation for any block needed by the finobt
159976d771b4SChristoph Hellwig 	 * tree, but as the finobt feature predates the per-AG reservation
160076d771b4SChristoph Hellwig 	 * support a degraded file system might not have enough space for the
160176d771b4SChristoph Hellwig 	 * reservation at mount time.  In that case try to dip into the reserved
160276d771b4SChristoph Hellwig 	 * pool and pray.
16039d43b180SBrian Foster 	 *
16049d43b180SBrian Foster 	 * Send a warning if the reservation does happen to fail, as the inode
16059d43b180SBrian Foster 	 * now remains allocated and sits on the unlinked list until the fs is
16069d43b180SBrian Foster 	 * repaired.
16079d43b180SBrian Foster 	 */
1608e1f6ca11SDarrick J. Wong 	if (unlikely(mp->m_finobt_nores)) {
1609253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
161076d771b4SChristoph Hellwig 				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
161176d771b4SChristoph Hellwig 				&tp);
161276d771b4SChristoph Hellwig 	} else {
161376d771b4SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
161476d771b4SChristoph Hellwig 	}
161588877d2bSBrian Foster 	if (error) {
16162451337dSDave Chinner 		if (error == -ENOSPC) {
16179d43b180SBrian Foster 			xfs_warn_ratelimited(mp,
16189d43b180SBrian Foster 			"Failed to remove inode(s) from unlinked list. "
16199d43b180SBrian Foster 			"Please free space, unmount and run xfs_repair.");
16209d43b180SBrian Foster 		} else {
162188877d2bSBrian Foster 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
16229d43b180SBrian Foster 		}
162388877d2bSBrian Foster 		return error;
162488877d2bSBrian Foster 	}
162588877d2bSBrian Foster 
162696355d5aSDave Chinner 	/*
162796355d5aSDave Chinner 	 * We do not hold the inode locked across the entire rolling transaction
162896355d5aSDave Chinner 	 * here. We only need to hold it for the first transaction that
162996355d5aSDave Chinner 	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
163096355d5aSDave Chinner 	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
163196355d5aSDave Chinner 	 * here breaks the relationship between cluster buffer invalidation and
163296355d5aSDave Chinner 	 * stale inode invalidation on cluster buffer item journal commit
163396355d5aSDave Chinner 	 * completion, and can result in leaving dirty stale inodes hanging
163496355d5aSDave Chinner 	 * around in memory.
163596355d5aSDave Chinner 	 *
163696355d5aSDave Chinner 	 * We have no need for serialising this inode operation against other
163796355d5aSDave Chinner 	 * operations - we freed the inode and hence reallocation is required
163896355d5aSDave Chinner 	 * and that will serialise on reallocating the space the deferops need
163996355d5aSDave Chinner 	 * to free. Hence we can unlock the inode on the first commit of
164096355d5aSDave Chinner 	 * the transaction rather than roll it right through the deferops. This
164196355d5aSDave Chinner 	 * avoids relogging the XFS_ISTALE inode.
164296355d5aSDave Chinner 	 *
164396355d5aSDave Chinner 	 * We check that xfs_ifree() hasn't grown an internal transaction roll
164496355d5aSDave Chinner 	 * by asserting that the inode is still locked when it returns.
164596355d5aSDave Chinner 	 */
164688877d2bSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
164796355d5aSDave Chinner 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
164888877d2bSBrian Foster 
16490e0417f3SBrian Foster 	error = xfs_ifree(tp, ip);
165096355d5aSDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
165188877d2bSBrian Foster 	if (error) {
165288877d2bSBrian Foster 		/*
165388877d2bSBrian Foster 		 * If we fail to free the inode, shut down.  The cancel
165488877d2bSBrian Foster 		 * might do that, we need to make sure.  Otherwise the
165588877d2bSBrian Foster 		 * inode might be lost for a long time or forever.
165688877d2bSBrian Foster 		 */
165788877d2bSBrian Foster 		if (!XFS_FORCED_SHUTDOWN(mp)) {
165888877d2bSBrian Foster 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
165988877d2bSBrian Foster 				__func__, error);
166088877d2bSBrian Foster 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
166188877d2bSBrian Foster 		}
16624906e215SChristoph Hellwig 		xfs_trans_cancel(tp);
166388877d2bSBrian Foster 		return error;
166488877d2bSBrian Foster 	}
166588877d2bSBrian Foster 
166688877d2bSBrian Foster 	/*
166788877d2bSBrian Foster 	 * Credit the quota account(s). The inode is gone.
166888877d2bSBrian Foster 	 */
166988877d2bSBrian Foster 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
167088877d2bSBrian Foster 
167188877d2bSBrian Foster 	/*
1672d4a97a04SBrian Foster 	 * Just ignore errors at this point.  There is nothing we can do except
1673d4a97a04SBrian Foster 	 * to try to keep going. Make sure it's not a silent error.
167488877d2bSBrian Foster 	 */
167570393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
167688877d2bSBrian Foster 	if (error)
167788877d2bSBrian Foster 		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
167888877d2bSBrian Foster 			__func__, error);
167988877d2bSBrian Foster 
168088877d2bSBrian Foster 	return 0;
168188877d2bSBrian Foster }
168288877d2bSBrian Foster 
168388877d2bSBrian Foster /*
1684c24b5dfaSDave Chinner  * xfs_inactive
1685c24b5dfaSDave Chinner  *
1686c24b5dfaSDave Chinner  * This is called when the reference count for the vnode
1687c24b5dfaSDave Chinner  * goes to zero.  If the file has been unlinked, then it must
1688c24b5dfaSDave Chinner  * now be truncated.  Also, we clear all of the read-ahead state
1689c24b5dfaSDave Chinner  * kept for the inode here since the file is now closed.
1690c24b5dfaSDave Chinner  */
169174564fb4SBrian Foster void
1692c24b5dfaSDave Chinner xfs_inactive(
1693c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1694c24b5dfaSDave Chinner {
16953d3c8b52SJie Liu 	struct xfs_mount	*mp;
1696c24b5dfaSDave Chinner 	int			error;
1697c24b5dfaSDave Chinner 	int			truncate = 0;
1698c24b5dfaSDave Chinner 
1699c24b5dfaSDave Chinner 	/*
1700c24b5dfaSDave Chinner 	 * If the inode is already free, then there can be nothing
1701c24b5dfaSDave Chinner 	 * to clean up here.
1702c24b5dfaSDave Chinner 	 */
1703c19b3b05SDave Chinner 	if (VFS_I(ip)->i_mode == 0) {
1704c24b5dfaSDave Chinner 		ASSERT(ip->i_df.if_broot_bytes == 0);
170574564fb4SBrian Foster 		return;
1706c24b5dfaSDave Chinner 	}
1707c24b5dfaSDave Chinner 
1708c24b5dfaSDave Chinner 	mp = ip->i_mount;
170917c12bcdSDarrick J. Wong 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1710c24b5dfaSDave Chinner 
1711c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
1712c24b5dfaSDave Chinner 	if (mp->m_flags & XFS_MOUNT_RDONLY)
171374564fb4SBrian Foster 		return;
1714c24b5dfaSDave Chinner 
1715383e32b0SDarrick J. Wong 	/* Metadata inodes require explicit resource cleanup. */
1716383e32b0SDarrick J. Wong 	if (xfs_is_metadata_inode(ip))
1717383e32b0SDarrick J. Wong 		return;
1718383e32b0SDarrick J. Wong 
17196231848cSDarrick J. Wong 	/* Try to clean out the cow blocks if there are any. */
172051d62690SChristoph Hellwig 	if (xfs_inode_has_cow_data(ip))
17216231848cSDarrick J. Wong 		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
17226231848cSDarrick J. Wong 
172354d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink != 0) {
1724c24b5dfaSDave Chinner 		/*
1725c24b5dfaSDave Chinner 		 * force is true because we are evicting an inode from the
1726c24b5dfaSDave Chinner 		 * cache. Post-eof blocks must be freed, lest we end up with
1727c24b5dfaSDave Chinner 		 * broken free space accounting.
17283b4683c2SBrian Foster 		 *
17293b4683c2SBrian Foster 		 * Note: don't bother with iolock here since lockdep complains
17303b4683c2SBrian Foster 		 * about acquiring it in reclaim context. We have the only
17313b4683c2SBrian Foster 		 * reference to the inode at this point anyways.
1732c24b5dfaSDave Chinner 		 */
17333b4683c2SBrian Foster 		if (xfs_can_free_eofblocks(ip, true))
1734a36b9261SBrian Foster 			xfs_free_eofblocks(ip);
173574564fb4SBrian Foster 
173674564fb4SBrian Foster 		return;
1737c24b5dfaSDave Chinner 	}
1738c24b5dfaSDave Chinner 
1739c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode) &&
174013d2c10bSChristoph Hellwig 	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1741daf83964SChristoph Hellwig 	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1742c24b5dfaSDave Chinner 		truncate = 1;
1743c24b5dfaSDave Chinner 
1744c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
1745c24b5dfaSDave Chinner 	if (error)
174674564fb4SBrian Foster 		return;
1747c24b5dfaSDave Chinner 
1748c19b3b05SDave Chinner 	if (S_ISLNK(VFS_I(ip)->i_mode))
174936b21ddeSBrian Foster 		error = xfs_inactive_symlink(ip);
1750f7be2d7fSBrian Foster 	else if (truncate)
1751f7be2d7fSBrian Foster 		error = xfs_inactive_truncate(ip);
175236b21ddeSBrian Foster 	if (error)
175374564fb4SBrian Foster 		return;
1754c24b5dfaSDave Chinner 
1755c24b5dfaSDave Chinner 	/*
1756c24b5dfaSDave Chinner 	 * If there are attributes associated with the file then blow them away
1757c24b5dfaSDave Chinner 	 * now.  The code calls a routine that recursively deconstructs the
17586dfe5a04SDave Chinner 	 * attribute fork. It also blows away the in-core attribute fork.
1759c24b5dfaSDave Chinner 	 */
17606dfe5a04SDave Chinner 	if (XFS_IFORK_Q(ip)) {
1761c24b5dfaSDave Chinner 		error = xfs_attr_inactive(ip);
1762c24b5dfaSDave Chinner 		if (error)
176374564fb4SBrian Foster 			return;
1764c24b5dfaSDave Chinner 	}
1765c24b5dfaSDave Chinner 
17666dfe5a04SDave Chinner 	ASSERT(!ip->i_afp);
17676dfe5a04SDave Chinner 	ASSERT(ip->i_d.di_forkoff == 0);
1768c24b5dfaSDave Chinner 
1769c24b5dfaSDave Chinner 	/*
1770c24b5dfaSDave Chinner 	 * Free the inode.
1771c24b5dfaSDave Chinner 	 */
177288877d2bSBrian Foster 	error = xfs_inactive_ifree(ip);
1773c24b5dfaSDave Chinner 	if (error)
177474564fb4SBrian Foster 		return;
1775c24b5dfaSDave Chinner 
1776c24b5dfaSDave Chinner 	/*
1777c24b5dfaSDave Chinner 	 * Release the dquots held by inode, if any.
1778c24b5dfaSDave Chinner 	 */
1779c24b5dfaSDave Chinner 	xfs_qm_dqdetach(ip);
1780c24b5dfaSDave Chinner }
1781c24b5dfaSDave Chinner 
17821da177e4SLinus Torvalds /*
17839b247179SDarrick J. Wong  * In-Core Unlinked List Lookups
17849b247179SDarrick J. Wong  * =============================
17859b247179SDarrick J. Wong  *
17869b247179SDarrick J. Wong  * Every inode is supposed to be reachable from some other piece of metadata
17879b247179SDarrick J. Wong  * with the exception of the root directory.  Inodes with a connection to a
17889b247179SDarrick J. Wong  * file descriptor but not linked from anywhere in the on-disk directory tree
17899b247179SDarrick J. Wong  * are collectively known as unlinked inodes, though the filesystem itself
17909b247179SDarrick J. Wong  * maintains links to these inodes so that on-disk metadata are consistent.
17919b247179SDarrick J. Wong  *
17929b247179SDarrick J. Wong  * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
17939b247179SDarrick J. Wong  * header contains a number of buckets that point to an inode, and each inode
17949b247179SDarrick J. Wong  * record has a pointer to the next inode in the hash chain.  This
17959b247179SDarrick J. Wong  * singly-linked list causes scaling problems in the iunlink remove function
17969b247179SDarrick J. Wong  * because we must walk that list to find the inode that points to the inode
17979b247179SDarrick J. Wong  * being removed from the unlinked hash bucket list.
17989b247179SDarrick J. Wong  *
17999b247179SDarrick J. Wong  * What if we modelled the unlinked list as a collection of records capturing
18009b247179SDarrick J. Wong  * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
18019b247179SDarrick J. Wong  * have a fast way to look up unlinked list predecessors, which avoids the
18029b247179SDarrick J. Wong  * slow list walk.  That's exactly what we do here (in-core) with a per-AG
18039b247179SDarrick J. Wong  * rhashtable.
18049b247179SDarrick J. Wong  *
18059b247179SDarrick J. Wong  * Because this is a backref cache, we ignore operational failures since the
18069b247179SDarrick J. Wong  * iunlink code can fall back to the slow bucket walk.  The only errors that
18079b247179SDarrick J. Wong  * should bubble out are for obviously incorrect situations.
18089b247179SDarrick J. Wong  *
18099b247179SDarrick J. Wong  * All users of the backref cache MUST hold the AGI buffer lock to serialize
18109b247179SDarrick J. Wong  * access or have otherwise provided for concurrency control.
18119b247179SDarrick J. Wong  */
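
/*
 * Worked example (illustration only): suppose AGI bucket 3 points at
 * inode A and the on-disk chain is A -> B -> C -> NULLAGINO.  The cache
 * then holds the records
 *
 *	{ .iu_agino = A, .iu_next_unlinked = B }
 *	{ .iu_agino = B, .iu_next_unlinked = C }
 *
 * keyed by iu_next_unlinked.  To remove B from the list we look up key B
 * and get its predecessor A back in O(1), instead of walking the bucket
 * from A until we find the inode whose next_unlinked is B.
 */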
18129b247179SDarrick J. Wong 
18139b247179SDarrick J. Wong /* Capture a "X.next_unlinked = Y" relationship. */
18149b247179SDarrick J. Wong struct xfs_iunlink {
18159b247179SDarrick J. Wong 	struct rhash_head	iu_rhash_head;
18169b247179SDarrick J. Wong 	xfs_agino_t		iu_agino;		/* X */
18179b247179SDarrick J. Wong 	xfs_agino_t		iu_next_unlinked;	/* Y */
18189b247179SDarrick J. Wong };
18199b247179SDarrick J. Wong 
18209b247179SDarrick J. Wong /* Unlinked list predecessor lookup hashtable construction */
18219b247179SDarrick J. Wong static int
18229b247179SDarrick J. Wong xfs_iunlink_obj_cmpfn(
18239b247179SDarrick J. Wong 	struct rhashtable_compare_arg	*arg,
18249b247179SDarrick J. Wong 	const void			*obj)
18259b247179SDarrick J. Wong {
18269b247179SDarrick J. Wong 	const xfs_agino_t		*key = arg->key;
18279b247179SDarrick J. Wong 	const struct xfs_iunlink	*iu = obj;
18289b247179SDarrick J. Wong 
18299b247179SDarrick J. Wong 	if (iu->iu_next_unlinked != *key)
18309b247179SDarrick J. Wong 		return 1;
18319b247179SDarrick J. Wong 	return 0;
18329b247179SDarrick J. Wong }
18339b247179SDarrick J. Wong 
18349b247179SDarrick J. Wong static const struct rhashtable_params xfs_iunlink_hash_params = {
18359b247179SDarrick J. Wong 	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
18369b247179SDarrick J. Wong 	.key_len		= sizeof(xfs_agino_t),
18379b247179SDarrick J. Wong 	.key_offset		= offsetof(struct xfs_iunlink,
18389b247179SDarrick J. Wong 					   iu_next_unlinked),
18399b247179SDarrick J. Wong 	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
18409b247179SDarrick J. Wong 	.automatic_shrinking	= true,
18419b247179SDarrick J. Wong 	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
18429b247179SDarrick J. Wong };
18439b247179SDarrick J. Wong 
18449b247179SDarrick J. Wong /*
18459b247179SDarrick J. Wong  * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
18469b247179SDarrick J. Wong  * relation is found.
18479b247179SDarrick J. Wong  */
18489b247179SDarrick J. Wong static xfs_agino_t
18499b247179SDarrick J. Wong xfs_iunlink_lookup_backref(
18509b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18519b247179SDarrick J. Wong 	xfs_agino_t		agino)
18529b247179SDarrick J. Wong {
18539b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
18549b247179SDarrick J. Wong 
18559b247179SDarrick J. Wong 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
18569b247179SDarrick J. Wong 			xfs_iunlink_hash_params);
18579b247179SDarrick J. Wong 	return iu ? iu->iu_agino : NULLAGINO;
18589b247179SDarrick J. Wong }
18599b247179SDarrick J. Wong 
18609b247179SDarrick J. Wong /*
18619b247179SDarrick J. Wong  * Take ownership of an iunlink cache entry and insert it into the hash table.
18629b247179SDarrick J. Wong  * If successful, the entry will be owned by the cache; if not, it is freed.
18639b247179SDarrick J. Wong  * Either way, the caller does not own @iu after this call.
18649b247179SDarrick J. Wong  */
18659b247179SDarrick J. Wong static int
18669b247179SDarrick J. Wong xfs_iunlink_insert_backref(
18679b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18689b247179SDarrick J. Wong 	struct xfs_iunlink	*iu)
18699b247179SDarrick J. Wong {
18709b247179SDarrick J. Wong 	int			error;
18719b247179SDarrick J. Wong 
18729b247179SDarrick J. Wong 	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
18739b247179SDarrick J. Wong 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
18749b247179SDarrick J. Wong 	/*
18759b247179SDarrick J. Wong 	 * Fail loudly if there already was an entry because that's a sign of
18769b247179SDarrick J. Wong 	 * corruption of in-memory data.  Also fail loudly if we see an error
18779b247179SDarrick J. Wong 	 * code we didn't anticipate from the rhashtable code.  Currently we
18789b247179SDarrick J. Wong 	 * only anticipate ENOMEM.
18799b247179SDarrick J. Wong 	 */
18809b247179SDarrick J. Wong 	if (error) {
18819b247179SDarrick J. Wong 		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
18829b247179SDarrick J. Wong 		kmem_free(iu);
18839b247179SDarrick J. Wong 	}
18849b247179SDarrick J. Wong 	/*
18859b247179SDarrick J. Wong 	 * Absorb any runtime errors that aren't a result of corruption because
18869b247179SDarrick J. Wong 	 * this is a cache and we can always fall back to bucket list scanning.
18879b247179SDarrick J. Wong 	 */
18889b247179SDarrick J. Wong 	if (error != 0 && error != -EEXIST)
18899b247179SDarrick J. Wong 		error = 0;
18909b247179SDarrick J. Wong 	return error;
18919b247179SDarrick J. Wong }
18929b247179SDarrick J. Wong 
18939b247179SDarrick J. Wong /* Remember that @prev_agino.next_unlinked = @this_agino. */
18949b247179SDarrick J. Wong static int
18959b247179SDarrick J. Wong xfs_iunlink_add_backref(
18969b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18979b247179SDarrick J. Wong 	xfs_agino_t		prev_agino,
18989b247179SDarrick J. Wong 	xfs_agino_t		this_agino)
18999b247179SDarrick J. Wong {
19009b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
19019b247179SDarrick J. Wong 
19029b247179SDarrick J. Wong 	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
19039b247179SDarrick J. Wong 		return 0;
19049b247179SDarrick J. Wong 
1905707e0ddaSTetsuo Handa 	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
19069b247179SDarrick J. Wong 	iu->iu_agino = prev_agino;
19079b247179SDarrick J. Wong 	iu->iu_next_unlinked = this_agino;
19089b247179SDarrick J. Wong 
19099b247179SDarrick J. Wong 	return xfs_iunlink_insert_backref(pag, iu);
19109b247179SDarrick J. Wong }
19119b247179SDarrick J. Wong 
19129b247179SDarrick J. Wong /*
19139b247179SDarrick J. Wong  * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
19149b247179SDarrick J. Wong  * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
19159b247179SDarrick J. Wong  * wasn't any such entry then we don't bother.
19169b247179SDarrick J. Wong  */
19179b247179SDarrick J. Wong static int
19189b247179SDarrick J. Wong xfs_iunlink_change_backref(
19199b247179SDarrick J. Wong 	struct xfs_perag	*pag,
19209b247179SDarrick J. Wong 	xfs_agino_t		agino,
19219b247179SDarrick J. Wong 	xfs_agino_t		next_unlinked)
19229b247179SDarrick J. Wong {
19239b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
19249b247179SDarrick J. Wong 	int			error;
19259b247179SDarrick J. Wong 
19269b247179SDarrick J. Wong 	/* Look up the old entry; if there wasn't one then exit. */
19279b247179SDarrick J. Wong 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
19289b247179SDarrick J. Wong 			xfs_iunlink_hash_params);
19299b247179SDarrick J. Wong 	if (!iu)
19309b247179SDarrick J. Wong 		return 0;
19319b247179SDarrick J. Wong 
19329b247179SDarrick J. Wong 	/*
19339b247179SDarrick J. Wong 	 * Remove the entry.  This shouldn't ever return an error, but if we
19349b247179SDarrick J. Wong 	 * couldn't remove the old entry we don't want to add it again to the
19359b247179SDarrick J. Wong 	 * hash table, and if the entry disappeared on us then someone's
19369b247179SDarrick J. Wong 	 * violated the locking rules and we need to fail loudly.  Either way
19379b247179SDarrick J. Wong 	 * we cannot remove the inode because internal state is or would have
19389b247179SDarrick J. Wong 	 * been corrupt.
19399b247179SDarrick J. Wong 	 */
19409b247179SDarrick J. Wong 	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
19419b247179SDarrick J. Wong 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
19429b247179SDarrick J. Wong 	if (error)
19439b247179SDarrick J. Wong 		return error;
19449b247179SDarrick J. Wong 
19459b247179SDarrick J. Wong 	/* If there is no new next entry just free our item and return. */
19469b247179SDarrick J. Wong 	if (next_unlinked == NULLAGINO) {
19479b247179SDarrick J. Wong 		kmem_free(iu);
19489b247179SDarrick J. Wong 		return 0;
19499b247179SDarrick J. Wong 	}
19509b247179SDarrick J. Wong 
19519b247179SDarrick J. Wong 	/* Update the entry and re-add it to the hash table. */
19529b247179SDarrick J. Wong 	iu->iu_next_unlinked = next_unlinked;
19539b247179SDarrick J. Wong 	return xfs_iunlink_insert_backref(pag, iu);
19549b247179SDarrick J. Wong }
19559b247179SDarrick J. Wong 
19569b247179SDarrick J. Wong /* Set up the in-core predecessor structures. */
19579b247179SDarrick J. Wong int
19589b247179SDarrick J. Wong xfs_iunlink_init(
19599b247179SDarrick J. Wong 	struct xfs_perag	*pag)
19609b247179SDarrick J. Wong {
19619b247179SDarrick J. Wong 	return rhashtable_init(&pag->pagi_unlinked_hash,
19629b247179SDarrick J. Wong 			&xfs_iunlink_hash_params);
19639b247179SDarrick J. Wong }
19649b247179SDarrick J. Wong 
19659b247179SDarrick J. Wong /* Free the in-core predecessor structures. */
19669b247179SDarrick J. Wong static void
19679b247179SDarrick J. Wong xfs_iunlink_free_item(
19689b247179SDarrick J. Wong 	void			*ptr,
19699b247179SDarrick J. Wong 	void			*arg)
19709b247179SDarrick J. Wong {
19719b247179SDarrick J. Wong 	struct xfs_iunlink	*iu = ptr;
19729b247179SDarrick J. Wong 	bool			*freed_anything = arg;
19739b247179SDarrick J. Wong 
19749b247179SDarrick J. Wong 	*freed_anything = true;
19759b247179SDarrick J. Wong 	kmem_free(iu);
19769b247179SDarrick J. Wong }
19779b247179SDarrick J. Wong 
19789b247179SDarrick J. Wong void
19799b247179SDarrick J. Wong xfs_iunlink_destroy(
19809b247179SDarrick J. Wong 	struct xfs_perag	*pag)
19819b247179SDarrick J. Wong {
19829b247179SDarrick J. Wong 	bool			freed_anything = false;
19839b247179SDarrick J. Wong 
19849b247179SDarrick J. Wong 	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
19859b247179SDarrick J. Wong 			xfs_iunlink_free_item, &freed_anything);
19869b247179SDarrick J. Wong 
19879b247179SDarrick J. Wong 	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
19889b247179SDarrick J. Wong }
19899b247179SDarrick J. Wong 
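/*
 * Hashtable lifecycle sketch (illustration only; the exact call sites are
 * assumptions): each struct xfs_perag owns one pagi_unlinked_hash, set up
 * when the perag is initialised and torn down when it is freed:
 *
 *	error = xfs_iunlink_init(pag);		// at perag initialisation
 *	...
 *	xfs_iunlink_destroy(pag);		// at perag teardown
 *
 * Entries are only added or removed while the AGI buffer is locked, which
 * is what serialises updates to the rhashtable itself.
 */
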
19909b247179SDarrick J. Wong /*
19919a4a5118SDarrick J. Wong  * Point the AGI unlinked bucket at an inode and log the results.  The caller
19929a4a5118SDarrick J. Wong  * is responsible for validating the old value.
19939a4a5118SDarrick J. Wong  */
19949a4a5118SDarrick J. Wong STATIC int
19959a4a5118SDarrick J. Wong xfs_iunlink_update_bucket(
19969a4a5118SDarrick J. Wong 	struct xfs_trans	*tp,
19979a4a5118SDarrick J. Wong 	xfs_agnumber_t		agno,
19989a4a5118SDarrick J. Wong 	struct xfs_buf		*agibp,
19999a4a5118SDarrick J. Wong 	unsigned int		bucket_index,
20009a4a5118SDarrick J. Wong 	xfs_agino_t		new_agino)
20019a4a5118SDarrick J. Wong {
2002370c782bSChristoph Hellwig 	struct xfs_agi		*agi = agibp->b_addr;
20039a4a5118SDarrick J. Wong 	xfs_agino_t		old_value;
20049a4a5118SDarrick J. Wong 	int			offset;
20059a4a5118SDarrick J. Wong 
20069a4a5118SDarrick J. Wong 	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
20079a4a5118SDarrick J. Wong 
20089a4a5118SDarrick J. Wong 	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
20099a4a5118SDarrick J. Wong 	trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
20109a4a5118SDarrick J. Wong 			old_value, new_agino);
20119a4a5118SDarrick J. Wong 
20129a4a5118SDarrick J. Wong 	/*
20139a4a5118SDarrick J. Wong 	 * We should never find the head of the list already set to the value
20149a4a5118SDarrick J. Wong 	 * passed in because either we're adding or removing ourselves from the
20159a4a5118SDarrick J. Wong 	 * head of the list.
20169a4a5118SDarrick J. Wong 	 */
2017a5155b87SDarrick J. Wong 	if (old_value == new_agino) {
20188d57c216SDarrick J. Wong 		xfs_buf_mark_corrupt(agibp);
20199a4a5118SDarrick J. Wong 		return -EFSCORRUPTED;
2020a5155b87SDarrick J. Wong 	}
20219a4a5118SDarrick J. Wong 
20229a4a5118SDarrick J. Wong 	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
20239a4a5118SDarrick J. Wong 	offset = offsetof(struct xfs_agi, agi_unlinked) +
20249a4a5118SDarrick J. Wong 			(sizeof(xfs_agino_t) * bucket_index);
20259a4a5118SDarrick J. Wong 	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
20269a4a5118SDarrick J. Wong 	return 0;
20279a4a5118SDarrick J. Wong }
20289a4a5118SDarrick J. Wong 
2029f2fc16a3SDarrick J. Wong /* Set an on-disk inode's next_unlinked pointer. */
2030f2fc16a3SDarrick J. Wong STATIC void
2031f2fc16a3SDarrick J. Wong xfs_iunlink_update_dinode(
2032f2fc16a3SDarrick J. Wong 	struct xfs_trans	*tp,
2033f2fc16a3SDarrick J. Wong 	xfs_agnumber_t		agno,
2034f2fc16a3SDarrick J. Wong 	xfs_agino_t		agino,
2035f2fc16a3SDarrick J. Wong 	struct xfs_buf		*ibp,
2036f2fc16a3SDarrick J. Wong 	struct xfs_dinode	*dip,
2037f2fc16a3SDarrick J. Wong 	struct xfs_imap		*imap,
2038f2fc16a3SDarrick J. Wong 	xfs_agino_t		next_agino)
2039f2fc16a3SDarrick J. Wong {
2040f2fc16a3SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2041f2fc16a3SDarrick J. Wong 	int			offset;
2042f2fc16a3SDarrick J. Wong 
2043f2fc16a3SDarrick J. Wong 	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2044f2fc16a3SDarrick J. Wong 
2045f2fc16a3SDarrick J. Wong 	trace_xfs_iunlink_update_dinode(mp, agno, agino,
2046f2fc16a3SDarrick J. Wong 			be32_to_cpu(dip->di_next_unlinked), next_agino);
2047f2fc16a3SDarrick J. Wong 
2048f2fc16a3SDarrick J. Wong 	dip->di_next_unlinked = cpu_to_be32(next_agino);
2049f2fc16a3SDarrick J. Wong 	offset = imap->im_boffset +
2050f2fc16a3SDarrick J. Wong 			offsetof(struct xfs_dinode, di_next_unlinked);
2051f2fc16a3SDarrick J. Wong 
2052f2fc16a3SDarrick J. Wong 	/* need to recalc the inode CRC if appropriate */
2053f2fc16a3SDarrick J. Wong 	xfs_dinode_calc_crc(mp, dip);
2054f2fc16a3SDarrick J. Wong 	xfs_trans_inode_buf(tp, ibp);
2055f2fc16a3SDarrick J. Wong 	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2056f2fc16a3SDarrick J. Wong }
2057f2fc16a3SDarrick J. Wong 
2058f2fc16a3SDarrick J. Wong /* Set an in-core inode's unlinked pointer and return the old value. */
2059f2fc16a3SDarrick J. Wong STATIC int
2060f2fc16a3SDarrick J. Wong xfs_iunlink_update_inode(
2061f2fc16a3SDarrick J. Wong 	struct xfs_trans	*tp,
2062f2fc16a3SDarrick J. Wong 	struct xfs_inode	*ip,
2063f2fc16a3SDarrick J. Wong 	xfs_agnumber_t		agno,
2064f2fc16a3SDarrick J. Wong 	xfs_agino_t		next_agino,
2065f2fc16a3SDarrick J. Wong 	xfs_agino_t		*old_next_agino)
2066f2fc16a3SDarrick J. Wong {
2067f2fc16a3SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2068f2fc16a3SDarrick J. Wong 	struct xfs_dinode	*dip;
2069f2fc16a3SDarrick J. Wong 	struct xfs_buf		*ibp;
2070f2fc16a3SDarrick J. Wong 	xfs_agino_t		old_value;
2071f2fc16a3SDarrick J. Wong 	int			error;
2072f2fc16a3SDarrick J. Wong 
2073f2fc16a3SDarrick J. Wong 	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2074f2fc16a3SDarrick J. Wong 
2075af9dcddeSChristoph Hellwig 	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
2076f2fc16a3SDarrick J. Wong 	if (error)
2077f2fc16a3SDarrick J. Wong 		return error;
2078af9dcddeSChristoph Hellwig 	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
2079f2fc16a3SDarrick J. Wong 
2080f2fc16a3SDarrick J. Wong 	/* Make sure the old pointer isn't garbage. */
2081f2fc16a3SDarrick J. Wong 	old_value = be32_to_cpu(dip->di_next_unlinked);
2082f2fc16a3SDarrick J. Wong 	if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2083a5155b87SDarrick J. Wong 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2084a5155b87SDarrick J. Wong 				sizeof(*dip), __this_address);
2085f2fc16a3SDarrick J. Wong 		error = -EFSCORRUPTED;
2086f2fc16a3SDarrick J. Wong 		goto out;
2087f2fc16a3SDarrick J. Wong 	}
2088f2fc16a3SDarrick J. Wong 
2089f2fc16a3SDarrick J. Wong 	/*
2090f2fc16a3SDarrick J. Wong 	 * Since we're updating a linked list, we should never find that the
2091f2fc16a3SDarrick J. Wong 	 * current pointer is the same as the new value, unless we're
2092f2fc16a3SDarrick J. Wong 	 * terminating the list.
2093f2fc16a3SDarrick J. Wong 	 */
2094f2fc16a3SDarrick J. Wong 	*old_next_agino = old_value;
2095f2fc16a3SDarrick J. Wong 	if (old_value == next_agino) {
2096a5155b87SDarrick J. Wong 		if (next_agino != NULLAGINO) {
2097a5155b87SDarrick J. Wong 			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2098a5155b87SDarrick J. Wong 					dip, sizeof(*dip), __this_address);
2099f2fc16a3SDarrick J. Wong 			error = -EFSCORRUPTED;
2100a5155b87SDarrick J. Wong 		}
2101f2fc16a3SDarrick J. Wong 		goto out;
2102f2fc16a3SDarrick J. Wong 	}
2103f2fc16a3SDarrick J. Wong 
2104f2fc16a3SDarrick J. Wong 	/* Ok, update the new pointer. */
2105f2fc16a3SDarrick J. Wong 	xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2106f2fc16a3SDarrick J. Wong 			ibp, dip, &ip->i_imap, next_agino);
2107f2fc16a3SDarrick J. Wong 	return 0;
2108f2fc16a3SDarrick J. Wong out:
2109f2fc16a3SDarrick J. Wong 	xfs_trans_brelse(tp, ibp);
2110f2fc16a3SDarrick J. Wong 	return error;
2111f2fc16a3SDarrick J. Wong }
2112f2fc16a3SDarrick J. Wong 
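/*
 * To illustrate the sanity check above with hypothetical inodes A and B:
 * when inserting B ahead of the current bucket head A, B's on-disk
 * next_unlinked should still be NULLAGINO.  Finding that it already equals A
 * means B was somehow already linked into the chain, so the match is flagged
 * as corruption.  The only legitimate "no change" case is writing NULLAGINO
 * into an entry that is already the chain terminator, which is why that case
 * returns without an error.
 */
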
21139a4a5118SDarrick J. Wong /*
2114c4a6bf7fSDarrick J. Wong  * This is called when the inode's link count has gone to 0 or we are creating
2115c4a6bf7fSDarrick J. Wong  * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
211654d7b5c1SDave Chinner  *
211754d7b5c1SDave Chinner  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
211854d7b5c1SDave Chinner  * list when the inode is freed.
21191da177e4SLinus Torvalds  */
212054d7b5c1SDave Chinner STATIC int
21211da177e4SLinus Torvalds xfs_iunlink(
212254d7b5c1SDave Chinner 	struct xfs_trans	*tp,
212354d7b5c1SDave Chinner 	struct xfs_inode	*ip)
21241da177e4SLinus Torvalds {
21255837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
21265837f625SDarrick J. Wong 	struct xfs_agi		*agi;
21275837f625SDarrick J. Wong 	struct xfs_buf		*agibp;
212886bfd375SDarrick J. Wong 	xfs_agino_t		next_agino;
21295837f625SDarrick J. Wong 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
21305837f625SDarrick J. Wong 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
21315837f625SDarrick J. Wong 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
21321da177e4SLinus Torvalds 	int			error;
21331da177e4SLinus Torvalds 
2134c4a6bf7fSDarrick J. Wong 	ASSERT(VFS_I(ip)->i_nlink == 0);
2135c19b3b05SDave Chinner 	ASSERT(VFS_I(ip)->i_mode != 0);
21364664c66cSDarrick J. Wong 	trace_xfs_iunlink(ip);
21371da177e4SLinus Torvalds 
21385837f625SDarrick J. Wong 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
21395837f625SDarrick J. Wong 	error = xfs_read_agi(mp, tp, agno, &agibp);
2140859d7182SVlad Apostolov 	if (error)
21411da177e4SLinus Torvalds 		return error;
2142370c782bSChristoph Hellwig 	agi = agibp->b_addr;
21435e1be0fbSChristoph Hellwig 
21441da177e4SLinus Torvalds 	/*
214586bfd375SDarrick J. Wong 	 * Get the index into the agi hash table for the list this inode will
214686bfd375SDarrick J. Wong 	 * go on.  Make sure the pointer isn't garbage and that this inode
214786bfd375SDarrick J. Wong 	 * isn't already on the list.
21481da177e4SLinus Torvalds 	 */
214986bfd375SDarrick J. Wong 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
215086bfd375SDarrick J. Wong 	if (next_agino == agino ||
2151a5155b87SDarrick J. Wong 	    !xfs_verify_agino_or_null(mp, agno, next_agino)) {
21528d57c216SDarrick J. Wong 		xfs_buf_mark_corrupt(agibp);
215386bfd375SDarrick J. Wong 		return -EFSCORRUPTED;
2154a5155b87SDarrick J. Wong 	}
21551da177e4SLinus Torvalds 
215686bfd375SDarrick J. Wong 	if (next_agino != NULLAGINO) {
2157f2fc16a3SDarrick J. Wong 		xfs_agino_t		old_agino;
2158f2fc16a3SDarrick J. Wong 
21591da177e4SLinus Torvalds 		/*
2160f2fc16a3SDarrick J. Wong 		 * There is already another inode in the bucket, so point this
2161f2fc16a3SDarrick J. Wong 		 * inode to the current head of the list.
21621da177e4SLinus Torvalds 		 */
2163f2fc16a3SDarrick J. Wong 		error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2164f2fc16a3SDarrick J. Wong 				&old_agino);
2165c319b58bSVlad Apostolov 		if (error)
2166c319b58bSVlad Apostolov 			return error;
2167f2fc16a3SDarrick J. Wong 		ASSERT(old_agino == NULLAGINO);
21689b247179SDarrick J. Wong 
21699b247179SDarrick J. Wong 		/*
21709b247179SDarrick J. Wong 		 * agino has been unlinked, add a backref from the next inode
21719b247179SDarrick J. Wong 		 * back to agino.
21729b247179SDarrick J. Wong 		 */
217392a00544SGao Xiang 		error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino);
21749b247179SDarrick J. Wong 		if (error)
21759b247179SDarrick J. Wong 			return error;
21761da177e4SLinus Torvalds 	}
21771da177e4SLinus Torvalds 
21789a4a5118SDarrick J. Wong 	/* Point the head of the list to point to this inode. */
21799a4a5118SDarrick J. Wong 	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
21801da177e4SLinus Torvalds }
21811da177e4SLinus Torvalds 
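/*
 * The AGI keeps XFS_AGI_UNLINKED_BUCKETS (64) unlinked list heads, and the
 * bucket an inode hashes to is simply its agino modulo the bucket count.
 * For example, a hypothetical agino of 131 lands in bucket 3 (131 % 64),
 * and xfs_iunlink() above pushes it onto the front of that bucket's singly
 * linked chain, with NULLAGINO terminating the list.
 */
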
218223ffa52cSDarrick J. Wong /* Return the imap, dinode pointer, and buffer for an inode. */
218323ffa52cSDarrick J. Wong STATIC int
218423ffa52cSDarrick J. Wong xfs_iunlink_map_ino(
218523ffa52cSDarrick J. Wong 	struct xfs_trans	*tp,
218623ffa52cSDarrick J. Wong 	xfs_agnumber_t		agno,
218723ffa52cSDarrick J. Wong 	xfs_agino_t		agino,
218823ffa52cSDarrick J. Wong 	struct xfs_imap		*imap,
218923ffa52cSDarrick J. Wong 	struct xfs_dinode	**dipp,
219023ffa52cSDarrick J. Wong 	struct xfs_buf		**bpp)
219123ffa52cSDarrick J. Wong {
219223ffa52cSDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
219323ffa52cSDarrick J. Wong 	int			error;
219423ffa52cSDarrick J. Wong 
219523ffa52cSDarrick J. Wong 	imap->im_blkno = 0;
219623ffa52cSDarrick J. Wong 	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
219723ffa52cSDarrick J. Wong 	if (error) {
219823ffa52cSDarrick J. Wong 		xfs_warn(mp, "%s: xfs_imap returned error %d.",
219923ffa52cSDarrick J. Wong 				__func__, error);
220023ffa52cSDarrick J. Wong 		return error;
220123ffa52cSDarrick J. Wong 	}
220223ffa52cSDarrick J. Wong 
2203af9dcddeSChristoph Hellwig 	error = xfs_imap_to_bp(mp, tp, imap, bpp);
220423ffa52cSDarrick J. Wong 	if (error) {
220523ffa52cSDarrick J. Wong 		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
220623ffa52cSDarrick J. Wong 				__func__, error);
220723ffa52cSDarrick J. Wong 		return error;
220823ffa52cSDarrick J. Wong 	}
220923ffa52cSDarrick J. Wong 
2210af9dcddeSChristoph Hellwig 	*dipp = xfs_buf_offset(*bpp, imap->im_boffset);
221123ffa52cSDarrick J. Wong 	return 0;
221223ffa52cSDarrick J. Wong }
221323ffa52cSDarrick J. Wong 
221423ffa52cSDarrick J. Wong /*
221523ffa52cSDarrick J. Wong  * Walk the unlinked chain from @head_agino until we find the inode that
221623ffa52cSDarrick J. Wong  * points to @target_agino.  Return the inode number, map, dinode pointer,
221723ffa52cSDarrick J. Wong  * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
221823ffa52cSDarrick J. Wong  *
221923ffa52cSDarrick J. Wong  * @tp, @agno, @pag, @head_agino, and @target_agino are input parameters.
222023ffa52cSDarrick J. Wong  * @agino, @imap, @dipp, and @bpp are all output parameters.
222123ffa52cSDarrick J. Wong  *
222223ffa52cSDarrick J. Wong  * Do not call this function if @target_agino is the head of the list.
222323ffa52cSDarrick J. Wong  */
222423ffa52cSDarrick J. Wong STATIC int
222523ffa52cSDarrick J. Wong xfs_iunlink_map_prev(
222623ffa52cSDarrick J. Wong 	struct xfs_trans	*tp,
222723ffa52cSDarrick J. Wong 	xfs_agnumber_t		agno,
222823ffa52cSDarrick J. Wong 	xfs_agino_t		head_agino,
222923ffa52cSDarrick J. Wong 	xfs_agino_t		target_agino,
223023ffa52cSDarrick J. Wong 	xfs_agino_t		*agino,
223123ffa52cSDarrick J. Wong 	struct xfs_imap		*imap,
223223ffa52cSDarrick J. Wong 	struct xfs_dinode	**dipp,
22339b247179SDarrick J. Wong 	struct xfs_buf		**bpp,
22349b247179SDarrick J. Wong 	struct xfs_perag	*pag)
223523ffa52cSDarrick J. Wong {
223623ffa52cSDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
223723ffa52cSDarrick J. Wong 	xfs_agino_t		next_agino;
223823ffa52cSDarrick J. Wong 	int			error;
223923ffa52cSDarrick J. Wong 
224023ffa52cSDarrick J. Wong 	ASSERT(head_agino != target_agino);
224123ffa52cSDarrick J. Wong 	*bpp = NULL;
224223ffa52cSDarrick J. Wong 
22439b247179SDarrick J. Wong 	/* See if our backref cache can find it faster. */
22449b247179SDarrick J. Wong 	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
22459b247179SDarrick J. Wong 	if (*agino != NULLAGINO) {
22469b247179SDarrick J. Wong 		error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
22479b247179SDarrick J. Wong 		if (error)
22489b247179SDarrick J. Wong 			return error;
22499b247179SDarrick J. Wong 
22509b247179SDarrick J. Wong 		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
22519b247179SDarrick J. Wong 			return 0;
22529b247179SDarrick J. Wong 
22539b247179SDarrick J. Wong 		/*
22549b247179SDarrick J. Wong 		 * If we get here the cache contents were corrupt, so drop the
22559b247179SDarrick J. Wong 		 * buffer and fall back to walking the bucket list.
22569b247179SDarrick J. Wong 		 */
22579b247179SDarrick J. Wong 		xfs_trans_brelse(tp, *bpp);
22589b247179SDarrick J. Wong 		*bpp = NULL;
22599b247179SDarrick J. Wong 		WARN_ON_ONCE(1);
22609b247179SDarrick J. Wong 	}
22619b247179SDarrick J. Wong 
22629b247179SDarrick J. Wong 	trace_xfs_iunlink_map_prev_fallback(mp, agno);
22639b247179SDarrick J. Wong 
22649b247179SDarrick J. Wong 	/* Otherwise, walk the entire bucket until we find it. */
226523ffa52cSDarrick J. Wong 	next_agino = head_agino;
226623ffa52cSDarrick J. Wong 	while (next_agino != target_agino) {
226723ffa52cSDarrick J. Wong 		xfs_agino_t	unlinked_agino;
226823ffa52cSDarrick J. Wong 
226923ffa52cSDarrick J. Wong 		if (*bpp)
227023ffa52cSDarrick J. Wong 			xfs_trans_brelse(tp, *bpp);
227123ffa52cSDarrick J. Wong 
227223ffa52cSDarrick J. Wong 		*agino = next_agino;
227323ffa52cSDarrick J. Wong 		error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
227423ffa52cSDarrick J. Wong 				bpp);
227523ffa52cSDarrick J. Wong 		if (error)
227623ffa52cSDarrick J. Wong 			return error;
227723ffa52cSDarrick J. Wong 
227823ffa52cSDarrick J. Wong 		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
227923ffa52cSDarrick J. Wong 		/*
228023ffa52cSDarrick J. Wong 		 * Make sure this pointer is valid and doesn't create an
228123ffa52cSDarrick J. Wong 		 * obvious infinite loop.
228223ffa52cSDarrick J. Wong 		 */
228323ffa52cSDarrick J. Wong 		if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
228423ffa52cSDarrick J. Wong 		    next_agino == unlinked_agino) {
228523ffa52cSDarrick J. Wong 			XFS_CORRUPTION_ERROR(__func__,
228623ffa52cSDarrick J. Wong 					XFS_ERRLEVEL_LOW, mp,
228723ffa52cSDarrick J. Wong 					*dipp, sizeof(**dipp));
228823ffa52cSDarrick J. Wong 			error = -EFSCORRUPTED;
228923ffa52cSDarrick J. Wong 			return error;
229023ffa52cSDarrick J. Wong 		}
229123ffa52cSDarrick J. Wong 		next_agino = unlinked_agino;
229223ffa52cSDarrick J. Wong 	}
229323ffa52cSDarrick J. Wong 
229423ffa52cSDarrick J. Wong 	return 0;
229523ffa52cSDarrick J. Wong }
229623ffa52cSDarrick J. Wong 
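/*
 * Unlinked list removal works like deleting from a singly linked list.
 * Suppose (hypothetically) bucket 3 holds the chain A -> B -> C -> NULLAGINO
 * and B is being freed: B's own next_unlinked is cleared to NULLAGINO first,
 * then the previous inode A is located (via the backref cache, or by walking
 * the bucket with xfs_iunlink_map_prev()) and A's next_unlinked is rewritten
 * to point at C.  If B were the bucket head instead, the AGI bucket itself
 * would simply be repointed at C.
 */
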
22971da177e4SLinus Torvalds /*
22981da177e4SLinus Torvalds  * Pull the on-disk inode from the AGI unlinked list.
22991da177e4SLinus Torvalds  */
23001da177e4SLinus Torvalds STATIC int
23011da177e4SLinus Torvalds xfs_iunlink_remove(
23025837f625SDarrick J. Wong 	struct xfs_trans	*tp,
23035837f625SDarrick J. Wong 	struct xfs_inode	*ip)
23041da177e4SLinus Torvalds {
23055837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
23065837f625SDarrick J. Wong 	struct xfs_agi		*agi;
23075837f625SDarrick J. Wong 	struct xfs_buf		*agibp;
23085837f625SDarrick J. Wong 	struct xfs_buf		*last_ibp;
23095837f625SDarrick J. Wong 	struct xfs_dinode	*last_dip = NULL;
23105837f625SDarrick J. Wong 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
23115837f625SDarrick J. Wong 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
23121da177e4SLinus Torvalds 	xfs_agino_t		next_agino;
2313b1d2a068SDarrick J. Wong 	xfs_agino_t		head_agino;
23145837f625SDarrick J. Wong 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
23151da177e4SLinus Torvalds 	int			error;
23161da177e4SLinus Torvalds 
23174664c66cSDarrick J. Wong 	trace_xfs_iunlink_remove(ip);
23184664c66cSDarrick J. Wong 
23195837f625SDarrick J. Wong 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
23205e1be0fbSChristoph Hellwig 	error = xfs_read_agi(mp, tp, agno, &agibp);
23215e1be0fbSChristoph Hellwig 	if (error)
23221da177e4SLinus Torvalds 		return error;
2323370c782bSChristoph Hellwig 	agi = agibp->b_addr;
23245e1be0fbSChristoph Hellwig 
23251da177e4SLinus Torvalds 	/*
232686bfd375SDarrick J. Wong 	 * Get the index into the agi hash table for the list this inode is
232786bfd375SDarrick J. Wong 	 * on.  Make sure the head pointer isn't garbage.
23281da177e4SLinus Torvalds 	 */
2329b1d2a068SDarrick J. Wong 	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2330b1d2a068SDarrick J. Wong 	if (!xfs_verify_agino(mp, agno, head_agino)) {
2331d2e73665SDarrick J. Wong 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2332d2e73665SDarrick J. Wong 				agi, sizeof(*agi));
2333d2e73665SDarrick J. Wong 		return -EFSCORRUPTED;
2334d2e73665SDarrick J. Wong 	}
23351da177e4SLinus Torvalds 
23361da177e4SLinus Torvalds 	/*
2337b1d2a068SDarrick J. Wong 	 * Set our inode's next_unlinked pointer to NULLAGINO and then return
2338b1d2a068SDarrick J. Wong 	 * the old pointer value so that we can update whatever was previous
2339b1d2a068SDarrick J. Wong 	 * to us in the list to point to whatever was next in the list.
23401da177e4SLinus Torvalds 	 */
2341b1d2a068SDarrick J. Wong 	error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2342f2fc16a3SDarrick J. Wong 	if (error)
23431da177e4SLinus Torvalds 		return error;
23449a4a5118SDarrick J. Wong 
23459b247179SDarrick J. Wong 	/*
23469b247179SDarrick J. Wong 	 * If there was a backref pointing from the next inode back to this
23479b247179SDarrick J. Wong 	 * one, remove it because we've removed this inode from the list.
23489b247179SDarrick J. Wong 	 *
23499b247179SDarrick J. Wong 	 * Later, if this inode was in the middle of the list we'll update
23509b247179SDarrick J. Wong 	 * this inode's backref to point from the next inode.
23519b247179SDarrick J. Wong 	 */
23529b247179SDarrick J. Wong 	if (next_agino != NULLAGINO) {
235392a00544SGao Xiang 		error = xfs_iunlink_change_backref(agibp->b_pag, next_agino,
23549b247179SDarrick J. Wong 				NULLAGINO);
23559b247179SDarrick J. Wong 		if (error)
235692a00544SGao Xiang 			return error;
23579b247179SDarrick J. Wong 	}
23589b247179SDarrick J. Wong 
235992a00544SGao Xiang 	if (head_agino != agino) {
2360f2fc16a3SDarrick J. Wong 		struct xfs_imap	imap;
2361f2fc16a3SDarrick J. Wong 		xfs_agino_t	prev_agino;
2362f2fc16a3SDarrick J. Wong 
236323ffa52cSDarrick J. Wong 		/* We need to search the list for the inode being freed. */
2364b1d2a068SDarrick J. Wong 		error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
23659b247179SDarrick J. Wong 				&prev_agino, &imap, &last_dip, &last_ibp,
236692a00544SGao Xiang 				agibp->b_pag);
236723ffa52cSDarrick J. Wong 		if (error)
236892a00544SGao Xiang 			return error;
2369475ee413SChristoph Hellwig 
2370f2fc16a3SDarrick J. Wong 		/* Point the previous inode on the list to the next inode. */
2371f2fc16a3SDarrick J. Wong 		xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2372f2fc16a3SDarrick J. Wong 				last_dip, &imap, next_agino);
23739b247179SDarrick J. Wong 
23749b247179SDarrick J. Wong 		/*
23759b247179SDarrick J. Wong 		 * Now we deal with the backref for this inode.  If this inode
23769b247179SDarrick J. Wong 		 * pointed at a real inode, change the backref that pointed to
23779b247179SDarrick J. Wong 		 * us to point to our old next.  If this inode was the end of
23789b247179SDarrick J. Wong 		 * the list, delete the backref that pointed to us.  Note that
23799b247179SDarrick J. Wong 		 * change_backref takes care of deleting the backref if
23809b247179SDarrick J. Wong 		 * next_agino is NULLAGINO.
23819b247179SDarrick J. Wong 		 */
238292a00544SGao Xiang 		return xfs_iunlink_change_backref(agibp->b_pag, agino,
238392a00544SGao Xiang 				next_agino);
23841da177e4SLinus Torvalds 	}
23859b247179SDarrick J. Wong 
238692a00544SGao Xiang 	/* Point the head of the list to the next unlinked inode. */
238792a00544SGao Xiang 	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
238892a00544SGao Xiang 			next_agino);
23891da177e4SLinus Torvalds }
23901da177e4SLinus Torvalds 
23915b3eed75SDave Chinner /*
239271e3e356SDave Chinner  * Look up the specified inode number and, if it is not already marked
239371e3e356SDave Chinner  * XFS_ISTALE, mark it stale. We should only find clean inodes in this lookup
239471e3e356SDave Chinner  * that aren't already stale.
23955806165aSDave Chinner  */
239671e3e356SDave Chinner static void
239771e3e356SDave Chinner xfs_ifree_mark_inode_stale(
239871e3e356SDave Chinner 	struct xfs_buf		*bp,
23995806165aSDave Chinner 	struct xfs_inode	*free_ip,
2400d9fdd0adSBrian Foster 	xfs_ino_t		inum)
24015806165aSDave Chinner {
240271e3e356SDave Chinner 	struct xfs_mount	*mp = bp->b_mount;
240371e3e356SDave Chinner 	struct xfs_perag	*pag = bp->b_pag;
240471e3e356SDave Chinner 	struct xfs_inode_log_item *iip;
24055806165aSDave Chinner 	struct xfs_inode	*ip;
24065806165aSDave Chinner 
24075806165aSDave Chinner retry:
24085806165aSDave Chinner 	rcu_read_lock();
24095806165aSDave Chinner 	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
24105806165aSDave Chinner 
24115806165aSDave Chinner 	/* Inode not in memory, nothing to do */
241271e3e356SDave Chinner 	if (!ip) {
241371e3e356SDave Chinner 		rcu_read_unlock();
241471e3e356SDave Chinner 		return;
241571e3e356SDave Chinner 	}
24165806165aSDave Chinner 
24175806165aSDave Chinner 	/*
24185806165aSDave Chinner 	 * Because this is an RCU-protected lookup, we could find a recently
24195806165aSDave Chinner 	 * freed or even reallocated inode during the lookup. We need to check
24205806165aSDave Chinner 	 * under the i_flags_lock for a valid inode here. Skip it if it is not
24215806165aSDave Chinner 	 * valid, is the wrong inode, or is stale.
24225806165aSDave Chinner 	 */
24235806165aSDave Chinner 	spin_lock(&ip->i_flags_lock);
2424718ecc50SDave Chinner 	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2425718ecc50SDave Chinner 		goto out_iflags_unlock;
24265806165aSDave Chinner 
24275806165aSDave Chinner 	/*
24285806165aSDave Chinner 	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
24295806165aSDave Chinner 	 * other inodes that we did not find in the list attached to the buffer
24305806165aSDave Chinner 	 * and are not already marked stale. If we can't lock it, back off and
24315806165aSDave Chinner 	 * retry.
24325806165aSDave Chinner 	 */
24335806165aSDave Chinner 	if (ip != free_ip) {
24345806165aSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
243571e3e356SDave Chinner 			spin_unlock(&ip->i_flags_lock);
24365806165aSDave Chinner 			rcu_read_unlock();
24375806165aSDave Chinner 			delay(1);
24385806165aSDave Chinner 			goto retry;
24395806165aSDave Chinner 		}
24405806165aSDave Chinner 	}
244171e3e356SDave Chinner 	ip->i_flags |= XFS_ISTALE;
24425806165aSDave Chinner 
244371e3e356SDave Chinner 	/*
2444718ecc50SDave Chinner 	 * If the inode is flushing, it is already attached to the buffer.  All
244571e3e356SDave Chinner 	 * we need to do here is mark the inode stale so buffer IO completion
244671e3e356SDave Chinner 	 * will remove it from the AIL.
244771e3e356SDave Chinner 	 */
244871e3e356SDave Chinner 	iip = ip->i_itemp;
2449718ecc50SDave Chinner 	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
245071e3e356SDave Chinner 		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
245171e3e356SDave Chinner 		ASSERT(iip->ili_last_fields);
245271e3e356SDave Chinner 		goto out_iunlock;
245371e3e356SDave Chinner 	}
24545806165aSDave Chinner 
24555806165aSDave Chinner 	/*
245648d55e2aSDave Chinner 	 * Inodes not attached to the buffer can be released immediately.
245748d55e2aSDave Chinner 	 * Everything else has to go through xfs_iflush_abort() on journal
245848d55e2aSDave Chinner 	 * commit as the flock synchronises removal of the inode from the
245948d55e2aSDave Chinner 	 * cluster buffer against inode reclaim.
24605806165aSDave Chinner 	 */
2461718ecc50SDave Chinner 	if (!iip || list_empty(&iip->ili_item.li_bio_list))
246271e3e356SDave Chinner 		goto out_iunlock;
2463718ecc50SDave Chinner 
2464718ecc50SDave Chinner 	__xfs_iflags_set(ip, XFS_IFLUSHING);
2465718ecc50SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2466718ecc50SDave Chinner 	rcu_read_unlock();
24675806165aSDave Chinner 
246871e3e356SDave Chinner 	/* we have a dirty inode in memory that has not yet been flushed. */
246971e3e356SDave Chinner 	spin_lock(&iip->ili_lock);
247071e3e356SDave Chinner 	iip->ili_last_fields = iip->ili_fields;
247171e3e356SDave Chinner 	iip->ili_fields = 0;
247271e3e356SDave Chinner 	iip->ili_fsync_fields = 0;
247371e3e356SDave Chinner 	spin_unlock(&iip->ili_lock);
247471e3e356SDave Chinner 	ASSERT(iip->ili_last_fields);
247571e3e356SDave Chinner 
2476718ecc50SDave Chinner 	if (ip != free_ip)
2477718ecc50SDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2478718ecc50SDave Chinner 	return;
2479718ecc50SDave Chinner 
248071e3e356SDave Chinner out_iunlock:
248171e3e356SDave Chinner 	if (ip != free_ip)
248271e3e356SDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2483718ecc50SDave Chinner out_iflags_unlock:
2484718ecc50SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2485718ecc50SDave Chinner 	rcu_read_unlock();
24865806165aSDave Chinner }
24875806165aSDave Chinner 
24885806165aSDave Chinner /*
24890b8182dbSZhi Yong Wu  * A big issue when freeing the inode cluster is that we _cannot_ skip any
24905b3eed75SDave Chinner  * inodes that are in memory - they all must be marked stale and attached to
24915b3eed75SDave Chinner  * the cluster buffer.
24925b3eed75SDave Chinner  */
24932a30f36dSChandra Seetharaman STATIC int
24941da177e4SLinus Torvalds xfs_ifree_cluster(
249571e3e356SDave Chinner 	struct xfs_inode	*free_ip,
249671e3e356SDave Chinner 	struct xfs_trans	*tp,
249709b56604SBrian Foster 	struct xfs_icluster	*xic)
24981da177e4SLinus Torvalds {
249971e3e356SDave Chinner 	struct xfs_mount	*mp = free_ip->i_mount;
250071e3e356SDave Chinner 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
250171e3e356SDave Chinner 	struct xfs_buf		*bp;
250271e3e356SDave Chinner 	xfs_daddr_t		blkno;
250371e3e356SDave Chinner 	xfs_ino_t		inum = xic->first_ino;
25041da177e4SLinus Torvalds 	int			nbufs;
25055b257b4aSDave Chinner 	int			i, j;
25063cdaa189SBrian Foster 	int			ioffset;
2507ce92464cSDarrick J. Wong 	int			error;
25081da177e4SLinus Torvalds 
2509ef325959SDarrick J. Wong 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
25101da177e4SLinus Torvalds 
2511ef325959SDarrick J. Wong 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
251209b56604SBrian Foster 		/*
251309b56604SBrian Foster 		 * The allocation bitmap tells us which inodes of the chunk were
251409b56604SBrian Foster 		 * physically allocated. Skip the cluster if an inode falls into
251509b56604SBrian Foster 		 * a sparse region.
251609b56604SBrian Foster 		 */
25173cdaa189SBrian Foster 		ioffset = inum - xic->first_ino;
25183cdaa189SBrian Foster 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2519ef325959SDarrick J. Wong 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
252009b56604SBrian Foster 			continue;
252109b56604SBrian Foster 		}
252209b56604SBrian Foster 
25231da177e4SLinus Torvalds 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
25241da177e4SLinus Torvalds 					 XFS_INO_TO_AGBNO(mp, inum));
25251da177e4SLinus Torvalds 
25261da177e4SLinus Torvalds 		/*
25275b257b4aSDave Chinner 		 * We obtain and lock the backing buffer first in the process
2528718ecc50SDave Chinner 		 * here to ensure dirty inodes attached to the buffer remain in
2529718ecc50SDave Chinner 		 * the flushing state while we mark them stale.
2530718ecc50SDave Chinner 		 *
25315b257b4aSDave Chinner 		 * If we scan the in-memory inodes first, then buffer IO can
25325b257b4aSDave Chinner 		 * complete before we get a lock on it, and hence we may fail
25335b257b4aSDave Chinner 		 * to mark all the active inodes on the buffer stale.
25341da177e4SLinus Torvalds 		 */
2535ce92464cSDarrick J. Wong 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2536ef325959SDarrick J. Wong 				mp->m_bsize * igeo->blocks_per_cluster,
2537ce92464cSDarrick J. Wong 				XBF_UNMAPPED, &bp);
253871e3e356SDave Chinner 		if (error)
2539ce92464cSDarrick J. Wong 			return error;
2540b0f539deSDave Chinner 
2541b0f539deSDave Chinner 		/*
2542b0f539deSDave Chinner 		 * This buffer may not have been correctly initialised as we
2543b0f539deSDave Chinner 		 * didn't read it from disk. That's not important because we are
2544b0f539deSDave Chinner 		 * only using it to mark the buffer as stale in the log, and to
2545b0f539deSDave Chinner 		 * attach stale cached inodes to it. That means it will never be
2546b0f539deSDave Chinner 		 * dispatched for IO. If it is, we want to know about it, and we
2547b0f539deSDave Chinner 		 * want it to fail. We can achieve this by adding a write
2548b0f539deSDave Chinner 		 * verifier to the buffer.
2549b0f539deSDave Chinner 		 */
25501813dd64SDave Chinner 		bp->b_ops = &xfs_inode_buf_ops;
2551b0f539deSDave Chinner 
25525b257b4aSDave Chinner 		/*
255371e3e356SDave Chinner 		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
255471e3e356SDave Chinner 		 * too. This requires lookups, and will skip inodes that we've
255571e3e356SDave Chinner 		 * already marked XFS_ISTALE.
25565b257b4aSDave Chinner 		 */
255771e3e356SDave Chinner 		for (i = 0; i < igeo->inodes_per_cluster; i++)
255871e3e356SDave Chinner 			xfs_ifree_mark_inode_stale(bp, free_ip, inum + i);
25591da177e4SLinus Torvalds 
25601da177e4SLinus Torvalds 		xfs_trans_stale_inode_buf(tp, bp);
25611da177e4SLinus Torvalds 		xfs_trans_binval(tp, bp);
25621da177e4SLinus Torvalds 	}
25632a30f36dSChandra Seetharaman 	return 0;
25641da177e4SLinus Torvalds }
25651da177e4SLinus Torvalds 
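/*
 * As a rough illustration of the loop in xfs_ifree_cluster() with made-up
 * geometry: if an inode chunk spans igeo->ialloc_blks == 8 blocks and each
 * inode cluster buffer covers igeo->blocks_per_cluster == 2 blocks, then
 * nbufs == 4 buffers are grabbed and invalidated, inum advancing by
 * igeo->inodes_per_cluster inodes per pass, and any cluster whose inodes
 * fall in a sparse (never physically allocated) region of the chunk is
 * skipped via the xic->alloc bitmap.
 */
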
25661da177e4SLinus Torvalds /*
25671da177e4SLinus Torvalds  * This is called to return an inode to the inode free list.
25681da177e4SLinus Torvalds  * The inode should already be truncated to 0 length and have
25691da177e4SLinus Torvalds  * no pages associated with it.  This routine also assumes that
25701da177e4SLinus Torvalds  * the inode is already a part of the transaction.
25711da177e4SLinus Torvalds  *
25721da177e4SLinus Torvalds  * The on-disk copy of the inode will have been added to the list
25731da177e4SLinus Torvalds  * of unlinked inodes in the AGI. We need to remove the inode from
25741da177e4SLinus Torvalds  * that list atomically with respect to freeing it here.
25751da177e4SLinus Torvalds  */
25761da177e4SLinus Torvalds int
25771da177e4SLinus Torvalds xfs_ifree(
25780e0417f3SBrian Foster 	struct xfs_trans	*tp,
25790e0417f3SBrian Foster 	struct xfs_inode	*ip)
25801da177e4SLinus Torvalds {
25811da177e4SLinus Torvalds 	int			error;
258209b56604SBrian Foster 	struct xfs_icluster	xic = { 0 };
25831319ebefSDave Chinner 	struct xfs_inode_log_item *iip = ip->i_itemp;
25841da177e4SLinus Torvalds 
2585579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
258654d7b5c1SDave Chinner 	ASSERT(VFS_I(ip)->i_nlink == 0);
2587daf83964SChristoph Hellwig 	ASSERT(ip->i_df.if_nextents == 0);
258813d2c10bSChristoph Hellwig 	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
25896e73a545SChristoph Hellwig 	ASSERT(ip->i_nblocks == 0);
25901da177e4SLinus Torvalds 
25911da177e4SLinus Torvalds 	/*
25921da177e4SLinus Torvalds 	 * Pull the on-disk inode from the AGI unlinked list.
25931da177e4SLinus Torvalds 	 */
25941da177e4SLinus Torvalds 	error = xfs_iunlink_remove(tp, ip);
25951baaed8fSDave Chinner 	if (error)
25961da177e4SLinus Torvalds 		return error;
25971da177e4SLinus Torvalds 
25980e0417f3SBrian Foster 	error = xfs_difree(tp, ip->i_ino, &xic);
25991baaed8fSDave Chinner 	if (error)
26001da177e4SLinus Torvalds 		return error;
26011baaed8fSDave Chinner 
2602b2c20045SChristoph Hellwig 	/*
2603b2c20045SChristoph Hellwig 	 * Free any local-format data sitting around before we reset the
2604b2c20045SChristoph Hellwig 	 * data fork to extents format.  Note that the attr fork data has
2605b2c20045SChristoph Hellwig 	 * already been freed by xfs_attr_inactive.
2606b2c20045SChristoph Hellwig 	 */
2607f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2608b2c20045SChristoph Hellwig 		kmem_free(ip->i_df.if_u1.if_data);
2609b2c20045SChristoph Hellwig 		ip->i_df.if_u1.if_data = NULL;
2610b2c20045SChristoph Hellwig 		ip->i_df.if_bytes = 0;
2611b2c20045SChristoph Hellwig 	}
261298c4f78dSDarrick J. Wong 
2613c19b3b05SDave Chinner 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
26141da177e4SLinus Torvalds 	ip->i_d.di_flags = 0;
2615f93e5436SDarrick J. Wong 	ip->i_d.di_flags2 = ip->i_mount->m_ino_geo.new_diflags2;
26161da177e4SLinus Torvalds 	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
2617f7e67b20SChristoph Hellwig 	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
26189b3beb02SChristoph Hellwig 	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
26199b3beb02SChristoph Hellwig 		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2620dc1baa71SEric Sandeen 
2621dc1baa71SEric Sandeen 	/* Don't attempt to replay owner changes for a deleted inode */
26221319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
26231319ebefSDave Chinner 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
26241319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
2625dc1baa71SEric Sandeen 
26261da177e4SLinus Torvalds 	/*
26271da177e4SLinus Torvalds 	 * Bump the generation count so no one will be confused
26281da177e4SLinus Torvalds 	 * by reincarnations of this inode.
26291da177e4SLinus Torvalds 	 */
26309e9a2674SDave Chinner 	VFS_I(ip)->i_generation++;
26311da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
26321da177e4SLinus Torvalds 
263309b56604SBrian Foster 	if (xic.deleted)
263409b56604SBrian Foster 		error = xfs_ifree_cluster(ip, tp, &xic);
26351da177e4SLinus Torvalds 
26362a30f36dSChandra Seetharaman 	return error;
26371da177e4SLinus Torvalds }
26381da177e4SLinus Torvalds 
26391da177e4SLinus Torvalds /*
264060ec6783SChristoph Hellwig  * This is called to unpin an inode.  The caller must have the inode locked
264160ec6783SChristoph Hellwig  * in at least shared mode so that the buffer cannot be subsequently pinned
264260ec6783SChristoph Hellwig  * once someone is waiting for it to be unpinned.
26431da177e4SLinus Torvalds  */
264460ec6783SChristoph Hellwig static void
2645f392e631SChristoph Hellwig xfs_iunpin(
264660ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
2647a3f74ffbSDavid Chinner {
2648579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2649a3f74ffbSDavid Chinner 
26504aaf15d1SDave Chinner 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
26514aaf15d1SDave Chinner 
2652a3f74ffbSDavid Chinner 	/* Give the log a push to start the unpinning I/O */
2653656de4ffSChristoph Hellwig 	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
2654a14a348bSChristoph Hellwig 
2655a3f74ffbSDavid Chinner }
2656a3f74ffbSDavid Chinner 
2657f392e631SChristoph Hellwig static void
2658f392e631SChristoph Hellwig __xfs_iunpin_wait(
2659f392e631SChristoph Hellwig 	struct xfs_inode	*ip)
2660f392e631SChristoph Hellwig {
2661f392e631SChristoph Hellwig 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2662f392e631SChristoph Hellwig 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2663f392e631SChristoph Hellwig 
2664f392e631SChristoph Hellwig 	xfs_iunpin(ip);
2665f392e631SChristoph Hellwig 
2666f392e631SChristoph Hellwig 	do {
266721417136SIngo Molnar 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2668f392e631SChristoph Hellwig 		if (xfs_ipincount(ip))
2669f392e631SChristoph Hellwig 			io_schedule();
2670f392e631SChristoph Hellwig 	} while (xfs_ipincount(ip));
267121417136SIngo Molnar 	finish_wait(wq, &wait.wq_entry);
2672f392e631SChristoph Hellwig }
2673f392e631SChristoph Hellwig 
2674777df5afSDave Chinner void
26751da177e4SLinus Torvalds xfs_iunpin_wait(
267660ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
26771da177e4SLinus Torvalds {
2678f392e631SChristoph Hellwig 	if (xfs_ipincount(ip))
2679f392e631SChristoph Hellwig 		__xfs_iunpin_wait(ip);
26801da177e4SLinus Torvalds }
26811da177e4SLinus Torvalds 
268227320369SDave Chinner /*
268327320369SDave Chinner  * Removing an inode from the namespace involves removing the directory entry
268427320369SDave Chinner  * and dropping the link count on the inode. Removing the directory entry can
268527320369SDave Chinner  * result in locking an AGF (directory blocks were freed) and dropping the link
268627320369SDave Chinner  * count can result in placing the inode on an unlinked list, which results in
268727320369SDave Chinner  * locking an AGI.
268827320369SDave Chinner  *
268927320369SDave Chinner  * The big problem here is that we have an ordering constraint on AGF and AGI
269027320369SDave Chinner  * locking - inode allocation locks the AGI, then can allocate a new extent for
269127320369SDave Chinner  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
269227320369SDave Chinner  * removes the inode from the unlinked list, requiring that we lock the AGI
269327320369SDave Chinner  * first, and then freeing the inode can result in an inode chunk being freed
269427320369SDave Chinner  * and hence freeing disk space requiring that we lock an AGF.
269527320369SDave Chinner  *
269627320369SDave Chinner  * Hence the ordering that is imposed by other parts of the code is AGI before
269727320369SDave Chinner  * AGF. This means we cannot remove the directory entry before we drop the inode
269827320369SDave Chinner  * reference count and put it on the unlinked list as this results in a lock
269927320369SDave Chinner  * order of AGF then AGI, and this can deadlock against inode allocation and
270027320369SDave Chinner  * freeing. Therefore we must drop the link counts before we remove the
270127320369SDave Chinner  * directory entry.
270227320369SDave Chinner  *
270327320369SDave Chinner  * This is still safe from a transactional point of view - it is not until we
2704310a75a3SDarrick J. Wong  * get to xfs_defer_finish() that we have the possibility of multiple
270527320369SDave Chinner  * transactions in this operation. Hence as long as we remove the directory
270627320369SDave Chinner  * entry and drop the link count in the first transaction of the remove
270727320369SDave Chinner  * operation, there are no transactional constraints on the ordering here.
270827320369SDave Chinner  */
2709c24b5dfaSDave Chinner int
2710c24b5dfaSDave Chinner xfs_remove(
2711c24b5dfaSDave Chinner 	xfs_inode_t             *dp,
2712c24b5dfaSDave Chinner 	struct xfs_name		*name,
2713c24b5dfaSDave Chinner 	xfs_inode_t		*ip)
2714c24b5dfaSDave Chinner {
2715c24b5dfaSDave Chinner 	xfs_mount_t		*mp = dp->i_mount;
2716c24b5dfaSDave Chinner 	xfs_trans_t             *tp = NULL;
2717c19b3b05SDave Chinner 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2718c24b5dfaSDave Chinner 	int                     error = 0;
2719c24b5dfaSDave Chinner 	uint			resblks;
2720c24b5dfaSDave Chinner 
2721c24b5dfaSDave Chinner 	trace_xfs_remove(dp, name);
2722c24b5dfaSDave Chinner 
2723c24b5dfaSDave Chinner 	if (XFS_FORCED_SHUTDOWN(mp))
27242451337dSDave Chinner 		return -EIO;
2725c24b5dfaSDave Chinner 
2726c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(dp);
2727c24b5dfaSDave Chinner 	if (error)
2728c24b5dfaSDave Chinner 		goto std_return;
2729c24b5dfaSDave Chinner 
2730c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
2731c24b5dfaSDave Chinner 	if (error)
2732c24b5dfaSDave Chinner 		goto std_return;
2733c24b5dfaSDave Chinner 
2734c24b5dfaSDave Chinner 	/*
2735c24b5dfaSDave Chinner 	 * We try to get the real space reservation first,
2736c24b5dfaSDave Chinner 	 * allowing for directory btree deletion(s) implying
2737c24b5dfaSDave Chinner 	 * possible bmap insert(s).  If we can't get the space
2738c24b5dfaSDave Chinner 	 * reservation then we use 0 instead, and avoid the bmap
2739c24b5dfaSDave Chinner 	 * btree insert(s) in the directory code: if a bmap insert
2740c24b5dfaSDave Chinner 	 * would otherwise be needed, the directory code instead
2741c24b5dfaSDave Chinner 	 * trims the LAST block from the directory.
2742c24b5dfaSDave Chinner 	 */
2743c24b5dfaSDave Chinner 	resblks = XFS_REMOVE_SPACE_RES(mp);
2744253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
27452451337dSDave Chinner 	if (error == -ENOSPC) {
2746c24b5dfaSDave Chinner 		resblks = 0;
2747253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2748253f4911SChristoph Hellwig 				&tp);
2749c24b5dfaSDave Chinner 	}
2750c24b5dfaSDave Chinner 	if (error) {
27512451337dSDave Chinner 		ASSERT(error != -ENOSPC);
2752253f4911SChristoph Hellwig 		goto std_return;
2753c24b5dfaSDave Chinner 	}
2754c24b5dfaSDave Chinner 
27557c2d238aSDarrick J. Wong 	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2756c24b5dfaSDave Chinner 
275765523218SChristoph Hellwig 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2758c24b5dfaSDave Chinner 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2759c24b5dfaSDave Chinner 
2760c24b5dfaSDave Chinner 	/*
2761c24b5dfaSDave Chinner 	 * If we're removing a directory perform some additional validation.
2762c24b5dfaSDave Chinner 	 */
2763c24b5dfaSDave Chinner 	if (is_dir) {
276454d7b5c1SDave Chinner 		ASSERT(VFS_I(ip)->i_nlink >= 2);
276554d7b5c1SDave Chinner 		if (VFS_I(ip)->i_nlink != 2) {
27662451337dSDave Chinner 			error = -ENOTEMPTY;
2767c24b5dfaSDave Chinner 			goto out_trans_cancel;
2768c24b5dfaSDave Chinner 		}
2769c24b5dfaSDave Chinner 		if (!xfs_dir_isempty(ip)) {
27702451337dSDave Chinner 			error = -ENOTEMPTY;
2771c24b5dfaSDave Chinner 			goto out_trans_cancel;
2772c24b5dfaSDave Chinner 		}
2773c24b5dfaSDave Chinner 
277427320369SDave Chinner 		/* Drop the link from ip's "..".  */
2775c24b5dfaSDave Chinner 		error = xfs_droplink(tp, dp);
2776c24b5dfaSDave Chinner 		if (error)
277727320369SDave Chinner 			goto out_trans_cancel;
2778c24b5dfaSDave Chinner 
277927320369SDave Chinner 		/* Drop the "." link from ip to self.  */
2780c24b5dfaSDave Chinner 		error = xfs_droplink(tp, ip);
2781c24b5dfaSDave Chinner 		if (error)
278227320369SDave Chinner 			goto out_trans_cancel;
2783c24b5dfaSDave Chinner 	} else {
2784c24b5dfaSDave Chinner 		/*
2785c24b5dfaSDave Chinner 		 * When removing a non-directory we need to log the parent
2786c24b5dfaSDave Chinner 		 * inode here.  For a directory this is done implicitly
2787c24b5dfaSDave Chinner 		 * by the xfs_droplink call for the ".." entry.
2788c24b5dfaSDave Chinner 		 */
2789c24b5dfaSDave Chinner 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2790c24b5dfaSDave Chinner 	}
279127320369SDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2792c24b5dfaSDave Chinner 
279327320369SDave Chinner 	/* Drop the link from dp to ip. */
2794c24b5dfaSDave Chinner 	error = xfs_droplink(tp, ip);
2795c24b5dfaSDave Chinner 	if (error)
279627320369SDave Chinner 		goto out_trans_cancel;
2797c24b5dfaSDave Chinner 
2798381eee69SBrian Foster 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
279927320369SDave Chinner 	if (error) {
28002451337dSDave Chinner 		ASSERT(error != -ENOENT);
2801c8eac49eSBrian Foster 		goto out_trans_cancel;
280227320369SDave Chinner 	}
280327320369SDave Chinner 
2804c24b5dfaSDave Chinner 	/*
2805c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
2806c24b5dfaSDave Chinner 	 * remove transaction goes to disk before returning to
2807c24b5dfaSDave Chinner 	 * the user.
2808c24b5dfaSDave Chinner 	 */
2809c24b5dfaSDave Chinner 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2810c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
2811c24b5dfaSDave Chinner 
281270393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
2813c24b5dfaSDave Chinner 	if (error)
2814c24b5dfaSDave Chinner 		goto std_return;
2815c24b5dfaSDave Chinner 
28162cd2ef6aSChristoph Hellwig 	if (is_dir && xfs_inode_is_filestream(ip))
2817c24b5dfaSDave Chinner 		xfs_filestream_deassociate(ip);
2818c24b5dfaSDave Chinner 
2819c24b5dfaSDave Chinner 	return 0;
2820c24b5dfaSDave Chinner 
2821c24b5dfaSDave Chinner  out_trans_cancel:
28224906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
2823c24b5dfaSDave Chinner  std_return:
2824c24b5dfaSDave Chinner 	return error;
2825c24b5dfaSDave Chinner }
2826c24b5dfaSDave Chinner 
2827f6bba201SDave Chinner /*
2828f6bba201SDave Chinner  * Enter all inodes for a rename transaction into a sorted array.
2829f6bba201SDave Chinner  */
283095afcf5cSDave Chinner #define __XFS_SORT_INODES	5
2831f6bba201SDave Chinner STATIC void
2832f6bba201SDave Chinner xfs_sort_for_rename(
283395afcf5cSDave Chinner 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
283495afcf5cSDave Chinner 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
283595afcf5cSDave Chinner 	struct xfs_inode	*ip1,	/* in: inode of old entry */
283695afcf5cSDave Chinner 	struct xfs_inode	*ip2,	/* in: inode of new entry */
283795afcf5cSDave Chinner 	struct xfs_inode	*wip,	/* in: whiteout inode */
283895afcf5cSDave Chinner 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
283995afcf5cSDave Chinner 	int			*num_inodes)  /* in/out: inodes in array */
2840f6bba201SDave Chinner {
2841f6bba201SDave Chinner 	int			i, j;
2842f6bba201SDave Chinner 
284395afcf5cSDave Chinner 	ASSERT(*num_inodes == __XFS_SORT_INODES);
284495afcf5cSDave Chinner 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
284595afcf5cSDave Chinner 
2846f6bba201SDave Chinner 	/*
2847f6bba201SDave Chinner 	 * i_tab contains a list of pointers to inodes.  We initialize
2848f6bba201SDave Chinner 	 * the table here and sort it.  We will then use it to
2849f6bba201SDave Chinner 	 * order the acquisition of the inode locks.
2850f6bba201SDave Chinner 	 *
2851f6bba201SDave Chinner 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2852f6bba201SDave Chinner 	 */
285395afcf5cSDave Chinner 	i = 0;
285495afcf5cSDave Chinner 	i_tab[i++] = dp1;
285595afcf5cSDave Chinner 	i_tab[i++] = dp2;
285695afcf5cSDave Chinner 	i_tab[i++] = ip1;
285795afcf5cSDave Chinner 	if (ip2)
285895afcf5cSDave Chinner 		i_tab[i++] = ip2;
285995afcf5cSDave Chinner 	if (wip)
286095afcf5cSDave Chinner 		i_tab[i++] = wip;
286195afcf5cSDave Chinner 	*num_inodes = i;
2862f6bba201SDave Chinner 
2863f6bba201SDave Chinner 	/*
2864f6bba201SDave Chinner 	 * Sort the elements via bubble sort.  (Remember, there are at
286595afcf5cSDave Chinner 	 * most 5 elements to sort, so this is adequate.)
2866f6bba201SDave Chinner 	 */
2867f6bba201SDave Chinner 	for (i = 0; i < *num_inodes; i++) {
2868f6bba201SDave Chinner 		for (j = 1; j < *num_inodes; j++) {
2869f6bba201SDave Chinner 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
287095afcf5cSDave Chinner 				struct xfs_inode *temp = i_tab[j];
2871f6bba201SDave Chinner 				i_tab[j] = i_tab[j-1];
2872f6bba201SDave Chinner 				i_tab[j-1] = temp;
2873f6bba201SDave Chinner 			}
2874f6bba201SDave Chinner 		}
2875f6bba201SDave Chinner 	}
2876f6bba201SDave Chinner }
2877f6bba201SDave Chinner 
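/*
 * Example of how the table built by xfs_sort_for_rename() is used, with
 * hypothetical inode numbers: a cross-directory rename with
 * src_dp->i_ino == 200, target_dp->i_ino == 50 and src_ip->i_ino == 120
 * (no target or whiteout inode) yields i_tab == {50, 120, 200}, and
 * xfs_lock_inodes() then takes XFS_ILOCK_EXCL in that ascending inode
 * number order, so concurrent renames touching the same inodes always
 * acquire their locks in the same global order and cannot deadlock.
 */
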
2878310606b0SDave Chinner static int
2879310606b0SDave Chinner xfs_finish_rename(
2880c9cfdb38SBrian Foster 	struct xfs_trans	*tp)
2881310606b0SDave Chinner {
2882310606b0SDave Chinner 	/*
2883310606b0SDave Chinner 	 * If this is a synchronous mount, make sure that the rename transaction
2884310606b0SDave Chinner 	 * goes to disk before returning to the user.
2885310606b0SDave Chinner 	 */
2886310606b0SDave Chinner 	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2887310606b0SDave Chinner 		xfs_trans_set_sync(tp);
2888310606b0SDave Chinner 
288970393313SChristoph Hellwig 	return xfs_trans_commit(tp);
2890310606b0SDave Chinner }
2891310606b0SDave Chinner 
2892f6bba201SDave Chinner /*
2893d31a1825SCarlos Maiolino  * xfs_cross_rename()
2894d31a1825SCarlos Maiolino  *
28950145225eSBhaskar Chowdhury  * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
2896d31a1825SCarlos Maiolino  */
2897d31a1825SCarlos Maiolino STATIC int
2898d31a1825SCarlos Maiolino xfs_cross_rename(
2899d31a1825SCarlos Maiolino 	struct xfs_trans	*tp,
2900d31a1825SCarlos Maiolino 	struct xfs_inode	*dp1,
2901d31a1825SCarlos Maiolino 	struct xfs_name		*name1,
2902d31a1825SCarlos Maiolino 	struct xfs_inode	*ip1,
2903d31a1825SCarlos Maiolino 	struct xfs_inode	*dp2,
2904d31a1825SCarlos Maiolino 	struct xfs_name		*name2,
2905d31a1825SCarlos Maiolino 	struct xfs_inode	*ip2,
2906d31a1825SCarlos Maiolino 	int			spaceres)
2907d31a1825SCarlos Maiolino {
2908d31a1825SCarlos Maiolino 	int		error = 0;
2909d31a1825SCarlos Maiolino 	int		ip1_flags = 0;
2910d31a1825SCarlos Maiolino 	int		ip2_flags = 0;
2911d31a1825SCarlos Maiolino 	int		dp2_flags = 0;
2912d31a1825SCarlos Maiolino 
2913d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in first parent */
2914381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2915d31a1825SCarlos Maiolino 	if (error)
2916eeacd321SDave Chinner 		goto out_trans_abort;
2917d31a1825SCarlos Maiolino 
2918d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in second parent */
2919381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2920d31a1825SCarlos Maiolino 	if (error)
2921eeacd321SDave Chinner 		goto out_trans_abort;
2922d31a1825SCarlos Maiolino 
2923d31a1825SCarlos Maiolino 	/*
2924d31a1825SCarlos Maiolino 	 * If we're renaming one or more directories across different parents,
2925d31a1825SCarlos Maiolino 	 * update the respective ".." entries (and link counts) to match the new
2926d31a1825SCarlos Maiolino 	 * parents.
2927d31a1825SCarlos Maiolino 	 */
2928d31a1825SCarlos Maiolino 	if (dp1 != dp2) {
2929d31a1825SCarlos Maiolino 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2930d31a1825SCarlos Maiolino 
2931c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2932d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2933381eee69SBrian Foster 						dp1->i_ino, spaceres);
2934d31a1825SCarlos Maiolino 			if (error)
2935eeacd321SDave Chinner 				goto out_trans_abort;
2936d31a1825SCarlos Maiolino 
2937d31a1825SCarlos Maiolino 			/* transfer ip2 ".." reference to dp1 */
2938c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2939d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp2);
2940d31a1825SCarlos Maiolino 				if (error)
2941eeacd321SDave Chinner 					goto out_trans_abort;
294291083269SEric Sandeen 				xfs_bumplink(tp, dp1);
2943d31a1825SCarlos Maiolino 			}
2944d31a1825SCarlos Maiolino 
2945d31a1825SCarlos Maiolino 			/*
2946d31a1825SCarlos Maiolino 			 * Although ip1 isn't changed here, userspace needs
2947d31a1825SCarlos Maiolino 			 * to be warned about the change, so that applications
2948d31a1825SCarlos Maiolino 			 * relying on it (like backup ones) will be properly
2949d31a1825SCarlos Maiolino 			 * notified of the change
2950d31a1825SCarlos Maiolino 			 */
2951d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_CHG;
2952d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2953d31a1825SCarlos Maiolino 		}
2954d31a1825SCarlos Maiolino 
2955c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2956d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2957381eee69SBrian Foster 						dp2->i_ino, spaceres);
2958d31a1825SCarlos Maiolino 			if (error)
2959eeacd321SDave Chinner 				goto out_trans_abort;
2960d31a1825SCarlos Maiolino 
2961d31a1825SCarlos Maiolino 			/* transfer ip1 ".." reference to dp2 */
2962c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2963d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp1);
2964d31a1825SCarlos Maiolino 				if (error)
2965eeacd321SDave Chinner 					goto out_trans_abort;
296691083269SEric Sandeen 				xfs_bumplink(tp, dp2);
2967d31a1825SCarlos Maiolino 			}
2968d31a1825SCarlos Maiolino 
2969d31a1825SCarlos Maiolino 			/*
2970d31a1825SCarlos Maiolino 			 * Although ip2 isn't changed here, userspace needs
2971d31a1825SCarlos Maiolino 			 * to be warned about the change, so that applications
2972d31a1825SCarlos Maiolino 			 * relying on it (like backup ones) will be properly
2973d31a1825SCarlos Maiolino 			 * notified of the change
2974d31a1825SCarlos Maiolino 			 */
2975d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2976d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_CHG;
2977d31a1825SCarlos Maiolino 		}
2978d31a1825SCarlos Maiolino 	}
2979d31a1825SCarlos Maiolino 
2980d31a1825SCarlos Maiolino 	if (ip1_flags) {
2981d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
2982d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2983d31a1825SCarlos Maiolino 	}
2984d31a1825SCarlos Maiolino 	if (ip2_flags) {
2985d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
2986d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2987d31a1825SCarlos Maiolino 	}
2988d31a1825SCarlos Maiolino 	if (dp2_flags) {
2989d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
2990d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2991d31a1825SCarlos Maiolino 	}
2992d31a1825SCarlos Maiolino 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2993d31a1825SCarlos Maiolino 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2994c9cfdb38SBrian Foster 	return xfs_finish_rename(tp);
2995eeacd321SDave Chinner 
2996eeacd321SDave Chinner out_trans_abort:
29974906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
2998d31a1825SCarlos Maiolino 	return error;
2999d31a1825SCarlos Maiolino }
3000d31a1825SCarlos Maiolino 
3001d31a1825SCarlos Maiolino /*
30027dcf5c3eSDave Chinner  * xfs_rename_alloc_whiteout()
30037dcf5c3eSDave Chinner  *
3004b63da6c8SRandy Dunlap  * Return a referenced, unlinked, unlocked inode that can be used as a
30057dcf5c3eSDave Chinner  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
30067dcf5c3eSDave Chinner  * crash between allocating the inode and linking it into the rename transaction,
30077dcf5c3eSDave Chinner  * recovery will free the inode and we won't leak it.
30087dcf5c3eSDave Chinner  */
30097dcf5c3eSDave Chinner static int
30107dcf5c3eSDave Chinner xfs_rename_alloc_whiteout(
3011f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
30127dcf5c3eSDave Chinner 	struct xfs_inode	*dp,
30137dcf5c3eSDave Chinner 	struct xfs_inode	**wip)
30147dcf5c3eSDave Chinner {
30157dcf5c3eSDave Chinner 	struct xfs_inode	*tmpfile;
30167dcf5c3eSDave Chinner 	int			error;
30177dcf5c3eSDave Chinner 
3018f736d93dSChristoph Hellwig 	error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
3019f736d93dSChristoph Hellwig 				   &tmpfile);
30207dcf5c3eSDave Chinner 	if (error)
30217dcf5c3eSDave Chinner 		return error;
30227dcf5c3eSDave Chinner 
302322419ac9SBrian Foster 	/*
302422419ac9SBrian Foster 	 * Prepare the tmpfile inode as if it were created through the VFS.
3025c4a6bf7fSDarrick J. Wong 	 * Complete the inode setup and flag it as linkable.  nlink is already
3026c4a6bf7fSDarrick J. Wong 	 * zero, so we can skip the drop_nlink.
302722419ac9SBrian Foster 	 */
30282b3d1d41SChristoph Hellwig 	xfs_setup_iops(tmpfile);
30297dcf5c3eSDave Chinner 	xfs_finish_inode_setup(tmpfile);
30307dcf5c3eSDave Chinner 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
30317dcf5c3eSDave Chinner 
30327dcf5c3eSDave Chinner 	*wip = tmpfile;
30337dcf5c3eSDave Chinner 	return 0;
30347dcf5c3eSDave Chinner }
30357dcf5c3eSDave Chinner 
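/*
 * RENAME_WHITEOUT is driven from userspace via renameat2(), e.g. with
 * hypothetical paths:
 *
 *	renameat2(AT_FDCWD, "upper/foo", AT_FDCWD, "upper/bar",
 *		  RENAME_WHITEOUT);
 *
 * which moves foo to bar and leaves a whiteout (a 0/0 character device) at
 * "upper/foo".  The tmpfile allocated above becomes that device node once
 * xfs_rename() links it in place of the source entry.
 */
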
30367dcf5c3eSDave Chinner /*
3037f6bba201SDave Chinner  * xfs_rename
3038f6bba201SDave Chinner  */
3039f6bba201SDave Chinner int
3040f6bba201SDave Chinner xfs_rename(
3041f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
30427dcf5c3eSDave Chinner 	struct xfs_inode	*src_dp,
3043f6bba201SDave Chinner 	struct xfs_name		*src_name,
30447dcf5c3eSDave Chinner 	struct xfs_inode	*src_ip,
30457dcf5c3eSDave Chinner 	struct xfs_inode	*target_dp,
3046f6bba201SDave Chinner 	struct xfs_name		*target_name,
30477dcf5c3eSDave Chinner 	struct xfs_inode	*target_ip,
3048d31a1825SCarlos Maiolino 	unsigned int		flags)
3049f6bba201SDave Chinner {
30507dcf5c3eSDave Chinner 	struct xfs_mount	*mp = src_dp->i_mount;
30517dcf5c3eSDave Chinner 	struct xfs_trans	*tp;
30527dcf5c3eSDave Chinner 	struct xfs_inode	*wip = NULL;		/* whiteout inode */
30537dcf5c3eSDave Chinner 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
30546da1b4b1SDarrick J. Wong 	int			i;
305595afcf5cSDave Chinner 	int			num_inodes = __XFS_SORT_INODES;
30562b93681fSDave Chinner 	bool			new_parent = (src_dp != target_dp);
3057c19b3b05SDave Chinner 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3058f6bba201SDave Chinner 	int			spaceres;
30597dcf5c3eSDave Chinner 	int			error;
3060f6bba201SDave Chinner 
3061f6bba201SDave Chinner 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3062f6bba201SDave Chinner 
3063eeacd321SDave Chinner 	if ((flags & RENAME_EXCHANGE) && !target_ip)
3064eeacd321SDave Chinner 		return -EINVAL;
3065f6bba201SDave Chinner 
30667dcf5c3eSDave Chinner 	/*
30677dcf5c3eSDave Chinner 	 * If we are doing a whiteout operation, allocate the whiteout inode
30687dcf5c3eSDave Chinner 	 * we will be placing at the target and ensure the type is set
30697dcf5c3eSDave Chinner 	 * appropriately.
30707dcf5c3eSDave Chinner 	 */
30717dcf5c3eSDave Chinner 	if (flags & RENAME_WHITEOUT) {
30727dcf5c3eSDave Chinner 		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3073f736d93dSChristoph Hellwig 		error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
30747dcf5c3eSDave Chinner 		if (error)
30757dcf5c3eSDave Chinner 			return error;
3076f6bba201SDave Chinner 
30777dcf5c3eSDave Chinner 		/* setup target dirent info as whiteout */
30787dcf5c3eSDave Chinner 		src_name->type = XFS_DIR3_FT_CHRDEV;
30797dcf5c3eSDave Chinner 	}
30807dcf5c3eSDave Chinner 
30817dcf5c3eSDave Chinner 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3082f6bba201SDave Chinner 				inodes, &num_inodes);
3083f6bba201SDave Chinner 
3084f6bba201SDave Chinner 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3085253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
30862451337dSDave Chinner 	if (error == -ENOSPC) {
3087f6bba201SDave Chinner 		spaceres = 0;
3088253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3089253f4911SChristoph Hellwig 				&tp);
3090f6bba201SDave Chinner 	}
3091445883e8SDave Chinner 	if (error)
3092253f4911SChristoph Hellwig 		goto out_release_wip;
3093f6bba201SDave Chinner 
3094f6bba201SDave Chinner 	/*
3095f6bba201SDave Chinner 	 * Attach the dquots to the inodes
3096f6bba201SDave Chinner 	 */
3097f6bba201SDave Chinner 	error = xfs_qm_vop_rename_dqattach(inodes);
3098445883e8SDave Chinner 	if (error)
3099445883e8SDave Chinner 		goto out_trans_cancel;
3100f6bba201SDave Chinner 
3101f6bba201SDave Chinner 	/*
3102f6bba201SDave Chinner 	 * Lock all the participating inodes. Depending upon whether
3103f6bba201SDave Chinner 	 * the target_name exists in the target directory, and
3104f6bba201SDave Chinner 	 * whether the target directory is the same as the source
3105f6bba201SDave Chinner 	 * directory, we can lock from 2 to 5 inodes.
3106f6bba201SDave Chinner 	 */
3107f6bba201SDave Chinner 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3108f6bba201SDave Chinner 
3109f6bba201SDave Chinner 	/*
3110f6bba201SDave Chinner 	 * Join all the inodes to the transaction. From this point on,
3111f6bba201SDave Chinner 	 * we can rely on either trans_commit or trans_cancel to unlock
3112f6bba201SDave Chinner 	 * them.
3113f6bba201SDave Chinner 	 */
311465523218SChristoph Hellwig 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3115f6bba201SDave Chinner 	if (new_parent)
311665523218SChristoph Hellwig 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3117f6bba201SDave Chinner 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3118f6bba201SDave Chinner 	if (target_ip)
3119f6bba201SDave Chinner 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
31207dcf5c3eSDave Chinner 	if (wip)
31217dcf5c3eSDave Chinner 		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3122f6bba201SDave Chinner 
3123f6bba201SDave Chinner 	/*
3124f6bba201SDave Chinner 	 * If we are using project inheritance, we only allow renames
3125f6bba201SDave Chinner 	 * into our tree when the project IDs are the same; else the
3126f6bba201SDave Chinner 	 * tree quota mechanism would be circumvented.
3127f6bba201SDave Chinner 	 */
3128f6bba201SDave Chinner 	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3129ceaf603cSChristoph Hellwig 		     target_dp->i_projid != src_ip->i_projid)) {
31302451337dSDave Chinner 		error = -EXDEV;
3131445883e8SDave Chinner 		goto out_trans_cancel;
3132f6bba201SDave Chinner 	}
3133f6bba201SDave Chinner 
3134eeacd321SDave Chinner 	/* RENAME_EXCHANGE is handled separately from here on. */
3135eeacd321SDave Chinner 	if (flags & RENAME_EXCHANGE)
3136eeacd321SDave Chinner 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3137d31a1825SCarlos Maiolino 					target_dp, target_name, target_ip,
3138f16dea54SBrian Foster 					spaceres);
3139d31a1825SCarlos Maiolino 
3140d31a1825SCarlos Maiolino 	/*
3141bc56ad8cSkaixuxia 	 * Check for expected errors before we dirty the transaction
3142bc56ad8cSkaixuxia 	 * so we can return an error without a transaction abort.
314302092a2fSChandan Babu R 	 *
314402092a2fSChandan Babu R 	 * Extent count overflow check:
314502092a2fSChandan Babu R 	 *
314602092a2fSChandan Babu R 	 * From the perspective of src_dp, a rename operation is essentially a
314702092a2fSChandan Babu R 	 * directory entry remove operation. Hence the only place where we check
314802092a2fSChandan Babu R 	 * for extent count overflow for src_dp is in
314902092a2fSChandan Babu R 	 * xfs_bmap_del_extent_real(). xfs_bmap_del_extent_real() returns
315002092a2fSChandan Babu R 	 * -ENOSPC when it detects a possible extent count overflow and in
315102092a2fSChandan Babu R 	 * response, the higher layers of directory handling code do the
315202092a2fSChandan Babu R 	 * following:
315302092a2fSChandan Babu R 	 * 1. Data/Free blocks: XFS lets these blocks linger until a
315402092a2fSChandan Babu R 	 *    future remove operation removes them.
315502092a2fSChandan Babu R 	 * 2. Dabtree blocks: XFS swaps the blocks with the last block in the
315602092a2fSChandan Babu R 	 *    Leaf space and unmaps the last block.
315702092a2fSChandan Babu R 	 *
315802092a2fSChandan Babu R 	 * For target_dp, there are two cases depending on whether the
315902092a2fSChandan Babu R 	 * destination directory entry exists or not.
316002092a2fSChandan Babu R 	 *
316102092a2fSChandan Babu R 	 * When the destination directory entry does not exist (i.e. target_ip ==
316202092a2fSChandan Babu R 	 * NULL), the extent count overflow check is performed only when the
316302092a2fSChandan Babu R 	 * transaction has a non-zero sized space reservation associated with it.
316402092a2fSChandan Babu R 	 * With a zero-sized space reservation, XFS allows a rename operation to
316502092a2fSChandan Babu R 	 * continue only when the directory has sufficient free space in its
316602092a2fSChandan Babu R 	 * data/leaf/free space blocks to hold the new entry.
316702092a2fSChandan Babu R 	 *
316802092a2fSChandan Babu R 	 * When the destination directory entry exists (i.e. target_ip != NULL), all
316902092a2fSChandan Babu R 	 * we need to do is change the inode number associated with the already
317002092a2fSChandan Babu R 	 * existing entry. Hence there is no need to perform an extent count
317102092a2fSChandan Babu R 	 * overflow check.
3172f6bba201SDave Chinner 	 */
3173f6bba201SDave Chinner 	if (target_ip == NULL) {
3174f6bba201SDave Chinner 		/*
3175f6bba201SDave Chinner 		 * If there's no space reservation, check the entry will
3176f6bba201SDave Chinner 		 * fit before actually inserting it.
3177f6bba201SDave Chinner 		 */
317894f3cad5SEric Sandeen 		if (!spaceres) {
317994f3cad5SEric Sandeen 			error = xfs_dir_canenter(tp, target_dp, target_name);
3180f6bba201SDave Chinner 			if (error)
3181445883e8SDave Chinner 				goto out_trans_cancel;
318202092a2fSChandan Babu R 		} else {
318302092a2fSChandan Babu R 			error = xfs_iext_count_may_overflow(target_dp,
318402092a2fSChandan Babu R 					XFS_DATA_FORK,
318502092a2fSChandan Babu R 					XFS_IEXT_DIR_MANIP_CNT(mp));
318602092a2fSChandan Babu R 			if (error)
318702092a2fSChandan Babu R 				goto out_trans_cancel;
318894f3cad5SEric Sandeen 		}
3189bc56ad8cSkaixuxia 	} else {
3190bc56ad8cSkaixuxia 		/*
3191bc56ad8cSkaixuxia 		 * If target exists and it's a directory, check whether
3192bc56ad8cSkaixuxia 		 * it can be destroyed.
3193bc56ad8cSkaixuxia 		 */
3194bc56ad8cSkaixuxia 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3195bc56ad8cSkaixuxia 		    (!xfs_dir_isempty(target_ip) ||
3196bc56ad8cSkaixuxia 		     (VFS_I(target_ip)->i_nlink > 2))) {
3197bc56ad8cSkaixuxia 			error = -EEXIST;
3198bc56ad8cSkaixuxia 			goto out_trans_cancel;
3199bc56ad8cSkaixuxia 		}
3200bc56ad8cSkaixuxia 	}
3201bc56ad8cSkaixuxia 
3202bc56ad8cSkaixuxia 	/*
32036da1b4b1SDarrick J. Wong 	 * Lock the AGI buffers we need to handle bumping the nlink of the
32046da1b4b1SDarrick J. Wong 	 * whiteout inode off the unlinked list and to handle dropping the
32056da1b4b1SDarrick J. Wong 	 * nlink of the target inode.  Per locking order rules, do this in
32066da1b4b1SDarrick J. Wong 	 * increasing AG order and before directory block allocation tries to
32076da1b4b1SDarrick J. Wong 	 * grab AGFs because we grab AGIs before AGFs.
32086da1b4b1SDarrick J. Wong 	 *
32096da1b4b1SDarrick J. Wong 	 * The (vfs) caller must ensure that if src is a directory then
32106da1b4b1SDarrick J. Wong 	 * target_ip is either null or an empty directory.
32116da1b4b1SDarrick J. Wong 	 */
32126da1b4b1SDarrick J. Wong 	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
32136da1b4b1SDarrick J. Wong 		if (inodes[i] == wip ||
32146da1b4b1SDarrick J. Wong 		    (inodes[i] == target_ip &&
32156da1b4b1SDarrick J. Wong 		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
32166da1b4b1SDarrick J. Wong 			struct xfs_buf	*bp;
32176da1b4b1SDarrick J. Wong 			xfs_agnumber_t	agno;
32186da1b4b1SDarrick J. Wong 
32196da1b4b1SDarrick J. Wong 			agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
32206da1b4b1SDarrick J. Wong 			error = xfs_read_agi(mp, tp, agno, &bp);
32216da1b4b1SDarrick J. Wong 			if (error)
32226da1b4b1SDarrick J. Wong 				goto out_trans_cancel;
32236da1b4b1SDarrick J. Wong 		}
32246da1b4b1SDarrick J. Wong 	}
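	/*
	 * Reading an AGI buffer inside the transaction leaves it locked
	 * within the transaction until commit or cancel, which is what
	 * establishes the AGI-before-AGF ordering described above.
	 */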
32256da1b4b1SDarrick J. Wong 
32266da1b4b1SDarrick J. Wong 	/*
3227bc56ad8cSkaixuxia 	 * Directory entry creation below may acquire the AGF. Remove
3228bc56ad8cSkaixuxia 	 * the whiteout from the unlinked list first to preserve correct
3229bc56ad8cSkaixuxia 	 * AGI/AGF locking order. This dirties the transaction so failures
3230bc56ad8cSkaixuxia 	 * after this point will abort and log recovery will clean up the
3231bc56ad8cSkaixuxia 	 * mess.
3232bc56ad8cSkaixuxia 	 *
3233bc56ad8cSkaixuxia 	 * For whiteouts, we need to bump the link count on the whiteout
3234bc56ad8cSkaixuxia 	 * inode. After this point, we have a real link, so clear the tmpfile
3235bc56ad8cSkaixuxia 	 * state flag from the inode so it doesn't accidentally get misused
3236bc56ad8cSkaixuxia 	 * in the future.
3237bc56ad8cSkaixuxia 	 */
3238bc56ad8cSkaixuxia 	if (wip) {
3239bc56ad8cSkaixuxia 		ASSERT(VFS_I(wip)->i_nlink == 0);
3240bc56ad8cSkaixuxia 		error = xfs_iunlink_remove(tp, wip);
3241bc56ad8cSkaixuxia 		if (error)
3242bc56ad8cSkaixuxia 			goto out_trans_cancel;
3243bc56ad8cSkaixuxia 
3244bc56ad8cSkaixuxia 		xfs_bumplink(tp, wip);
3245bc56ad8cSkaixuxia 		VFS_I(wip)->i_state &= ~I_LINKABLE;
3246bc56ad8cSkaixuxia 	}
3247bc56ad8cSkaixuxia 
3248bc56ad8cSkaixuxia 	/*
3249bc56ad8cSkaixuxia 	 * Set up the target.
3250bc56ad8cSkaixuxia 	 */
3251bc56ad8cSkaixuxia 	if (target_ip == NULL) {
3252f6bba201SDave Chinner 		/*
3253f6bba201SDave Chinner 		 * If target does not exist and the rename crosses
3254f6bba201SDave Chinner 		 * directories, adjust the target directory link count
3255f6bba201SDave Chinner 		 * to account for the ".." reference from the new entry.
3256f6bba201SDave Chinner 		 */
3257f6bba201SDave Chinner 		error = xfs_dir_createname(tp, target_dp, target_name,
3258381eee69SBrian Foster 					   src_ip->i_ino, spaceres);
3259f6bba201SDave Chinner 		if (error)
3260c8eac49eSBrian Foster 			goto out_trans_cancel;
3261f6bba201SDave Chinner 
3262f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
3263f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3264f6bba201SDave Chinner 
3265f6bba201SDave Chinner 		if (new_parent && src_is_directory) {
326691083269SEric Sandeen 			xfs_bumplink(tp, target_dp);
3267f6bba201SDave Chinner 		}
3268f6bba201SDave Chinner 	} else { /* target_ip != NULL */
3269f6bba201SDave Chinner 		/*
3270f6bba201SDave Chinner 		 * Link the source inode under the target name.
3271f6bba201SDave Chinner 		 * If the source inode is a directory and we are moving
3272f6bba201SDave Chinner 		 * it across directories, its ".." entry will be
3273f6bba201SDave Chinner 		 * inconsistent until we replace that down below.
3274f6bba201SDave Chinner 		 *
3275f6bba201SDave Chinner 		 * In case there is already an entry with the same
3276f6bba201SDave Chinner 		 * name at the destination directory, remove it first.
3277f6bba201SDave Chinner 		 */
3278f6bba201SDave Chinner 		error = xfs_dir_replace(tp, target_dp, target_name,
3279381eee69SBrian Foster 					src_ip->i_ino, spaceres);
3280f6bba201SDave Chinner 		if (error)
3281c8eac49eSBrian Foster 			goto out_trans_cancel;
3282f6bba201SDave Chinner 
3283f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
3284f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3285f6bba201SDave Chinner 
3286f6bba201SDave Chinner 		/*
3287f6bba201SDave Chinner 		 * Decrement the link count on the target since the target
3288f6bba201SDave Chinner 		 * dir no longer points to it.
3289f6bba201SDave Chinner 		 */
3290f6bba201SDave Chinner 		error = xfs_droplink(tp, target_ip);
3291f6bba201SDave Chinner 		if (error)
3292c8eac49eSBrian Foster 			goto out_trans_cancel;
3293f6bba201SDave Chinner 
3294f6bba201SDave Chinner 		if (src_is_directory) {
3295f6bba201SDave Chinner 			/*
3296f6bba201SDave Chinner 			 * Drop the link from the old "." entry.
3297f6bba201SDave Chinner 			 */
3298f6bba201SDave Chinner 			error = xfs_droplink(tp, target_ip);
3299f6bba201SDave Chinner 			if (error)
3300c8eac49eSBrian Foster 				goto out_trans_cancel;
3301f6bba201SDave Chinner 		}
3302f6bba201SDave Chinner 	} /* target_ip != NULL */
3303f6bba201SDave Chinner 
3304f6bba201SDave Chinner 	/*
3305f6bba201SDave Chinner 	 * Remove the source.
3306f6bba201SDave Chinner 	 */
3307f6bba201SDave Chinner 	if (new_parent && src_is_directory) {
3308f6bba201SDave Chinner 		/*
3309f6bba201SDave Chinner 		 * Rewrite the ".." entry to point to the new
3310f6bba201SDave Chinner 		 * directory.
3311f6bba201SDave Chinner 		 */
3312f6bba201SDave Chinner 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3313381eee69SBrian Foster 					target_dp->i_ino, spaceres);
33142451337dSDave Chinner 		ASSERT(error != -EEXIST);
3315f6bba201SDave Chinner 		if (error)
3316c8eac49eSBrian Foster 			goto out_trans_cancel;
3317f6bba201SDave Chinner 	}
3318f6bba201SDave Chinner 
3319f6bba201SDave Chinner 	/*
3320f6bba201SDave Chinner 	 * We always want to hit the ctime on the source inode.
3321f6bba201SDave Chinner 	 *
3322f6bba201SDave Chinner 	 * This isn't strictly required by the standards since the source
3323f6bba201SDave Chinner 	 * inode isn't really being changed, but old unix file systems did
3324f6bba201SDave Chinner 	 * it and some incremental backup programs won't work without it.
3325f6bba201SDave Chinner 	 */
3326f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3327f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3328f6bba201SDave Chinner 
3329f6bba201SDave Chinner 	/*
3330f6bba201SDave Chinner 	 * Adjust the link count on src_dp.  This is necessary when
3331f6bba201SDave Chinner 	 * renaming a directory, either within one parent when
3332f6bba201SDave Chinner 	 * the target existed, or across two parent directories.
3333f6bba201SDave Chinner 	 */
3334f6bba201SDave Chinner 	if (src_is_directory && (new_parent || target_ip != NULL)) {
3335f6bba201SDave Chinner 
3336f6bba201SDave Chinner 		/*
3337f6bba201SDave Chinner 		 * Decrement link count on src_directory since the
3338f6bba201SDave Chinner 		 * entry that's moved no longer points to it.
3339f6bba201SDave Chinner 		 */
3340f6bba201SDave Chinner 		error = xfs_droplink(tp, src_dp);
3341f6bba201SDave Chinner 		if (error)
3342c8eac49eSBrian Foster 			goto out_trans_cancel;
3343f6bba201SDave Chinner 	}
3344f6bba201SDave Chinner 
33457dcf5c3eSDave Chinner 	/*
33467dcf5c3eSDave Chinner 	 * For whiteouts, we only need to update the source dirent with the
33477dcf5c3eSDave Chinner 	 * inode number of the whiteout inode rather than removing it
33487dcf5c3eSDave Chinner 	 * altogether.
33497dcf5c3eSDave Chinner 	 */
33507dcf5c3eSDave Chinner 	if (wip) {
33517dcf5c3eSDave Chinner 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3352381eee69SBrian Foster 					spaceres);
335302092a2fSChandan Babu R 	} else {
335402092a2fSChandan Babu R 		/*
335502092a2fSChandan Babu R 		 * NOTE: We don't need to check for extent count overflow here
335602092a2fSChandan Babu R 		 * because the dir remove name code will leave the dir block in
335702092a2fSChandan Babu R 		 * place if the extent count would overflow.
335802092a2fSChandan Babu R 		 */
3359f6bba201SDave Chinner 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3360381eee69SBrian Foster 					   spaceres);
336102092a2fSChandan Babu R 	}
336202092a2fSChandan Babu R 
3363f6bba201SDave Chinner 	if (error)
3364c8eac49eSBrian Foster 		goto out_trans_cancel;
3365f6bba201SDave Chinner 
3366f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3367f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3368f6bba201SDave Chinner 	if (new_parent)
3369f6bba201SDave Chinner 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3370f6bba201SDave Chinner 
3371c9cfdb38SBrian Foster 	error = xfs_finish_rename(tp);
33727dcf5c3eSDave Chinner 	if (wip)
337344a8736bSDarrick J. Wong 		xfs_irele(wip);
33747dcf5c3eSDave Chinner 	return error;
3375f6bba201SDave Chinner 
3376445883e8SDave Chinner out_trans_cancel:
33774906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
3378253f4911SChristoph Hellwig out_release_wip:
33797dcf5c3eSDave Chinner 	if (wip)
338044a8736bSDarrick J. Wong 		xfs_irele(wip);
3381f6bba201SDave Chinner 	return error;
3382f6bba201SDave Chinner }
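
/*
 * A rough sketch of how the VFS entry point is expected to call the above
 * (simplified, not the in-tree xfs_vn_rename() code; argument marshalling
 * details differ):
 *
 *	struct xfs_name oname, nname;
 *
 *	xfs_dentry_to_name(&oname, odentry);
 *	xfs_dentry_to_name(&nname, ndentry);
 *	return xfs_rename(mnt_userns, XFS_I(odir), &oname,
 *			  XFS_I(d_inode(odentry)), XFS_I(ndir), &nname,
 *			  new_inode ? XFS_I(new_inode) : NULL, flags);
 */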
3383f6bba201SDave Chinner 
3384e6187b34SDave Chinner static int
3385e6187b34SDave Chinner xfs_iflush(
338693848a99SChristoph Hellwig 	struct xfs_inode	*ip,
338793848a99SChristoph Hellwig 	struct xfs_buf		*bp)
33881da177e4SLinus Torvalds {
338993848a99SChristoph Hellwig 	struct xfs_inode_log_item *iip = ip->i_itemp;
339093848a99SChristoph Hellwig 	struct xfs_dinode	*dip;
339193848a99SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
3392f2019299SBrian Foster 	int			error;
33931da177e4SLinus Torvalds 
3394579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3395718ecc50SDave Chinner 	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3396f7e67b20SChristoph Hellwig 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3397daf83964SChristoph Hellwig 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
339890c60e16SDave Chinner 	ASSERT(iip->ili_item.li_buf == bp);
33991da177e4SLinus Torvalds 
340088ee2df7SChristoph Hellwig 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
34011da177e4SLinus Torvalds 
3402f2019299SBrian Foster 	/*
3403f2019299SBrian Foster 	 * We don't flush the inode if any of the following checks fail, but we
3404f2019299SBrian Foster 	 * do still update the log item and attach to the backing buffer as if
3405f2019299SBrian Foster 	 * the flush happened. This is a formality to facilitate predictable
3406f2019299SBrian Foster 	 * error handling as the caller will shutdown and fail the buffer.
3407f2019299SBrian Foster 	 * error handling as the caller will shut down and fail the buffer.
3408f2019299SBrian Foster 	error = -EFSCORRUPTED;
340969ef921bSChristoph Hellwig 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
34109e24cfd0SDarrick J. Wong 			       mp, XFS_ERRTAG_IFLUSH_1)) {
34116a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3412c9690043SDarrick J. Wong 			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
34136a19d939SDave Chinner 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3414f2019299SBrian Foster 		goto flush_out;
34151da177e4SLinus Torvalds 	}
3416c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode)) {
34171da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
3418f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3419f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
34209e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_3)) {
34216a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3422c9690043SDarrick J. Wong 				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
34236a19d939SDave Chinner 				__func__, ip->i_ino, ip);
3424f2019299SBrian Foster 			goto flush_out;
34251da177e4SLinus Torvalds 		}
3426c19b3b05SDave Chinner 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
34271da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
3428f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3429f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3430f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
34319e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_4)) {
34326a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3433c9690043SDarrick J. Wong 				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
34346a19d939SDave Chinner 				__func__, ip->i_ino, ip);
3435f2019299SBrian Foster 			goto flush_out;
34361da177e4SLinus Torvalds 		}
34371da177e4SLinus Torvalds 	}
3438daf83964SChristoph Hellwig 	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
34396e73a545SChristoph Hellwig 				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
34406a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
34416a19d939SDave Chinner 			"%s: detected corrupt incore inode %Lu, "
3442c9690043SDarrick J. Wong 			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
34436a19d939SDave Chinner 			__func__, ip->i_ino,
3444daf83964SChristoph Hellwig 			ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
34456e73a545SChristoph Hellwig 			ip->i_nblocks, ip);
3446f2019299SBrian Foster 		goto flush_out;
34471da177e4SLinus Torvalds 	}
34481da177e4SLinus Torvalds 	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
34499e24cfd0SDarrick J. Wong 				mp, XFS_ERRTAG_IFLUSH_6)) {
34506a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3451c9690043SDarrick J. Wong 			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
34526a19d939SDave Chinner 			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3453f2019299SBrian Foster 		goto flush_out;
34541da177e4SLinus Torvalds 	}
3455e60896d8SDave Chinner 
34561da177e4SLinus Torvalds 	/*
3457263997a6SDave Chinner 	 * Inode item log recovery for v2 inodes is dependent on the
3458e60896d8SDave Chinner 	 * di_flushiter count for correct sequencing. We bump the flush
3459e60896d8SDave Chinner 	 * iteration count so we can detect flushes which postdate a log record
3460e60896d8SDave Chinner 	 * during recovery. This is redundant as we now log every change and
3461e60896d8SDave Chinner 	 * hence this can't happen but we need to still do it to ensure
3462e60896d8SDave Chinner 	 * backwards compatibility with old kernels that predate logging all
3463e60896d8SDave Chinner 	 * inode changes.
34641da177e4SLinus Torvalds 	 */
34656471e9c5SChristoph Hellwig 	if (!xfs_sb_version_has_v3inode(&mp->m_sb))
34661da177e4SLinus Torvalds 		ip->i_d.di_flushiter++;
34671da177e4SLinus Torvalds 
34680f45a1b2SChristoph Hellwig 	/*
34690f45a1b2SChristoph Hellwig 	 * If there are inline format data / attr forks attached to this inode,
34700f45a1b2SChristoph Hellwig 	 * make sure they are not corrupt.
34710f45a1b2SChristoph Hellwig 	 */
3472f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
34730f45a1b2SChristoph Hellwig 	    xfs_ifork_verify_local_data(ip))
34740f45a1b2SChristoph Hellwig 		goto flush_out;
3475f7e67b20SChristoph Hellwig 	if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
34760f45a1b2SChristoph Hellwig 	    xfs_ifork_verify_local_attr(ip))
3477f2019299SBrian Foster 		goto flush_out;
3478005c5db8SDarrick J. Wong 
34791da177e4SLinus Torvalds 	/*
34803987848cSDave Chinner 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
34813987848cSDave Chinner 	 * copy out the core of the inode, because if the inode is dirty at all
34823987848cSDave Chinner 	 * the core must be.
34831da177e4SLinus Torvalds 	 */
348493f958f9SDave Chinner 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
34851da177e4SLinus Torvalds 
34861da177e4SLinus Torvalds 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
34871da177e4SLinus Torvalds 	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
34881da177e4SLinus Torvalds 		ip->i_d.di_flushiter = 0;
34891da177e4SLinus Torvalds 
3490005c5db8SDarrick J. Wong 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3491005c5db8SDarrick J. Wong 	if (XFS_IFORK_Q(ip))
3492005c5db8SDarrick J. Wong 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
34931da177e4SLinus Torvalds 
34941da177e4SLinus Torvalds 	/*
3495f5d8d5c4SChristoph Hellwig 	 * We've recorded everything logged in the inode, so we'd like to clear
3496f5d8d5c4SChristoph Hellwig 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3497f5d8d5c4SChristoph Hellwig 	 * However, we can't stop logging all this information until the data
3498f5d8d5c4SChristoph Hellwig 	 * we've copied into the disk buffer is written to disk.  If we did we
3499f5d8d5c4SChristoph Hellwig 	 * might overwrite the copy of the inode in the log with all the data
3500f5d8d5c4SChristoph Hellwig 	 * after re-logging only part of it, and in the face of a crash we
3501f5d8d5c4SChristoph Hellwig 	 * wouldn't have all the data we need to recover.
35021da177e4SLinus Torvalds 	 *
3503f5d8d5c4SChristoph Hellwig 	 * What we do is move the bits to the ili_last_fields field.  When
3504f5d8d5c4SChristoph Hellwig 	 * logging the inode, these bits are moved back to the ili_fields field.
3505664ffb8aSChristoph Hellwig 	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3506664ffb8aSChristoph Hellwig 	 * we know that the information those bits represent is permanently on
3507f5d8d5c4SChristoph Hellwig 	 * disk.  As long as the flush completes before the inode is logged
3508f5d8d5c4SChristoph Hellwig 	 * again, then both ili_fields and ili_last_fields will be cleared.
35091da177e4SLinus Torvalds 	 */
3510f2019299SBrian Foster 	error = 0;
3511f2019299SBrian Foster flush_out:
35121319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
3513f5d8d5c4SChristoph Hellwig 	iip->ili_last_fields = iip->ili_fields;
3514f5d8d5c4SChristoph Hellwig 	iip->ili_fields = 0;
3515fc0561ceSDave Chinner 	iip->ili_fsync_fields = 0;
35161319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
35171da177e4SLinus Torvalds 
35181319ebefSDave Chinner 	/*
35191319ebefSDave Chinner 	 * Store the current LSN of the inode so that we can tell whether the
3520664ffb8aSChristoph Hellwig 	 * item has moved in the AIL from xfs_buf_inode_iodone().
35211319ebefSDave Chinner 	 */
35227b2e2a31SDavid Chinner 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
35237b2e2a31SDavid Chinner 				&iip->ili_item.li_lsn);
35241da177e4SLinus Torvalds 
352593848a99SChristoph Hellwig 	/* generate the checksum. */
352693848a99SChristoph Hellwig 	xfs_dinode_calc_crc(mp, dip);
3527f2019299SBrian Foster 	return error;
35281da177e4SLinus Torvalds }
352944a8736bSDarrick J. Wong 
3530e6187b34SDave Chinner /*
3531e6187b34SDave Chinner  * Non-blocking flush of dirty inode metadata into the backing buffer.
3532e6187b34SDave Chinner  *
3533e6187b34SDave Chinner  * The caller must have a reference to the inode and hold the cluster buffer
3534e6187b34SDave Chinner  * locked. The function will walk across all the inodes on the cluster buffer it
3535e6187b34SDave Chinner  * can find and lock without blocking, and flush them to the cluster buffer.
3536e6187b34SDave Chinner  *
35375717ea4dSDave Chinner  * On successful flushing of at least one inode, the caller must write out the
35385717ea4dSDave Chinner  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
35395717ea4dSDave Chinner  * the caller needs to release the buffer. On failure, the filesystem will be
35405717ea4dSDave Chinner  * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
35415717ea4dSDave Chinner  * will be returned.
3542e6187b34SDave Chinner  */
3543e6187b34SDave Chinner int
3544e6187b34SDave Chinner xfs_iflush_cluster(
3545e6187b34SDave Chinner 	struct xfs_buf		*bp)
3546e6187b34SDave Chinner {
35475717ea4dSDave Chinner 	struct xfs_mount	*mp = bp->b_mount;
35485717ea4dSDave Chinner 	struct xfs_log_item	*lip, *n;
35495717ea4dSDave Chinner 	struct xfs_inode	*ip;
35505717ea4dSDave Chinner 	struct xfs_inode_log_item *iip;
3551e6187b34SDave Chinner 	int			clcount = 0;
35525717ea4dSDave Chinner 	int			error = 0;
3553e6187b34SDave Chinner 
3554e6187b34SDave Chinner 	/*
35555717ea4dSDave Chinner 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
35565717ea4dSDave Chinner 	 * can remove itself from the list.
3557e6187b34SDave Chinner 	 */
35585717ea4dSDave Chinner 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
35595717ea4dSDave Chinner 		iip = (struct xfs_inode_log_item *)lip;
35605717ea4dSDave Chinner 		ip = iip->ili_inode;
35615717ea4dSDave Chinner 
35625717ea4dSDave Chinner 		/*
35635717ea4dSDave Chinner 		 * Quick and dirty check to avoid locks if possible.
35645717ea4dSDave Chinner 		 */
3565718ecc50SDave Chinner 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
35665717ea4dSDave Chinner 			continue;
35675717ea4dSDave Chinner 		if (xfs_ipincount(ip))
35685717ea4dSDave Chinner 			continue;
35695717ea4dSDave Chinner 
35705717ea4dSDave Chinner 		/*
35715717ea4dSDave Chinner 		 * The inode is still attached to the buffer, which means it is
35725717ea4dSDave Chinner 		 * dirty but reclaim might try to grab it. Check carefully for
35735717ea4dSDave Chinner 		 * that, and grab the ilock while still holding the i_flags_lock
35745717ea4dSDave Chinner 		 * to guarantee reclaim will not be able to reclaim this inode
35755717ea4dSDave Chinner 		 * once we drop the i_flags_lock.
35765717ea4dSDave Chinner 		 */
35775717ea4dSDave Chinner 		spin_lock(&ip->i_flags_lock);
35785717ea4dSDave Chinner 		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3579718ecc50SDave Chinner 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
35805717ea4dSDave Chinner 			spin_unlock(&ip->i_flags_lock);
3581e6187b34SDave Chinner 			continue;
3582e6187b34SDave Chinner 		}
3583e6187b34SDave Chinner 
3584e6187b34SDave Chinner 		/*
35855717ea4dSDave Chinner 		 * ILOCK will pin the inode against reclaim and prevent
35865717ea4dSDave Chinner 		 * concurrent transactions modifying the inode while we are
3587718ecc50SDave Chinner 		 * flushing the inode. If we get the lock, set the flushing
3588718ecc50SDave Chinner 		 * state before we drop the i_flags_lock.
3589e6187b34SDave Chinner 		 */
35905717ea4dSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
35915717ea4dSDave Chinner 			spin_unlock(&ip->i_flags_lock);
35925717ea4dSDave Chinner 			continue;
35935717ea4dSDave Chinner 		}
3594718ecc50SDave Chinner 		__xfs_iflags_set(ip, XFS_IFLUSHING);
35955717ea4dSDave Chinner 		spin_unlock(&ip->i_flags_lock);
35965717ea4dSDave Chinner 
35975717ea4dSDave Chinner 		/*
35985717ea4dSDave Chinner 		 * Abort flushing this inode if we are shut down because the
35995717ea4dSDave Chinner 		 * inode may not currently be in the AIL. This can occur when
36005717ea4dSDave Chinner 		 * log I/O failure unpins the inode without inserting into the
36015717ea4dSDave Chinner 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
36025717ea4dSDave Chinner 		 * that otherwise looks like it should be flushed.
36035717ea4dSDave Chinner 		 */
36045717ea4dSDave Chinner 		if (XFS_FORCED_SHUTDOWN(mp)) {
36055717ea4dSDave Chinner 			xfs_iunpin_wait(ip);
36065717ea4dSDave Chinner 			xfs_iflush_abort(ip);
36075717ea4dSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
36085717ea4dSDave Chinner 			error = -EIO;
36095717ea4dSDave Chinner 			continue;
36105717ea4dSDave Chinner 		}
36115717ea4dSDave Chinner 
36125717ea4dSDave Chinner 		/* don't block waiting on a log force to unpin dirty inodes */
36135717ea4dSDave Chinner 		if (xfs_ipincount(ip)) {
3614718ecc50SDave Chinner 			xfs_iflags_clear(ip, XFS_IFLUSHING);
36155717ea4dSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
36165717ea4dSDave Chinner 			continue;
36175717ea4dSDave Chinner 		}
36185717ea4dSDave Chinner 
36195717ea4dSDave Chinner 		if (!xfs_inode_clean(ip))
36205717ea4dSDave Chinner 			error = xfs_iflush(ip, bp);
36215717ea4dSDave Chinner 		else
3622718ecc50SDave Chinner 			xfs_iflags_clear(ip, XFS_IFLUSHING);
36235717ea4dSDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
36245717ea4dSDave Chinner 		if (error)
3625e6187b34SDave Chinner 			break;
3626e6187b34SDave Chinner 		clcount++;
3627e6187b34SDave Chinner 	}
3628e6187b34SDave Chinner 
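	/*
	 * Error handling, as described in the comment above this function:
	 * complete the (async) buffer with a failure so it is unlocked and
	 * released, then shut the filesystem down.
	 */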
3629e6187b34SDave Chinner 	if (error) {
3630e6187b34SDave Chinner 		bp->b_flags |= XBF_ASYNC;
3631e6187b34SDave Chinner 		xfs_buf_ioend_fail(bp);
3632e6187b34SDave Chinner 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3633e6187b34SDave Chinner 		return error;
3634e6187b34SDave Chinner 	}
3635e6187b34SDave Chinner 
36365717ea4dSDave Chinner 	if (!clcount)
36375717ea4dSDave Chinner 		return -EAGAIN;
36385717ea4dSDave Chinner 
36395717ea4dSDave Chinner 	XFS_STATS_INC(mp, xs_icluster_flushcnt);
36405717ea4dSDave Chinner 	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
36415717ea4dSDave Chinner 	return 0;
36425717ea4dSDave Chinner 
36435717ea4dSDave Chinner }
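
/*
 * A simplified sketch of the expected caller, the AIL push path in
 * xfs_inode_item.c (not a verbatim copy):
 *
 *	if (!xfs_buf_trylock(bp))
 *		return XFS_ITEM_LOCKED;
 *	error = xfs_iflush_cluster(bp);
 *	if (!error) {
 *		if (!xfs_buf_delwri_queue(bp, buffer_list))
 *			rval = XFS_ITEM_FLUSHING;
 *		xfs_buf_relse(bp);
 *	}
 *
 * i.e. on success the caller queues the buffer for writeback and releases it;
 * on -EAGAIN it only releases it; on other errors the buffer has already been
 * released by xfs_iflush_cluster() itself.
 */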
36445717ea4dSDave Chinner 
364544a8736bSDarrick J. Wong /* Release an inode. */
364644a8736bSDarrick J. Wong void
364744a8736bSDarrick J. Wong xfs_irele(
364844a8736bSDarrick J. Wong 	struct xfs_inode	*ip)
364944a8736bSDarrick J. Wong {
365044a8736bSDarrick J. Wong 	trace_xfs_irele(ip, _RET_IP_);
365144a8736bSDarrick J. Wong 	iput(VFS_I(ip));
365244a8736bSDarrick J. Wong }
365354fbdd10SChristoph Hellwig 
365454fbdd10SChristoph Hellwig /*
365554fbdd10SChristoph Hellwig  * Ensure all committed transactions touching the inode are written to the log.
365654fbdd10SChristoph Hellwig  */
365754fbdd10SChristoph Hellwig int
365854fbdd10SChristoph Hellwig xfs_log_force_inode(
365954fbdd10SChristoph Hellwig 	struct xfs_inode	*ip)
366054fbdd10SChristoph Hellwig {
366154fbdd10SChristoph Hellwig 	xfs_lsn_t		lsn = 0;
366254fbdd10SChristoph Hellwig 
366354fbdd10SChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_SHARED);
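	/*
	 * A non-zero pin count means the inode still has changes in the
	 * in-memory log; ili_last_lsn is the commit LSN of the most recent
	 * transaction that modified it, so forcing the log to that LSN is
	 * enough to get those changes to stable storage.
	 */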
366454fbdd10SChristoph Hellwig 	if (xfs_ipincount(ip))
366554fbdd10SChristoph Hellwig 		lsn = ip->i_itemp->ili_last_lsn;
366654fbdd10SChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
366754fbdd10SChristoph Hellwig 
366854fbdd10SChristoph Hellwig 	if (!lsn)
366954fbdd10SChristoph Hellwig 		return 0;
367054fbdd10SChristoph Hellwig 	return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL);
367154fbdd10SChristoph Hellwig }
3672e2aaee9cSDarrick J. Wong 
3673e2aaee9cSDarrick J. Wong /*
3674e2aaee9cSDarrick J. Wong  * Grab the exclusive iolock for a data copy from src to dest, making sure to
3675e2aaee9cSDarrick J. Wong  * abide by the vfs locking order (lowest pointer value goes first) and to break
3676e2aaee9cSDarrick J. Wong  * the layout leases before proceeding.  The loop is needed because we cannot call
3677e2aaee9cSDarrick J. Wong  * the blocking break_layout() with the iolocks held, and therefore have to
3678e2aaee9cSDarrick J. Wong  * back out both locks.
3679e2aaee9cSDarrick J. Wong  */
3680e2aaee9cSDarrick J. Wong static int
3681e2aaee9cSDarrick J. Wong xfs_iolock_two_inodes_and_break_layout(
3682e2aaee9cSDarrick J. Wong 	struct inode		*src,
3683e2aaee9cSDarrick J. Wong 	struct inode		*dest)
3684e2aaee9cSDarrick J. Wong {
3685e2aaee9cSDarrick J. Wong 	int			error;
3686e2aaee9cSDarrick J. Wong 
3687e2aaee9cSDarrick J. Wong 	if (src > dest)
3688e2aaee9cSDarrick J. Wong 		swap(src, dest);
3689e2aaee9cSDarrick J. Wong 
3690e2aaee9cSDarrick J. Wong retry:
3691e2aaee9cSDarrick J. Wong 	/* Wait to break both inodes' layouts before we start locking. */
3692e2aaee9cSDarrick J. Wong 	error = break_layout(src, true);
3693e2aaee9cSDarrick J. Wong 	if (error)
3694e2aaee9cSDarrick J. Wong 		return error;
3695e2aaee9cSDarrick J. Wong 	if (src != dest) {
3696e2aaee9cSDarrick J. Wong 		error = break_layout(dest, true);
3697e2aaee9cSDarrick J. Wong 		if (error)
3698e2aaee9cSDarrick J. Wong 			return error;
3699e2aaee9cSDarrick J. Wong 	}
3700e2aaee9cSDarrick J. Wong 
3701e2aaee9cSDarrick J. Wong 	/* Lock one inode and make sure nobody got in and leased it. */
3702e2aaee9cSDarrick J. Wong 	inode_lock(src);
3703e2aaee9cSDarrick J. Wong 	error = break_layout(src, false);
3704e2aaee9cSDarrick J. Wong 	if (error) {
3705e2aaee9cSDarrick J. Wong 		inode_unlock(src);
3706e2aaee9cSDarrick J. Wong 		if (error == -EWOULDBLOCK)
3707e2aaee9cSDarrick J. Wong 			goto retry;
3708e2aaee9cSDarrick J. Wong 		return error;
3709e2aaee9cSDarrick J. Wong 	}
3710e2aaee9cSDarrick J. Wong 
3711e2aaee9cSDarrick J. Wong 	if (src == dest)
3712e2aaee9cSDarrick J. Wong 		return 0;
3713e2aaee9cSDarrick J. Wong 
3714e2aaee9cSDarrick J. Wong 	/* Lock the other inode and make sure nobody got in and leased it. */
3715e2aaee9cSDarrick J. Wong 	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3716e2aaee9cSDarrick J. Wong 	error = break_layout(dest, false);
3717e2aaee9cSDarrick J. Wong 	if (error) {
3718e2aaee9cSDarrick J. Wong 		inode_unlock(src);
3719e2aaee9cSDarrick J. Wong 		inode_unlock(dest);
3720e2aaee9cSDarrick J. Wong 		if (error == -EWOULDBLOCK)
3721e2aaee9cSDarrick J. Wong 			goto retry;
3722e2aaee9cSDarrick J. Wong 		return error;
3723e2aaee9cSDarrick J. Wong 	}
3724e2aaee9cSDarrick J. Wong 
3725e2aaee9cSDarrick J. Wong 	return 0;
3726e2aaee9cSDarrick J. Wong }
3727e2aaee9cSDarrick J. Wong 
3728e2aaee9cSDarrick J. Wong /*
3729e2aaee9cSDarrick J. Wong  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3730e2aaee9cSDarrick J. Wong  * mmap activity.
3731e2aaee9cSDarrick J. Wong  */
3732e2aaee9cSDarrick J. Wong int
3733e2aaee9cSDarrick J. Wong xfs_ilock2_io_mmap(
3734e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip1,
3735e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip2)
3736e2aaee9cSDarrick J. Wong {
3737e2aaee9cSDarrick J. Wong 	int			ret;
3738e2aaee9cSDarrick J. Wong 
3739e2aaee9cSDarrick J. Wong 	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3740e2aaee9cSDarrick J. Wong 	if (ret)
3741e2aaee9cSDarrick J. Wong 		return ret;
3742e2aaee9cSDarrick J. Wong 	if (ip1 == ip2)
3743e2aaee9cSDarrick J. Wong 		xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3744e2aaee9cSDarrick J. Wong 	else
3745e2aaee9cSDarrick J. Wong 		xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
3746e2aaee9cSDarrick J. Wong 				    ip2, XFS_MMAPLOCK_EXCL);
3747e2aaee9cSDarrick J. Wong 	return 0;
3748e2aaee9cSDarrick J. Wong }
3749e2aaee9cSDarrick J. Wong 
3750e2aaee9cSDarrick J. Wong /* Unlock both inodes to allow IO and mmap activity. */
3751e2aaee9cSDarrick J. Wong void
3752e2aaee9cSDarrick J. Wong xfs_iunlock2_io_mmap(
3753e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip1,
3754e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip2)
3755e2aaee9cSDarrick J. Wong {
3756e2aaee9cSDarrick J. Wong 	bool			same_inode = (ip1 == ip2);
3757e2aaee9cSDarrick J. Wong 
3758e2aaee9cSDarrick J. Wong 	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3759e2aaee9cSDarrick J. Wong 	if (!same_inode)
3760e2aaee9cSDarrick J. Wong 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3761e2aaee9cSDarrick J. Wong 	inode_unlock(VFS_I(ip2));
3762e2aaee9cSDarrick J. Wong 	if (!same_inode)
3763e2aaee9cSDarrick J. Wong 		inode_unlock(VFS_I(ip1));
3764e2aaee9cSDarrick J. Wong }
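
/*
 * Sketch of the intended lock/unlock pairing (simplified; the real user is
 * the reflink remap path in xfs_reflink.c):
 *
 *	error = xfs_ilock2_io_mmap(src, dest);
 *	if (error)
 *		return error;
 *	... remap/dedupe extents ...
 *	xfs_iunlock2_io_mmap(src, dest);
 */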
3765