// SPDX-License-Identifier: GPL-2.0
/*
 * fs/xfs/xfs_inode.c
 *
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
	struct xfs_inode *);

/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract the CoW extent size hint from an inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
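
/*
 * Worked example of the hint selection above (values illustrative, not
 * taken from any caller in this file): with no CoW extent size hint set
 * and a regular extent size hint of 16 blocks, xfs_get_cowextsz_hint()
 * returns 16; with both hints zero it falls back to
 * XFS_DEFAULT_COWEXTSZ_HINT:
 *
 *	xfs_extlen_t	cowextsz = xfs_get_cowextsz_hint(ip);
 *
 * cowextsz is never zero here because the default hint backstops it.
 */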

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * reading in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
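
/*
 * Typical caller pattern (an illustrative sketch, not a caller from this
 * file): the returned lock mode must be fed back to xfs_iunlock(), because
 * the helper may have taken the ilock exclusively if the extents still had
 * to be read in:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	error = xfs_bmapi_read(ip, ...);
 *	xfs_iunlock(ip, lock_mode);
 */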

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_lock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
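
/*
 * Usage sketch (illustrative): several lock classes may be combined in a
 * single call and are taken in the documented order (iolock, then mmaplock,
 * then ilock); the same flags must later be passed to xfs_iunlock().  This
 * is the shape of the extent manipulation paths mentioned above:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	... invalidate the page cache, e.g. for a hole punch ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */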

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *	 to be locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}
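
/*
 * Usage sketch (illustrative): callers that must not block try the lock
 * and back off on failure, e.g. a non-blocking scan might do:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 *		return -EAGAIN;
 *	... do work ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */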

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks
 *	 to be unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The I/O lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
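
/*
 * Example (illustrative): for the third inode in an ordered multi-inode
 * lock sequence, xfs_lock_inumorder(XFS_ILOCK_EXCL, 2) returns
 * XFS_ILOCK_EXCL with subclass 2 shifted into the XFS_ILOCK_SHIFT bits,
 * so lockdep sees a distinct class for each position in the sequence:
 *
 *	xfs_ilock(ips[2], xfs_lock_inumorder(XFS_ILOCK_EXCL, 2));
 */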

/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0, i, j, try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}
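
/*
 * Usage sketch (illustrative): a caller such as the rename code first sorts
 * the inode pointers into i_ino order, with duplicates adjacent, and then
 * locks them all in one call:
 *
 *	struct xfs_inode	*ips[4];
 *
 *	... fill ips[] sorted by i_ino, duplicates adjacent ...
 *	xfs_lock_inodes(ips, 4, XFS_ILOCK_EXCL);
 */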

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the mmaplock or the ilock, but not more than one type at a time. If we lock
 * more than one at a time, lockdep will report false positives saying we have
 * violated locking orders.  The iolock must be double-locked separately since
 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 * SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	struct xfs_inode	*temp;
	uint			mode_temp;
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
		mode_temp = ip0_mode;
		ip0_mode = ip1_mode;
		ip1_mode = mode_temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}
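
/*
 * Usage sketch (illustrative): a link(2) style operation locks the source
 * inode and the target directory together; i_ino ordering is handled
 * internally, so the caller may pass them in either order:
 *
 *	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
 */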

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	uint			flags = 0;

	if (ip->i_diflags & XFS_DIFLAG_ANY) {
		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (ip->i_diflags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (ip->i_diflags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (XFS_IFORK_Q(ip))
		flags |= FS_XFLAG_HASATTR;
	return flags;
}
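
/*
 * Example (illustrative): an inode with XFS_DIFLAG_REALTIME set and an
 * attribute fork attached maps to FS_XFLAG_REALTIME | FS_XFLAG_HASATTR,
 * the form in which inode flags reach userspace:
 *
 *	uint	xflags = xfs_ip2xflags(ip);
 */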

/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}
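
/*
 * Usage sketch (illustrative): a caller that passes a non-NULL ci_name
 * owns ci_name.name after a case-insensitive match and must free it:
 *
 *	error = xfs_lookup(dp, &xname, &ip, &ci_name);
 *	if (!error && ci_name.name) {
 *		... use the CI-matched name ...
 *		kmem_free(ci_name.name);
 *	}
 */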

/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	xfs_failaddr_t		failaddr;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_extsize = pip->i_extsize;
		}
		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_extsize = pip->i_extsize;
		}
	}
	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_diflags |= di_flags;

	/*
	 * Inode verifiers on older kernels only check that the extent size
	 * hint is an integer multiple of the rt extent size on realtime files.
	 * They did not check the hint alignment on a directory with both
	 * rtinherit and extszinherit flags set.  If the misaligned hint is
	 * propagated from a directory into a new realtime file, new file
	 * allocations will fail due to math errors in the rt allocator and/or
	 * trip the verifiers.  Validate the hint settings in the new file so
	 * that we don't let broken hints propagate.
	 */
	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
			VFS_I(ip)->i_mode, ip->i_diflags);
	if (failaddr) {
		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
				   XFS_DIFLAG_EXTSZINHERIT);
		ip->i_extsize = 0;
	}
}

/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	xfs_failaddr_t		failaddr;

	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = pip->i_cowextsize;
	}
	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
		ip->i_diflags2 |= XFS_DIFLAG2_DAX;

	/* Don't let invalid cowextsize hints propagate. */
	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
	if (failaddr) {
		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = 0;
	}
}

/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 */
int
xfs_init_new_inode(
	struct user_namespace	*mnt_userns,
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	xfs_ino_t		ino,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	struct inode		*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	unsigned int		flags;
	int			error;
	struct timespec64	tv;
	struct inode		*inode;

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) &&
	    (mp->m_flags & XFS_MOUNT_GRPID)) {
		inode_fsuid_set(inode, mnt_userns);
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(mnt_userns, inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) &&
	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
		inode->i_mode &= ~S_ISGID;

	ip->i_disk_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_extsize = 0;
	ip->i_diflags = 0;

	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		inode_set_iversion(inode, 1);
		ip->i_cowextsize = 0;
		ip->i_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		fallthrough;
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * If we need to create attributes immediately after allocating the
	 * inode, initialise an empty attribute fork right now. We use the
	 * default fork offset for attributes here as we don't know exactly what
	 * size or how many attributes we might be adding. We can do this
	 * safely here because we know the data fork is completely empty and
	 * this saves us from needing to run a separate transaction to set the
	 * fork offset in the immediate future.
	 */
	if (init_xattrs && xfs_sb_version_hasattr(&mp->m_sb)) {
		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
		ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

int
xfs_create(
	struct user_namespace	*mnt_userns,
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	bool			init_xattrs,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns),
			mapped_fsgid(mnt_userns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
				resblks, &tp);
	}
	if (error)
		goto out_release_dquots;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

1074c24b5dfaSDave Chinner 	/*
1075c24b5dfaSDave Chinner 	 * Attach the dquot(s) to the inodes and modify them incore.
1076c24b5dfaSDave Chinner 	 * The ids of the inode couldn't have changed since the new
1077c24b5dfaSDave Chinner 	 * inode has been locked ever since it was created.
1078c24b5dfaSDave Chinner 	 */
1079c24b5dfaSDave Chinner 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1080c24b5dfaSDave Chinner 
108170393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1082c24b5dfaSDave Chinner 	if (error)
1083c24b5dfaSDave Chinner 		goto out_release_inode;
1084c24b5dfaSDave Chinner 
1085c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1086c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1087c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1088c24b5dfaSDave Chinner 
1089c24b5dfaSDave Chinner 	*ipp = ip;
1090c24b5dfaSDave Chinner 	return 0;
1091c24b5dfaSDave Chinner 
1092c24b5dfaSDave Chinner  out_trans_cancel:
10934906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1094c24b5dfaSDave Chinner  out_release_inode:
1095c24b5dfaSDave Chinner 	/*
109658c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
109758c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
109858c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
1099c24b5dfaSDave Chinner 	 */
110058c90473SDave Chinner 	if (ip) {
110158c90473SDave Chinner 		xfs_finish_inode_setup(ip);
110244a8736bSDarrick J. Wong 		xfs_irele(ip);
110358c90473SDave Chinner 	}
1104f2f7b9ffSDarrick J. Wong  out_release_dquots:
1105c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1106c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1107c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1108c24b5dfaSDave Chinner 
1109c24b5dfaSDave Chinner 	if (unlock_dp_on_error)
111065523218SChristoph Hellwig 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1111c24b5dfaSDave Chinner 	return error;
1112c24b5dfaSDave Chinner }
1113c24b5dfaSDave Chinner 
1114c24b5dfaSDave Chinner int
111599b6436bSZhi Yong Wu xfs_create_tmpfile(
1116f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
111799b6436bSZhi Yong Wu 	struct xfs_inode	*dp,
1118330033d6SBrian Foster 	umode_t			mode,
1119330033d6SBrian Foster 	struct xfs_inode	**ipp)
112099b6436bSZhi Yong Wu {
112199b6436bSZhi Yong Wu 	struct xfs_mount	*mp = dp->i_mount;
112299b6436bSZhi Yong Wu 	struct xfs_inode	*ip = NULL;
112399b6436bSZhi Yong Wu 	struct xfs_trans	*tp = NULL;
112499b6436bSZhi Yong Wu 	int			error;
112599b6436bSZhi Yong Wu 	prid_t                  prid;
112699b6436bSZhi Yong Wu 	struct xfs_dquot	*udqp = NULL;
112799b6436bSZhi Yong Wu 	struct xfs_dquot	*gdqp = NULL;
112899b6436bSZhi Yong Wu 	struct xfs_dquot	*pdqp = NULL;
112999b6436bSZhi Yong Wu 	struct xfs_trans_res	*tres;
113099b6436bSZhi Yong Wu 	uint			resblks;
1131b652afd9SDave Chinner 	xfs_ino_t		ino;
113299b6436bSZhi Yong Wu 
113399b6436bSZhi Yong Wu 	if (XFS_FORCED_SHUTDOWN(mp))
11342451337dSDave Chinner 		return -EIO;
113599b6436bSZhi Yong Wu 
113699b6436bSZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
113799b6436bSZhi Yong Wu 
113899b6436bSZhi Yong Wu 	/*
113999b6436bSZhi Yong Wu 	 * Make sure that we have allocated dquot(s) on disk.
114099b6436bSZhi Yong Wu 	 */
1141a65e58e7SChristian Brauner 	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns),
1142a65e58e7SChristian Brauner 			mapped_fsgid(mnt_userns), prid,
114399b6436bSZhi Yong Wu 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
114499b6436bSZhi Yong Wu 			&udqp, &gdqp, &pdqp);
114599b6436bSZhi Yong Wu 	if (error)
114699b6436bSZhi Yong Wu 		return error;
114799b6436bSZhi Yong Wu 
114899b6436bSZhi Yong Wu 	resblks = XFS_IALLOC_SPACE_RES(mp);
114999b6436bSZhi Yong Wu 	tres = &M_RES(mp)->tr_create_tmpfile;
1150253f4911SChristoph Hellwig 
1151f2f7b9ffSDarrick J. Wong 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1152f2f7b9ffSDarrick J. Wong 			&tp);
11534906e215SChristoph Hellwig 	if (error)
1154f2f7b9ffSDarrick J. Wong 		goto out_release_dquots;
115599b6436bSZhi Yong Wu 
1156b652afd9SDave Chinner 	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1157b652afd9SDave Chinner 	if (!error)
1158b652afd9SDave Chinner 		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
1159b652afd9SDave Chinner 				0, 0, prid, false, &ip);
1160d6077aa3SJan Kara 	if (error)
116199b6436bSZhi Yong Wu 		goto out_trans_cancel;
116299b6436bSZhi Yong Wu 
116399b6436bSZhi Yong Wu 	if (mp->m_flags & XFS_MOUNT_WSYNC)
116499b6436bSZhi Yong Wu 		xfs_trans_set_sync(tp);
116599b6436bSZhi Yong Wu 
116699b6436bSZhi Yong Wu 	/*
116799b6436bSZhi Yong Wu 	 * Attach the dquot(s) to the inodes and modify them incore.
116899b6436bSZhi Yong Wu 	 * The ids of the inode couldn't have changed since the new
116999b6436bSZhi Yong Wu 	 * inode has been locked ever since it was created.
117099b6436bSZhi Yong Wu 	 */
117199b6436bSZhi Yong Wu 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
117299b6436bSZhi Yong Wu 
117399b6436bSZhi Yong Wu 	error = xfs_iunlink(tp, ip);
117499b6436bSZhi Yong Wu 	if (error)
11754906e215SChristoph Hellwig 		goto out_trans_cancel;
117699b6436bSZhi Yong Wu 
117770393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
117899b6436bSZhi Yong Wu 	if (error)
117999b6436bSZhi Yong Wu 		goto out_release_inode;
118099b6436bSZhi Yong Wu 
118199b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
118299b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
118399b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
118499b6436bSZhi Yong Wu 
1185330033d6SBrian Foster 	*ipp = ip;
118699b6436bSZhi Yong Wu 	return 0;
118799b6436bSZhi Yong Wu 
118899b6436bSZhi Yong Wu  out_trans_cancel:
11894906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
119099b6436bSZhi Yong Wu  out_release_inode:
119199b6436bSZhi Yong Wu 	/*
119258c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
119358c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
119458c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
119599b6436bSZhi Yong Wu 	 */
119658c90473SDave Chinner 	if (ip) {
119758c90473SDave Chinner 		xfs_finish_inode_setup(ip);
119844a8736bSDarrick J. Wong 		xfs_irele(ip);
119958c90473SDave Chinner 	}
1200f2f7b9ffSDarrick J. Wong  out_release_dquots:
120199b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
120299b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
120399b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
120499b6436bSZhi Yong Wu 
120599b6436bSZhi Yong Wu 	return error;
120699b6436bSZhi Yong Wu }
120799b6436bSZhi Yong Wu 
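/*
 * Illustrative tie-up (a sketch, not a formal contract): an O_TMPFILE
 * create followed by linkat() maps onto these helpers roughly as
 *
 *	xfs_create_tmpfile()	-> allocate the inode with nlink == 0 and
 *				   put it on the AGI unlinked list via
 *				   xfs_iunlink()
 *	xfs_link()		-> take it back off the list with
 *				   xfs_iunlink_remove() before the new
 *				   directory entry is created
 *
 * so a tmpfile that is never linked stays on the unlinked list, where a
 * crash leaves it for log recovery to find and free.
 */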
120899b6436bSZhi Yong Wu int
1209c24b5dfaSDave Chinner xfs_link(
1210c24b5dfaSDave Chinner 	xfs_inode_t		*tdp,
1211c24b5dfaSDave Chinner 	xfs_inode_t		*sip,
1212c24b5dfaSDave Chinner 	struct xfs_name		*target_name)
1213c24b5dfaSDave Chinner {
1214c24b5dfaSDave Chinner 	xfs_mount_t		*mp = tdp->i_mount;
1215c24b5dfaSDave Chinner 	xfs_trans_t		*tp;
1216c24b5dfaSDave Chinner 	int			error;
1217c24b5dfaSDave Chinner 	int			resblks;
1218c24b5dfaSDave Chinner 
1219c24b5dfaSDave Chinner 	trace_xfs_link(tdp, target_name);
1220c24b5dfaSDave Chinner 
1221c19b3b05SDave Chinner 	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1222c24b5dfaSDave Chinner 
1223c24b5dfaSDave Chinner 	if (XFS_FORCED_SHUTDOWN(mp))
12242451337dSDave Chinner 		return -EIO;
1225c24b5dfaSDave Chinner 
1226c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(sip);
1227c24b5dfaSDave Chinner 	if (error)
1228c24b5dfaSDave Chinner 		goto std_return;
1229c24b5dfaSDave Chinner 
1230c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(tdp);
1231c24b5dfaSDave Chinner 	if (error)
1232c24b5dfaSDave Chinner 		goto std_return;
1233c24b5dfaSDave Chinner 
1234c24b5dfaSDave Chinner 	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1235253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
12362451337dSDave Chinner 	if (error == -ENOSPC) {
1237c24b5dfaSDave Chinner 		resblks = 0;
1238253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1239c24b5dfaSDave Chinner 	}
12404906e215SChristoph Hellwig 	if (error)
1241253f4911SChristoph Hellwig 		goto std_return;
1242c24b5dfaSDave Chinner 
12437c2d238aSDarrick J. Wong 	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1244c24b5dfaSDave Chinner 
1245c24b5dfaSDave Chinner 	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
124665523218SChristoph Hellwig 	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1247c24b5dfaSDave Chinner 
1248f5d92749SChandan Babu R 	error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
1249f5d92749SChandan Babu R 			XFS_IEXT_DIR_MANIP_CNT(mp));
1250f5d92749SChandan Babu R 	if (error)
1251f5d92749SChandan Babu R 		goto error_return;
1252f5d92749SChandan Babu R 
1253c24b5dfaSDave Chinner 	/*
1254c24b5dfaSDave Chinner 	 * If we are using project inheritance, we only allow hard link
1255c24b5dfaSDave Chinner 	 * creation in our tree when the project IDs are the same; else
1256c24b5dfaSDave Chinner 	 * the tree quota mechanism could be circumvented.
1257c24b5dfaSDave Chinner 	 */
1258db07349dSChristoph Hellwig 	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1259ceaf603cSChristoph Hellwig 		     tdp->i_projid != sip->i_projid)) {
12602451337dSDave Chinner 		error = -EXDEV;
1261c24b5dfaSDave Chinner 		goto error_return;
1262c24b5dfaSDave Chinner 	}
1263c24b5dfaSDave Chinner 
126494f3cad5SEric Sandeen 	if (!resblks) {
126594f3cad5SEric Sandeen 		error = xfs_dir_canenter(tp, tdp, target_name);
1266c24b5dfaSDave Chinner 		if (error)
1267c24b5dfaSDave Chinner 			goto error_return;
126894f3cad5SEric Sandeen 	}
1269c24b5dfaSDave Chinner 
127054d7b5c1SDave Chinner 	/*
127154d7b5c1SDave Chinner 	 * Handle initial link state of O_TMPFILE inode
127254d7b5c1SDave Chinner 	 */
127354d7b5c1SDave Chinner 	if (VFS_I(sip)->i_nlink == 0) {
1274f40aadb2SDave Chinner 		struct xfs_perag	*pag;
1275f40aadb2SDave Chinner 
1276f40aadb2SDave Chinner 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1277f40aadb2SDave Chinner 		error = xfs_iunlink_remove(tp, pag, sip);
1278f40aadb2SDave Chinner 		xfs_perag_put(pag);
1279ab297431SZhi Yong Wu 		if (error)
12804906e215SChristoph Hellwig 			goto error_return;
1281ab297431SZhi Yong Wu 	}
1282ab297431SZhi Yong Wu 
1283c24b5dfaSDave Chinner 	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1284381eee69SBrian Foster 				   resblks);
1285c24b5dfaSDave Chinner 	if (error)
12864906e215SChristoph Hellwig 		goto error_return;
1287c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1288c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1289c24b5dfaSDave Chinner 
129091083269SEric Sandeen 	xfs_bumplink(tp, sip);
1291c24b5dfaSDave Chinner 
1292c24b5dfaSDave Chinner 	/*
1293c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1294c24b5dfaSDave Chinner 	 * link transaction goes to disk before returning to
1295c24b5dfaSDave Chinner 	 * the user.
1296c24b5dfaSDave Chinner 	 */
1297f6106efaSEric Sandeen 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1298c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1299c24b5dfaSDave Chinner 
130070393313SChristoph Hellwig 	return xfs_trans_commit(tp);
1301c24b5dfaSDave Chinner 
1302c24b5dfaSDave Chinner  error_return:
13034906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1304c24b5dfaSDave Chinner  std_return:
1305c24b5dfaSDave Chinner 	return error;
1306c24b5dfaSDave Chinner }
1307c24b5dfaSDave Chinner 
1308363e59baSDarrick J. Wong /* Clear the reflink flag and the cowblocks tag if possible. */
1309363e59baSDarrick J. Wong static void
1310363e59baSDarrick J. Wong xfs_itruncate_clear_reflink_flags(
1311363e59baSDarrick J. Wong 	struct xfs_inode	*ip)
1312363e59baSDarrick J. Wong {
1313363e59baSDarrick J. Wong 	struct xfs_ifork	*dfork;
1314363e59baSDarrick J. Wong 	struct xfs_ifork	*cfork;
1315363e59baSDarrick J. Wong 
1316363e59baSDarrick J. Wong 	if (!xfs_is_reflink_inode(ip))
1317363e59baSDarrick J. Wong 		return;
1318363e59baSDarrick J. Wong 	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1319363e59baSDarrick J. Wong 	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1320363e59baSDarrick J. Wong 	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
13213e09ab8fSChristoph Hellwig 		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1322363e59baSDarrick J. Wong 	if (cfork->if_bytes == 0)
1323363e59baSDarrick J. Wong 		xfs_inode_clear_cowblocks_tag(ip);
1324363e59baSDarrick J. Wong }
1325363e59baSDarrick J. Wong 
13261da177e4SLinus Torvalds /*
13278f04c47aSChristoph Hellwig  * Free up the underlying blocks past new_size.  The new size must be smaller
13288f04c47aSChristoph Hellwig  * than the current size.  This routine can be used both for the attribute and
13298f04c47aSChristoph Hellwig  * data fork, and does not modify the inode size, which is left to the caller.
13301da177e4SLinus Torvalds  *
1331f6485057SDavid Chinner  * The transaction passed to this routine must have made a permanent log
1332f6485057SDavid Chinner  * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1333f6485057SDavid Chinner  * given transaction and start new ones, so make sure everything involved in
1334f6485057SDavid Chinner  * the transaction is tidy before calling here.  A transaction will be
1335f6485057SDavid Chinner  * returned to the caller to be committed.  The incoming transaction must
1336f6485057SDavid Chinner  * already include the inode, and both inode locks must be held exclusively.
1337f6485057SDavid Chinner  * The inode must also be "held" within the transaction.  On return the inode
1338f6485057SDavid Chinner  * will be "held" within the returned transaction.  This routine does NOT
1339f6485057SDavid Chinner  * require any disk space to be reserved for it within the transaction.
13401da177e4SLinus Torvalds  *
1341f6485057SDavid Chinner  * If we get an error, we must return with the inode locked and linked into the
1342f6485057SDavid Chinner  * current transaction. This keeps things simple for the higher level code,
1343f6485057SDavid Chinner  * because it always knows that the inode is locked and held in the transaction
1344f6485057SDavid Chinner  * that returns to it whether errors occur or not.  We don't mark the inode
1345f6485057SDavid Chinner  * dirty on error so that transactions can be easily aborted if possible.
13461da177e4SLinus Torvalds  */
13471da177e4SLinus Torvalds int
13484e529339SBrian Foster xfs_itruncate_extents_flags(
13498f04c47aSChristoph Hellwig 	struct xfs_trans	**tpp,
13508f04c47aSChristoph Hellwig 	struct xfs_inode	*ip,
13518f04c47aSChristoph Hellwig 	int			whichfork,
135213b86fc3SBrian Foster 	xfs_fsize_t		new_size,
13534e529339SBrian Foster 	int			flags)
13541da177e4SLinus Torvalds {
13558f04c47aSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
13568f04c47aSChristoph Hellwig 	struct xfs_trans	*tp = *tpp;
13571da177e4SLinus Torvalds 	xfs_fileoff_t		first_unmap_block;
13588f04c47aSChristoph Hellwig 	xfs_filblks_t		unmap_len;
13598f04c47aSChristoph Hellwig 	int			error = 0;
13601da177e4SLinus Torvalds 
13610b56185bSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
13620b56185bSChristoph Hellwig 	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
13630b56185bSChristoph Hellwig 	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1364ce7ae151SChristoph Hellwig 	ASSERT(new_size <= XFS_ISIZE(ip));
13658f04c47aSChristoph Hellwig 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
13661da177e4SLinus Torvalds 	ASSERT(ip->i_itemp != NULL);
1367898621d5SChristoph Hellwig 	ASSERT(ip->i_itemp->ili_lock_flags == 0);
13681da177e4SLinus Torvalds 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
13691da177e4SLinus Torvalds 
1370673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_start(ip, new_size);
1371673e8e59SChristoph Hellwig 
13724e529339SBrian Foster 	flags |= xfs_bmapi_aflag(whichfork);
137313b86fc3SBrian Foster 
13741da177e4SLinus Torvalds 	/*
13751da177e4SLinus Torvalds 	 * Since it is possible for space to become allocated beyond
13761da177e4SLinus Torvalds 	 * the end of the file (in a crash where the space is allocated
13771da177e4SLinus Torvalds 	 * but the inode size is not yet updated), simply remove any
13781da177e4SLinus Torvalds 	 * blocks which show up between the new EOF and the maximum
13794bbb04abSDarrick J. Wong 	 * possible file size.
13804bbb04abSDarrick J. Wong 	 *
13814bbb04abSDarrick J. Wong 	 * We have to free all the blocks to the bmbt maximum offset, even if
13824bbb04abSDarrick J. Wong 	 * the page cache can't scale that far.
13831da177e4SLinus Torvalds 	 */
13848f04c47aSChristoph Hellwig 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
138533005fd0SDarrick J. Wong 	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
13864bbb04abSDarrick J. Wong 		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
13878f04c47aSChristoph Hellwig 		return 0;
13884bbb04abSDarrick J. Wong 	}
13898f04c47aSChristoph Hellwig 
13904bbb04abSDarrick J. Wong 	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
13914bbb04abSDarrick J. Wong 	while (unmap_len > 0) {
139202dff7bfSBrian Foster 		ASSERT(tp->t_firstblock == NULLFSBLOCK);
13934bbb04abSDarrick J. Wong 		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
13944bbb04abSDarrick J. Wong 				flags, XFS_ITRUNC_MAX_EXTENTS);
13958f04c47aSChristoph Hellwig 		if (error)
1396d5a2e289SBrian Foster 			goto out;
13971da177e4SLinus Torvalds 
13986dd379c7SBrian Foster 		/* free the just unmapped extents */
13999e28a242SBrian Foster 		error = xfs_defer_finish(&tp);
14008f04c47aSChristoph Hellwig 		if (error)
14019b1f4e98SBrian Foster 			goto out;
14021da177e4SLinus Torvalds 	}
14038f04c47aSChristoph Hellwig 
14044919d42aSDarrick J. Wong 	if (whichfork == XFS_DATA_FORK) {
1405aa8968f2SDarrick J. Wong 		/* Remove all pending CoW reservations. */
14064919d42aSDarrick J. Wong 		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
14074bbb04abSDarrick J. Wong 				first_unmap_block, XFS_MAX_FILEOFF, true);
1408aa8968f2SDarrick J. Wong 		if (error)
1409aa8968f2SDarrick J. Wong 			goto out;
1410aa8968f2SDarrick J. Wong 
1411363e59baSDarrick J. Wong 		xfs_itruncate_clear_reflink_flags(ip);
14124919d42aSDarrick J. Wong 	}
1413aa8968f2SDarrick J. Wong 
1414673e8e59SChristoph Hellwig 	/*
1415673e8e59SChristoph Hellwig 	 * Always re-log the inode so that our permanent transaction can keep
1416673e8e59SChristoph Hellwig 	 * on rolling it forward in the log.
1417673e8e59SChristoph Hellwig 	 */
1418673e8e59SChristoph Hellwig 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1419673e8e59SChristoph Hellwig 
1420673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_end(ip, new_size);
1421673e8e59SChristoph Hellwig 
14228f04c47aSChristoph Hellwig out:
14238f04c47aSChristoph Hellwig 	*tpp = tp;
14248f04c47aSChristoph Hellwig 	return error;
14258f04c47aSChristoph Hellwig }
14268f04c47aSChristoph Hellwig 
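/*
 * A minimal sketch of the calling convention described above (error
 * handling elided; "new_size" is an illustrative value, not a kernel
 * symbol):
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *
 * xfs_inactive_truncate() below is a real in-tree instance of this
 * pattern.
 */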
1427c24b5dfaSDave Chinner int
1428c24b5dfaSDave Chinner xfs_release(
1429c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1430c24b5dfaSDave Chinner {
1431c24b5dfaSDave Chinner 	xfs_mount_t	*mp = ip->i_mount;
14327d88329eSDarrick J. Wong 	int		error = 0;
1433c24b5dfaSDave Chinner 
1434c19b3b05SDave Chinner 	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1435c24b5dfaSDave Chinner 		return 0;
1436c24b5dfaSDave Chinner 
1437c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
1438c24b5dfaSDave Chinner 	if (mp->m_flags & XFS_MOUNT_RDONLY)
1439c24b5dfaSDave Chinner 		return 0;
1440c24b5dfaSDave Chinner 
1441c24b5dfaSDave Chinner 	if (!XFS_FORCED_SHUTDOWN(mp)) {
1442c24b5dfaSDave Chinner 		int truncated;
1443c24b5dfaSDave Chinner 
1444c24b5dfaSDave Chinner 		/*
1445c24b5dfaSDave Chinner 		 * If we previously truncated this file and removed old data
1446c24b5dfaSDave Chinner 		 * in the process, we want to initiate "early" writeout on
1447c24b5dfaSDave Chinner 		 * the last close.  This is an attempt to combat the notorious
1448c24b5dfaSDave Chinner 		 * NULL files problem which is particularly noticeable from a
1449c24b5dfaSDave Chinner 		 * truncate down, buffered (re-)write (delalloc), followed by
1450c24b5dfaSDave Chinner 		 * a crash.  What we are effectively doing here is
1451c24b5dfaSDave Chinner 		 * significantly reducing the time window where we'd otherwise
1452c24b5dfaSDave Chinner 		 * be exposed to that problem.
1453c24b5dfaSDave Chinner 		 */
1454c24b5dfaSDave Chinner 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1455c24b5dfaSDave Chinner 		if (truncated) {
1456c24b5dfaSDave Chinner 			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1457eac152b4SDave Chinner 			if (ip->i_delayed_blks > 0) {
14582451337dSDave Chinner 				error = filemap_flush(VFS_I(ip)->i_mapping);
1459c24b5dfaSDave Chinner 				if (error)
1460c24b5dfaSDave Chinner 					return error;
1461c24b5dfaSDave Chinner 			}
1462c24b5dfaSDave Chinner 		}
1463c24b5dfaSDave Chinner 	}
1464c24b5dfaSDave Chinner 
146554d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink == 0)
1466c24b5dfaSDave Chinner 		return 0;
1467c24b5dfaSDave Chinner 
14687d88329eSDarrick J. Wong 	/*
14697d88329eSDarrick J. Wong 	 * If we can't get the iolock just skip truncating the blocks past EOF
14707d88329eSDarrick J. Wong 	 * because we could deadlock with the mmap_lock otherwise. We'll get
14717d88329eSDarrick J. Wong 	 * another chance to drop them once the last reference to the inode is
14727d88329eSDarrick J. Wong 	 * dropped, so we'll never leak blocks permanently.
14737d88329eSDarrick J. Wong 	 */
14747d88329eSDarrick J. Wong 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
14757d88329eSDarrick J. Wong 		return 0;
1476c24b5dfaSDave Chinner 
14777d88329eSDarrick J. Wong 	if (xfs_can_free_eofblocks(ip, false)) {
1478c24b5dfaSDave Chinner 		/*
1479a36b9261SBrian Foster 		 * If the inode is being opened, written and closed
1480a36b9261SBrian Foster 		 * frequently and we have delayed allocation blocks outstanding
1481a36b9261SBrian Foster 		 * (e.g. streaming writes from the NFS server), truncating the
1482a36b9261SBrian Foster 		 * blocks past EOF will cause fragmentation to occur.
1483a36b9261SBrian Foster 		 *
1484a36b9261SBrian Foster 		 * In this case don't do the truncation, but we have to be
1485a36b9261SBrian Foster 		 * careful how we detect this case. Blocks beyond EOF show up as
1486a36b9261SBrian Foster 		 * i_delayed_blks even when the inode is clean, so we need to
1487a36b9261SBrian Foster 		 * truncate them away first before checking for a dirty release.
1488a36b9261SBrian Foster 		 * Hence on the first dirty close we will still remove the
1489a36b9261SBrian Foster 		 * speculative allocation, but after that we will leave it in
1490a36b9261SBrian Foster 		 * place.
1491a36b9261SBrian Foster 		 */
1492a36b9261SBrian Foster 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
14937d88329eSDarrick J. Wong 			goto out_unlock;
14947d88329eSDarrick J. Wong 
1495a36b9261SBrian Foster 		error = xfs_free_eofblocks(ip);
1496a36b9261SBrian Foster 		if (error)
14977d88329eSDarrick J. Wong 			goto out_unlock;
1498c24b5dfaSDave Chinner 
1499c24b5dfaSDave Chinner 		/* delalloc blocks after truncation means it really is dirty */
1500c24b5dfaSDave Chinner 		if (ip->i_delayed_blks)
1501c24b5dfaSDave Chinner 			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1502c24b5dfaSDave Chinner 	}
15037d88329eSDarrick J. Wong 
15047d88329eSDarrick J. Wong out_unlock:
15057d88329eSDarrick J. Wong 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
15067d88329eSDarrick J. Wong 	return error;
1507c24b5dfaSDave Chinner }
1508c24b5dfaSDave Chinner 
1509c24b5dfaSDave Chinner /*
1510f7be2d7fSBrian Foster  * xfs_inactive_truncate
1511f7be2d7fSBrian Foster  *
1512f7be2d7fSBrian Foster  * Called to perform a truncate when an inode becomes unlinked.
1513f7be2d7fSBrian Foster  */
1514f7be2d7fSBrian Foster STATIC int
1515f7be2d7fSBrian Foster xfs_inactive_truncate(
1516f7be2d7fSBrian Foster 	struct xfs_inode *ip)
1517f7be2d7fSBrian Foster {
1518f7be2d7fSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
1519f7be2d7fSBrian Foster 	struct xfs_trans	*tp;
1520f7be2d7fSBrian Foster 	int			error;
1521f7be2d7fSBrian Foster 
1522253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1523f7be2d7fSBrian Foster 	if (error) {
1524f7be2d7fSBrian Foster 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
1525f7be2d7fSBrian Foster 		return error;
1526f7be2d7fSBrian Foster 	}
1527f7be2d7fSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1528f7be2d7fSBrian Foster 	xfs_trans_ijoin(tp, ip, 0);
1529f7be2d7fSBrian Foster 
1530f7be2d7fSBrian Foster 	/*
1531f7be2d7fSBrian Foster 	 * Log the inode size first to prevent stale data exposure in the event
1532f7be2d7fSBrian Foster 	 * of a system crash before the truncate completes. See the related
153369bca807SJan Kara 	 * comment in xfs_vn_setattr_size() for details.
1534f7be2d7fSBrian Foster 	 */
153513d2c10bSChristoph Hellwig 	ip->i_disk_size = 0;
1536f7be2d7fSBrian Foster 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1537f7be2d7fSBrian Foster 
1538f7be2d7fSBrian Foster 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1539f7be2d7fSBrian Foster 	if (error)
1540f7be2d7fSBrian Foster 		goto error_trans_cancel;
1541f7be2d7fSBrian Foster 
1542daf83964SChristoph Hellwig 	ASSERT(ip->i_df.if_nextents == 0);
1543f7be2d7fSBrian Foster 
154470393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1545f7be2d7fSBrian Foster 	if (error)
1546f7be2d7fSBrian Foster 		goto error_unlock;
1547f7be2d7fSBrian Foster 
1548f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1549f7be2d7fSBrian Foster 	return 0;
1550f7be2d7fSBrian Foster 
1551f7be2d7fSBrian Foster error_trans_cancel:
15524906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1553f7be2d7fSBrian Foster error_unlock:
1554f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1555f7be2d7fSBrian Foster 	return error;
1556f7be2d7fSBrian Foster }
1557f7be2d7fSBrian Foster 
1558f7be2d7fSBrian Foster /*
155988877d2bSBrian Foster  * xfs_inactive_ifree()
156088877d2bSBrian Foster  *
156188877d2bSBrian Foster  * Perform the inode free when an inode is unlinked.
156288877d2bSBrian Foster  */
156388877d2bSBrian Foster STATIC int
156488877d2bSBrian Foster xfs_inactive_ifree(
156588877d2bSBrian Foster 	struct xfs_inode *ip)
156688877d2bSBrian Foster {
156788877d2bSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
156888877d2bSBrian Foster 	struct xfs_trans	*tp;
156988877d2bSBrian Foster 	int			error;
157088877d2bSBrian Foster 
15719d43b180SBrian Foster 	/*
157276d771b4SChristoph Hellwig 	 * We try to use a per-AG reservation for any block needed by the finobt
157376d771b4SChristoph Hellwig 	 * tree, but as the finobt feature predates the per-AG reservation
157476d771b4SChristoph Hellwig 	 * support, a degraded file system might not have enough space for the
157576d771b4SChristoph Hellwig 	 * reservation at mount time.  In that case try to dip into the reserved
157676d771b4SChristoph Hellwig 	 * pool and pray.
15779d43b180SBrian Foster 	 *
15789d43b180SBrian Foster 	 * Send a warning if the reservation does happen to fail, as the inode
15799d43b180SBrian Foster 	 * now remains allocated and sits on the unlinked list until the fs is
15809d43b180SBrian Foster 	 * repaired.
15819d43b180SBrian Foster 	 */
1582e1f6ca11SDarrick J. Wong 	if (unlikely(mp->m_finobt_nores)) {
1583253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
158476d771b4SChristoph Hellwig 				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
158576d771b4SChristoph Hellwig 				&tp);
158676d771b4SChristoph Hellwig 	} else {
158776d771b4SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
158876d771b4SChristoph Hellwig 	}
158988877d2bSBrian Foster 	if (error) {
15902451337dSDave Chinner 		if (error == -ENOSPC) {
15919d43b180SBrian Foster 			xfs_warn_ratelimited(mp,
15929d43b180SBrian Foster 			"Failed to remove inode(s) from unlinked list. "
15939d43b180SBrian Foster 			"Please free space, unmount and run xfs_repair.");
15949d43b180SBrian Foster 		} else {
159588877d2bSBrian Foster 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
15969d43b180SBrian Foster 		}
159788877d2bSBrian Foster 		return error;
159888877d2bSBrian Foster 	}
159988877d2bSBrian Foster 
160096355d5aSDave Chinner 	/*
160196355d5aSDave Chinner 	 * We do not hold the inode locked across the entire rolling transaction
160296355d5aSDave Chinner 	 * here. We only need to hold it for the first transaction that
160396355d5aSDave Chinner 	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
160496355d5aSDave Chinner 	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
160596355d5aSDave Chinner 	 * here breaks the relationship between cluster buffer invalidation and
160696355d5aSDave Chinner 	 * stale inode invalidation on cluster buffer item journal commit
160796355d5aSDave Chinner 	 * completion, and can result in leaving dirty stale inodes hanging
160896355d5aSDave Chinner 	 * around in memory.
160996355d5aSDave Chinner 	 *
161096355d5aSDave Chinner 	 * We have no need for serialising this inode operation against other
161196355d5aSDave Chinner 	 * operations - we freed the inode and hence reallocation is required
161296355d5aSDave Chinner 	 * and that will serialise on reallocating the space the deferops need
161396355d5aSDave Chinner 	 * to free. Hence we can unlock the inode on the first commit of
161496355d5aSDave Chinner 	 * the transaction rather than roll it right through the deferops. This
161596355d5aSDave Chinner 	 * avoids relogging the XFS_ISTALE inode.
161696355d5aSDave Chinner 	 *
161796355d5aSDave Chinner 	 * We check that xfs_ifree() hasn't grown an internal transaction roll
161896355d5aSDave Chinner 	 * by asserting that the inode is still locked when it returns.
161996355d5aSDave Chinner 	 */
162088877d2bSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
162196355d5aSDave Chinner 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
162288877d2bSBrian Foster 
16230e0417f3SBrian Foster 	error = xfs_ifree(tp, ip);
162496355d5aSDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
162588877d2bSBrian Foster 	if (error) {
162688877d2bSBrian Foster 		/*
162788877d2bSBrian Foster 		 * If we fail to free the inode, shut down.  The cancel
162888877d2bSBrian Foster 		 * might do that, we need to make sure.  Otherwise the
162988877d2bSBrian Foster 		 * inode might be lost for a long time or forever.
163088877d2bSBrian Foster 		 */
163188877d2bSBrian Foster 		if (!XFS_FORCED_SHUTDOWN(mp)) {
163288877d2bSBrian Foster 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
163388877d2bSBrian Foster 				__func__, error);
163488877d2bSBrian Foster 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
163588877d2bSBrian Foster 		}
16364906e215SChristoph Hellwig 		xfs_trans_cancel(tp);
163788877d2bSBrian Foster 		return error;
163888877d2bSBrian Foster 	}
163988877d2bSBrian Foster 
164088877d2bSBrian Foster 	/*
164188877d2bSBrian Foster 	 * Credit the quota account(s). The inode is gone.
164288877d2bSBrian Foster 	 */
164388877d2bSBrian Foster 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
164488877d2bSBrian Foster 
164588877d2bSBrian Foster 	/*
1646d4a97a04SBrian Foster 	 * Just ignore errors at this point.  There is nothing we can do except
1647d4a97a04SBrian Foster 	 * to try to keep going. Make sure it's not a silent error.
164888877d2bSBrian Foster 	 */
164970393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
165088877d2bSBrian Foster 	if (error)
165188877d2bSBrian Foster 		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
165288877d2bSBrian Foster 			__func__, error);
165388877d2bSBrian Foster 
165488877d2bSBrian Foster 	return 0;
165588877d2bSBrian Foster }
165688877d2bSBrian Foster 
165788877d2bSBrian Foster /*
1658*62af7d54SDarrick J. Wong  * Returns true if we need to update the on-disk metadata before we can free
1659*62af7d54SDarrick J. Wong  * the memory used by this inode.  Updates include freeing post-eof
1660*62af7d54SDarrick J. Wong  * preallocations; freeing COW staging extents; and marking the inode free in
1661*62af7d54SDarrick J. Wong  * the inobt if it is on the unlinked list.
1662*62af7d54SDarrick J. Wong  */
1663*62af7d54SDarrick J. Wong bool
1664*62af7d54SDarrick J. Wong xfs_inode_needs_inactive(
1665*62af7d54SDarrick J. Wong 	struct xfs_inode	*ip)
1666*62af7d54SDarrick J. Wong {
1667*62af7d54SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
1668*62af7d54SDarrick J. Wong 	struct xfs_ifork	*cow_ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1669*62af7d54SDarrick J. Wong 
1670*62af7d54SDarrick J. Wong 	/*
1671*62af7d54SDarrick J. Wong 	 * If the inode is already free, then there can be nothing
1672*62af7d54SDarrick J. Wong 	 * to clean up here.
1673*62af7d54SDarrick J. Wong 	 */
1674*62af7d54SDarrick J. Wong 	if (VFS_I(ip)->i_mode == 0)
1675*62af7d54SDarrick J. Wong 		return false;
1676*62af7d54SDarrick J. Wong 
1677*62af7d54SDarrick J. Wong 	/* If this is a read-only mount, don't do this (would generate I/O) */
1678*62af7d54SDarrick J. Wong 	if (mp->m_flags & XFS_MOUNT_RDONLY)
1679*62af7d54SDarrick J. Wong 		return false;
1680*62af7d54SDarrick J. Wong 
1681*62af7d54SDarrick J. Wong 	/* If the log isn't running, push inodes straight to reclaim. */
1682*62af7d54SDarrick J. Wong 	if (XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_NORECOVERY))
1683*62af7d54SDarrick J. Wong 		return false;
1684*62af7d54SDarrick J. Wong 
1685*62af7d54SDarrick J. Wong 	/* Metadata inodes require explicit resource cleanup. */
1686*62af7d54SDarrick J. Wong 	if (xfs_is_metadata_inode(ip))
1687*62af7d54SDarrick J. Wong 		return false;
1688*62af7d54SDarrick J. Wong 
1689*62af7d54SDarrick J. Wong 	/* Want to clean out the cow blocks if there are any. */
1690*62af7d54SDarrick J. Wong 	if (cow_ifp && cow_ifp->if_bytes > 0)
1691*62af7d54SDarrick J. Wong 		return true;
1692*62af7d54SDarrick J. Wong 
1693*62af7d54SDarrick J. Wong 	/* Unlinked files must be freed. */
1694*62af7d54SDarrick J. Wong 	if (VFS_I(ip)->i_nlink == 0)
1695*62af7d54SDarrick J. Wong 		return true;
1696*62af7d54SDarrick J. Wong 
1697*62af7d54SDarrick J. Wong 	/*
1698*62af7d54SDarrick J. Wong 	 * This file isn't being freed, so check if there are post-eof blocks
1699*62af7d54SDarrick J. Wong 	 * to free.  @force is true because we are evicting an inode from the
1700*62af7d54SDarrick J. Wong 	 * cache.  Post-eof blocks must be freed, lest we end up with broken
1701*62af7d54SDarrick J. Wong 	 * free space accounting.
1702*62af7d54SDarrick J. Wong 	 *
1703*62af7d54SDarrick J. Wong 	 * Note: don't bother with iolock here since lockdep complains about
1704*62af7d54SDarrick J. Wong 	 * acquiring it in reclaim context. We have the only reference to the
1705*62af7d54SDarrick J. Wong 	 * inode at this point anyways.
1706*62af7d54SDarrick J. Wong 	 */
1707*62af7d54SDarrick J. Wong 	return xfs_can_free_eofblocks(ip, true);
1708*62af7d54SDarrick J. Wong }
1709*62af7d54SDarrick J. Wong 
1710*62af7d54SDarrick J. Wong /*
1711c24b5dfaSDave Chinner  * xfs_inactive
1712c24b5dfaSDave Chinner  *
1713c24b5dfaSDave Chinner  * This is called when the reference count for the vnode
1714c24b5dfaSDave Chinner  * goes to zero.  If the file has been unlinked, then it must
1715c24b5dfaSDave Chinner  * now be truncated.  Also, we clear all of the read-ahead state
1716c24b5dfaSDave Chinner  * kept for the inode here since the file is now closed.
1717c24b5dfaSDave Chinner  */
171874564fb4SBrian Foster void
1719c24b5dfaSDave Chinner xfs_inactive(
1720c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1721c24b5dfaSDave Chinner {
17223d3c8b52SJie Liu 	struct xfs_mount	*mp;
1723c24b5dfaSDave Chinner 	int			error;
1724c24b5dfaSDave Chinner 	int			truncate = 0;
1725c24b5dfaSDave Chinner 
1726c24b5dfaSDave Chinner 	/*
1727c24b5dfaSDave Chinner 	 * If the inode is already free, then there can be nothing
1728c24b5dfaSDave Chinner 	 * to clean up here.
1729c24b5dfaSDave Chinner 	 */
1730c19b3b05SDave Chinner 	if (VFS_I(ip)->i_mode == 0) {
1731c24b5dfaSDave Chinner 		ASSERT(ip->i_df.if_broot_bytes == 0);
17323ea06d73SDarrick J. Wong 		goto out;
1733c24b5dfaSDave Chinner 	}
1734c24b5dfaSDave Chinner 
1735c24b5dfaSDave Chinner 	mp = ip->i_mount;
173617c12bcdSDarrick J. Wong 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1737c24b5dfaSDave Chinner 
1738c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
1739c24b5dfaSDave Chinner 	if (mp->m_flags & XFS_MOUNT_RDONLY)
17403ea06d73SDarrick J. Wong 		goto out;
1741c24b5dfaSDave Chinner 
1742383e32b0SDarrick J. Wong 	/* Metadata inodes require explicit resource cleanup. */
1743383e32b0SDarrick J. Wong 	if (xfs_is_metadata_inode(ip))
17443ea06d73SDarrick J. Wong 		goto out;
1745383e32b0SDarrick J. Wong 
17466231848cSDarrick J. Wong 	/* Try to clean out the cow blocks if there are any. */
174751d62690SChristoph Hellwig 	if (xfs_inode_has_cow_data(ip))
17486231848cSDarrick J. Wong 		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
17496231848cSDarrick J. Wong 
175054d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink != 0) {
1751c24b5dfaSDave Chinner 		/*
1752c24b5dfaSDave Chinner 		 * force is true because we are evicting an inode from the
1753c24b5dfaSDave Chinner 		 * cache. Post-eof blocks must be freed, lest we end up with
1754c24b5dfaSDave Chinner 		 * broken free space accounting.
17553b4683c2SBrian Foster 		 *
17563b4683c2SBrian Foster 		 * Note: don't bother with iolock here since lockdep complains
17573b4683c2SBrian Foster 		 * about acquiring it in reclaim context. We have the only
17583b4683c2SBrian Foster 		 * reference to the inode at this point anyways.
1759c24b5dfaSDave Chinner 		 */
17603b4683c2SBrian Foster 		if (xfs_can_free_eofblocks(ip, true))
1761a36b9261SBrian Foster 			xfs_free_eofblocks(ip);
176274564fb4SBrian Foster 
17633ea06d73SDarrick J. Wong 		goto out;
1764c24b5dfaSDave Chinner 	}
1765c24b5dfaSDave Chinner 
1766c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode) &&
176713d2c10bSChristoph Hellwig 	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1768daf83964SChristoph Hellwig 	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1769c24b5dfaSDave Chinner 		truncate = 1;
1770c24b5dfaSDave Chinner 
1771c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
1772c24b5dfaSDave Chinner 	if (error)
17733ea06d73SDarrick J. Wong 		goto out;
1774c24b5dfaSDave Chinner 
1775c19b3b05SDave Chinner 	if (S_ISLNK(VFS_I(ip)->i_mode))
177636b21ddeSBrian Foster 		error = xfs_inactive_symlink(ip);
1777f7be2d7fSBrian Foster 	else if (truncate)
1778f7be2d7fSBrian Foster 		error = xfs_inactive_truncate(ip);
177936b21ddeSBrian Foster 	if (error)
17803ea06d73SDarrick J. Wong 		goto out;
1781c24b5dfaSDave Chinner 
1782c24b5dfaSDave Chinner 	/*
1783c24b5dfaSDave Chinner 	 * If there are attributes associated with the file then blow them away
1784c24b5dfaSDave Chinner 	 * now.  The code calls a routine that recursively deconstructs the
17856dfe5a04SDave Chinner 	 * attribute fork. It also blows away the in-core attribute fork.
1786c24b5dfaSDave Chinner 	 */
17876dfe5a04SDave Chinner 	if (XFS_IFORK_Q(ip)) {
1788c24b5dfaSDave Chinner 		error = xfs_attr_inactive(ip);
1789c24b5dfaSDave Chinner 		if (error)
17903ea06d73SDarrick J. Wong 			goto out;
1791c24b5dfaSDave Chinner 	}
1792c24b5dfaSDave Chinner 
17936dfe5a04SDave Chinner 	ASSERT(!ip->i_afp);
17947821ea30SChristoph Hellwig 	ASSERT(ip->i_forkoff == 0);
1795c24b5dfaSDave Chinner 
1796c24b5dfaSDave Chinner 	/*
1797c24b5dfaSDave Chinner 	 * Free the inode.
1798c24b5dfaSDave Chinner 	 */
17993ea06d73SDarrick J. Wong 	xfs_inactive_ifree(ip);
1800c24b5dfaSDave Chinner 
18013ea06d73SDarrick J. Wong out:
1802c24b5dfaSDave Chinner 	/*
18033ea06d73SDarrick J. Wong 	 * We're done making metadata updates for this inode, so we can release
18043ea06d73SDarrick J. Wong 	 * the attached dquots.
1805c24b5dfaSDave Chinner 	 */
1806c24b5dfaSDave Chinner 	xfs_qm_dqdetach(ip);
1807c24b5dfaSDave Chinner }
1808c24b5dfaSDave Chinner 
18091da177e4SLinus Torvalds /*
18109b247179SDarrick J. Wong  * In-Core Unlinked List Lookups
18119b247179SDarrick J. Wong  * =============================
18129b247179SDarrick J. Wong  *
18139b247179SDarrick J. Wong  * Every inode is supposed to be reachable from some other piece of metadata
18149b247179SDarrick J. Wong  * with the exception of the root directory.  Inodes with a connection to a
18159b247179SDarrick J. Wong  * file descriptor but not linked from anywhere in the on-disk directory tree
18169b247179SDarrick J. Wong  * are collectively known as unlinked inodes, though the filesystem itself
18179b247179SDarrick J. Wong  * maintains links to these inodes so that on-disk metadata are consistent.
18189b247179SDarrick J. Wong  *
18199b247179SDarrick J. Wong  * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
18209b247179SDarrick J. Wong  * header contains a number of buckets that point to an inode, and each inode
18219b247179SDarrick J. Wong  * record has a pointer to the next inode in the hash chain.  This
18229b247179SDarrick J. Wong  * singly-linked list causes scaling problems in the iunlink remove function
18239b247179SDarrick J. Wong  * because we must walk that list to find the inode that points to the inode
18249b247179SDarrick J. Wong  * being removed from the unlinked hash bucket list.
18259b247179SDarrick J. Wong  *
18269b247179SDarrick J. Wong  * What if we modelled the unlinked list as a collection of records capturing
18279b247179SDarrick J. Wong  * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
18289b247179SDarrick J. Wong  * have a fast way to look up unlinked list predecessors, which avoids the
18299b247179SDarrick J. Wong  * slow list walk.  That's exactly what we do here (in-core) with a per-AG
18309b247179SDarrick J. Wong  * rhashtable.
18319b247179SDarrick J. Wong  *
18329b247179SDarrick J. Wong  * Because this is a backref cache, we ignore operational failures since the
18339b247179SDarrick J. Wong  * iunlink code can fall back to the slow bucket walk.  The only errors that
18349b247179SDarrick J. Wong  * should bubble out are for obviously incorrect situations.
18359b247179SDarrick J. Wong  *
18369b247179SDarrick J. Wong  * All users of the backref cache MUST hold the AGI buffer lock to serialize
18379b247179SDarrick J. Wong  * access or have otherwise provided for concurrency control.
18389b247179SDarrick J. Wong  */
18399b247179SDarrick J. Wong 
18409b247179SDarrick J. Wong /* Capture a "X.next_unlinked = Y" relationship. */
18419b247179SDarrick J. Wong struct xfs_iunlink {
18429b247179SDarrick J. Wong 	struct rhash_head	iu_rhash_head;
18439b247179SDarrick J. Wong 	xfs_agino_t		iu_agino;		/* X */
18449b247179SDarrick J. Wong 	xfs_agino_t		iu_next_unlinked;	/* Y */
18459b247179SDarrick J. Wong };
18469b247179SDarrick J. Wong 
18479b247179SDarrick J. Wong /* Unlinked list predecessor lookup hashtable construction */
18489b247179SDarrick J. Wong static int
18499b247179SDarrick J. Wong xfs_iunlink_obj_cmpfn(
18509b247179SDarrick J. Wong 	struct rhashtable_compare_arg	*arg,
18519b247179SDarrick J. Wong 	const void			*obj)
18529b247179SDarrick J. Wong {
18539b247179SDarrick J. Wong 	const xfs_agino_t		*key = arg->key;
18549b247179SDarrick J. Wong 	const struct xfs_iunlink	*iu = obj;
18559b247179SDarrick J. Wong 
18569b247179SDarrick J. Wong 	if (iu->iu_next_unlinked != *key)
18579b247179SDarrick J. Wong 		return 1;
18589b247179SDarrick J. Wong 	return 0;
18599b247179SDarrick J. Wong }
18609b247179SDarrick J. Wong 
18619b247179SDarrick J. Wong static const struct rhashtable_params xfs_iunlink_hash_params = {
18629b247179SDarrick J. Wong 	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
18639b247179SDarrick J. Wong 	.key_len		= sizeof(xfs_agino_t),
18649b247179SDarrick J. Wong 	.key_offset		= offsetof(struct xfs_iunlink,
18659b247179SDarrick J. Wong 					   iu_next_unlinked),
18669b247179SDarrick J. Wong 	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
18679b247179SDarrick J. Wong 	.automatic_shrinking	= true,
18689b247179SDarrick J. Wong 	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
18699b247179SDarrick J. Wong };
18709b247179SDarrick J. Wong 
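/*
 * A small worked example (hypothetical inode numbers): for the bucket
 * chain A -> B -> C, i.e. A.next_unlinked == B and B.next_unlinked == C,
 * the cache holds
 *
 *	{ .iu_agino = A, .iu_next_unlinked = B }
 *	{ .iu_agino = B, .iu_next_unlinked = C }
 *
 * keyed on iu_next_unlinked.  Removing C from the list then takes a
 * single hash lookup of C to find its predecessor B, instead of walking
 * the bucket list from A.
 */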
18719b247179SDarrick J. Wong /*
18729b247179SDarrick J. Wong  * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
18739b247179SDarrick J. Wong  * relation is found.
18749b247179SDarrick J. Wong  */
18759b247179SDarrick J. Wong static xfs_agino_t
18769b247179SDarrick J. Wong xfs_iunlink_lookup_backref(
18779b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18789b247179SDarrick J. Wong 	xfs_agino_t		agino)
18799b247179SDarrick J. Wong {
18809b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
18819b247179SDarrick J. Wong 
18829b247179SDarrick J. Wong 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
18839b247179SDarrick J. Wong 			xfs_iunlink_hash_params);
18849b247179SDarrick J. Wong 	return iu ? iu->iu_agino : NULLAGINO;
18859b247179SDarrick J. Wong }
18869b247179SDarrick J. Wong 
18879b247179SDarrick J. Wong /*
18889b247179SDarrick J. Wong  * Take ownership of an iunlink cache entry and insert it into the hash table.
18899b247179SDarrick J. Wong  * If successful, the entry will be owned by the cache; if not, it is freed.
18909b247179SDarrick J. Wong  * Either way, the caller does not own @iu after this call.
18919b247179SDarrick J. Wong  */
18929b247179SDarrick J. Wong static int
18939b247179SDarrick J. Wong xfs_iunlink_insert_backref(
18949b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18959b247179SDarrick J. Wong 	struct xfs_iunlink	*iu)
18969b247179SDarrick J. Wong {
18979b247179SDarrick J. Wong 	int			error;
18989b247179SDarrick J. Wong 
18999b247179SDarrick J. Wong 	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
19009b247179SDarrick J. Wong 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
19019b247179SDarrick J. Wong 	/*
19029b247179SDarrick J. Wong 	 * Fail loudly if there already was an entry because that's a sign of
19039b247179SDarrick J. Wong 	 * corruption of in-memory data.  Also fail loudly if we see an error
19049b247179SDarrick J. Wong 	 * code we didn't anticipate from the rhashtable code.  Currently we
19059b247179SDarrick J. Wong 	 * only anticipate ENOMEM.
19069b247179SDarrick J. Wong 	 */
19079b247179SDarrick J. Wong 	if (error) {
19089b247179SDarrick J. Wong 		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
19099b247179SDarrick J. Wong 		kmem_free(iu);
19109b247179SDarrick J. Wong 	}
19119b247179SDarrick J. Wong 	/*
19129b247179SDarrick J. Wong 	 * Absorb any runtime errors that aren't a result of corruption because
19139b247179SDarrick J. Wong 	 * this is a cache and we can always fall back to bucket list scanning.
19149b247179SDarrick J. Wong 	 */
19159b247179SDarrick J. Wong 	if (error != 0 && error != -EEXIST)
19169b247179SDarrick J. Wong 		error = 0;
19179b247179SDarrick J. Wong 	return error;
19189b247179SDarrick J. Wong }
19199b247179SDarrick J. Wong 
19209b247179SDarrick J. Wong /* Remember that @prev_agino.next_unlinked = @this_agino. */
19219b247179SDarrick J. Wong static int
19229b247179SDarrick J. Wong xfs_iunlink_add_backref(
19239b247179SDarrick J. Wong 	struct xfs_perag	*pag,
19249b247179SDarrick J. Wong 	xfs_agino_t		prev_agino,
19259b247179SDarrick J. Wong 	xfs_agino_t		this_agino)
19269b247179SDarrick J. Wong {
19279b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
19289b247179SDarrick J. Wong 
19299b247179SDarrick J. Wong 	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
19309b247179SDarrick J. Wong 		return 0;
19319b247179SDarrick J. Wong 
1932707e0ddaSTetsuo Handa 	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
19339b247179SDarrick J. Wong 	iu->iu_agino = prev_agino;
19349b247179SDarrick J. Wong 	iu->iu_next_unlinked = this_agino;
19359b247179SDarrick J. Wong 
19369b247179SDarrick J. Wong 	return xfs_iunlink_insert_backref(pag, iu);
19379b247179SDarrick J. Wong }
19389b247179SDarrick J. Wong 
19399b247179SDarrick J. Wong /*
19409b247179SDarrick J. Wong  * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
19419b247179SDarrick J. Wong  * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
19429b247179SDarrick J. Wong  * wasn't any such entry then we don't bother.
19439b247179SDarrick J. Wong  */
19449b247179SDarrick J. Wong static int
19459b247179SDarrick J. Wong xfs_iunlink_change_backref(
19469b247179SDarrick J. Wong 	struct xfs_perag	*pag,
19479b247179SDarrick J. Wong 	xfs_agino_t		agino,
19489b247179SDarrick J. Wong 	xfs_agino_t		next_unlinked)
19499b247179SDarrick J. Wong {
19509b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
19519b247179SDarrick J. Wong 	int			error;
19529b247179SDarrick J. Wong 
19539b247179SDarrick J. Wong 	/* Look up the old entry; if there wasn't one then exit. */
19549b247179SDarrick J. Wong 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
19559b247179SDarrick J. Wong 			xfs_iunlink_hash_params);
19569b247179SDarrick J. Wong 	if (!iu)
19579b247179SDarrick J. Wong 		return 0;
19589b247179SDarrick J. Wong 
19599b247179SDarrick J. Wong 	/*
19609b247179SDarrick J. Wong 	 * Remove the entry.  This shouldn't ever return an error, but if we
19619b247179SDarrick J. Wong 	 * couldn't remove the old entry we don't want to add it again to the
19629b247179SDarrick J. Wong 	 * hash table, and if the entry disappeared on us then someone's
19639b247179SDarrick J. Wong 	 * violated the locking rules and we need to fail loudly.  Either way
19649b247179SDarrick J. Wong 	 * we cannot remove the inode because internal state is or would have
19659b247179SDarrick J. Wong 	 * been corrupt.
19669b247179SDarrick J. Wong 	 */
19679b247179SDarrick J. Wong 	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
19689b247179SDarrick J. Wong 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
19699b247179SDarrick J. Wong 	if (error)
19709b247179SDarrick J. Wong 		return error;
19719b247179SDarrick J. Wong 
19729b247179SDarrick J. Wong 	/* If there is no new next entry just free our item and return. */
19739b247179SDarrick J. Wong 	if (next_unlinked == NULLAGINO) {
19749b247179SDarrick J. Wong 		kmem_free(iu);
19759b247179SDarrick J. Wong 		return 0;
19769b247179SDarrick J. Wong 	}
19779b247179SDarrick J. Wong 
19789b247179SDarrick J. Wong 	/* Update the entry and re-add it to the hash table. */
19799b247179SDarrick J. Wong 	iu->iu_next_unlinked = next_unlinked;
19809b247179SDarrick J. Wong 	return xfs_iunlink_insert_backref(pag, iu);
19819b247179SDarrick J. Wong }
19829b247179SDarrick J. Wong 
19839b247179SDarrick J. Wong /* Set up the in-core predecessor structures. */
19849b247179SDarrick J. Wong int
19859b247179SDarrick J. Wong xfs_iunlink_init(
19869b247179SDarrick J. Wong 	struct xfs_perag	*pag)
19879b247179SDarrick J. Wong {
19889b247179SDarrick J. Wong 	return rhashtable_init(&pag->pagi_unlinked_hash,
19899b247179SDarrick J. Wong 			&xfs_iunlink_hash_params);
19909b247179SDarrick J. Wong }
19919b247179SDarrick J. Wong 
19929b247179SDarrick J. Wong /* Free the in-core predecessor structures. */
19939b247179SDarrick J. Wong static void
19949b247179SDarrick J. Wong xfs_iunlink_free_item(
19959b247179SDarrick J. Wong 	void			*ptr,
19969b247179SDarrick J. Wong 	void			*arg)
19979b247179SDarrick J. Wong {
19989b247179SDarrick J. Wong 	struct xfs_iunlink	*iu = ptr;
19999b247179SDarrick J. Wong 	bool			*freed_anything = arg;
20009b247179SDarrick J. Wong 
20019b247179SDarrick J. Wong 	*freed_anything = true;
20029b247179SDarrick J. Wong 	kmem_free(iu);
20039b247179SDarrick J. Wong }
20049b247179SDarrick J. Wong 
20059b247179SDarrick J. Wong void
20069b247179SDarrick J. Wong xfs_iunlink_destroy(
20079b247179SDarrick J. Wong 	struct xfs_perag	*pag)
20089b247179SDarrick J. Wong {
20099b247179SDarrick J. Wong 	bool			freed_anything = false;
20109b247179SDarrick J. Wong 
20119b247179SDarrick J. Wong 	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
20129b247179SDarrick J. Wong 			xfs_iunlink_free_item, &freed_anything);
20139b247179SDarrick J. Wong 
20149b247179SDarrick J. Wong 	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
20159b247179SDarrick J. Wong }
20169b247179SDarrick J. Wong 
20179b247179SDarrick J. Wong /*
20189a4a5118SDarrick J. Wong  * Point the AGI unlinked bucket at an inode and log the results.  The caller
20199a4a5118SDarrick J. Wong  * is responsible for validating the old value.
20209a4a5118SDarrick J. Wong  */
20219a4a5118SDarrick J. Wong STATIC int
20229a4a5118SDarrick J. Wong xfs_iunlink_update_bucket(
20239a4a5118SDarrick J. Wong 	struct xfs_trans	*tp,
2024f40aadb2SDave Chinner 	struct xfs_perag	*pag,
20259a4a5118SDarrick J. Wong 	struct xfs_buf		*agibp,
20269a4a5118SDarrick J. Wong 	unsigned int		bucket_index,
20279a4a5118SDarrick J. Wong 	xfs_agino_t		new_agino)
20289a4a5118SDarrick J. Wong {
2029370c782bSChristoph Hellwig 	struct xfs_agi		*agi = agibp->b_addr;
20309a4a5118SDarrick J. Wong 	xfs_agino_t		old_value;
20319a4a5118SDarrick J. Wong 	int			offset;
20329a4a5118SDarrick J. Wong 
2033f40aadb2SDave Chinner 	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, pag->pag_agno, new_agino));
20349a4a5118SDarrick J. Wong 
20359a4a5118SDarrick J. Wong 	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2036f40aadb2SDave Chinner 	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
20379a4a5118SDarrick J. Wong 			old_value, new_agino);
20389a4a5118SDarrick J. Wong 
20399a4a5118SDarrick J. Wong 	/*
20409a4a5118SDarrick J. Wong 	 * We should never find the head of the list already set to the value
20419a4a5118SDarrick J. Wong 	 * passed in because either we're adding or removing ourselves from the
20429a4a5118SDarrick J. Wong 	 * head of the list.
20439a4a5118SDarrick J. Wong 	 */
2044a5155b87SDarrick J. Wong 	if (old_value == new_agino) {
20458d57c216SDarrick J. Wong 		xfs_buf_mark_corrupt(agibp);
20469a4a5118SDarrick J. Wong 		return -EFSCORRUPTED;
2047a5155b87SDarrick J. Wong 	}
20489a4a5118SDarrick J. Wong 
20499a4a5118SDarrick J. Wong 	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
20509a4a5118SDarrick J. Wong 	offset = offsetof(struct xfs_agi, agi_unlinked) +
20519a4a5118SDarrick J. Wong 			(sizeof(xfs_agino_t) * bucket_index);
20529a4a5118SDarrick J. Wong 	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
20539a4a5118SDarrick J. Wong 	return 0;
20549a4a5118SDarrick J. Wong }
20559a4a5118SDarrick J. Wong 
2056f2fc16a3SDarrick J. Wong /* Set an on-disk inode's next_unlinked pointer. */
2057f2fc16a3SDarrick J. Wong STATIC void
2058f2fc16a3SDarrick J. Wong xfs_iunlink_update_dinode(
2059f2fc16a3SDarrick J. Wong 	struct xfs_trans	*tp,
2060f40aadb2SDave Chinner 	struct xfs_perag	*pag,
2061f2fc16a3SDarrick J. Wong 	xfs_agino_t		agino,
2062f2fc16a3SDarrick J. Wong 	struct xfs_buf		*ibp,
2063f2fc16a3SDarrick J. Wong 	struct xfs_dinode	*dip,
2064f2fc16a3SDarrick J. Wong 	struct xfs_imap		*imap,
2065f2fc16a3SDarrick J. Wong 	xfs_agino_t		next_agino)
2066f2fc16a3SDarrick J. Wong {
2067f2fc16a3SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2068f2fc16a3SDarrick J. Wong 	int			offset;
2069f2fc16a3SDarrick J. Wong 
2070f40aadb2SDave Chinner 	ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
2071f2fc16a3SDarrick J. Wong 
2072f40aadb2SDave Chinner 	trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino,
2073f2fc16a3SDarrick J. Wong 			be32_to_cpu(dip->di_next_unlinked), next_agino);
2074f2fc16a3SDarrick J. Wong 
2075f2fc16a3SDarrick J. Wong 	dip->di_next_unlinked = cpu_to_be32(next_agino);
2076f2fc16a3SDarrick J. Wong 	offset = imap->im_boffset +
2077f2fc16a3SDarrick J. Wong 			offsetof(struct xfs_dinode, di_next_unlinked);
2078f2fc16a3SDarrick J. Wong 
2079f2fc16a3SDarrick J. Wong 	/* need to recalc the inode CRC if appropriate */
2080f2fc16a3SDarrick J. Wong 	xfs_dinode_calc_crc(mp, dip);
2081f2fc16a3SDarrick J. Wong 	xfs_trans_inode_buf(tp, ibp);
2082f2fc16a3SDarrick J. Wong 	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2083f2fc16a3SDarrick J. Wong }
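/*
 * Worked example (editor's addition, numbers hypothetical): for an inode
 * sitting at byte offset 1024 within its cluster buffer, only the 4-byte
 * next_unlinked field is logged, i.e. the dirty range is
 *
 *	offset = 1024 + offsetof(struct xfs_dinode, di_next_unlinked);
 *	[offset, offset + sizeof(xfs_agino_t) - 1]
 *
 * which keeps unlinked list updates cheap relative to logging whole inodes.
 */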
2084f2fc16a3SDarrick J. Wong 
2085f2fc16a3SDarrick J. Wong /* Set an in-core inode's unlinked pointer and return the old value. */
2086f2fc16a3SDarrick J. Wong STATIC int
2087f2fc16a3SDarrick J. Wong xfs_iunlink_update_inode(
2088f2fc16a3SDarrick J. Wong 	struct xfs_trans	*tp,
2089f2fc16a3SDarrick J. Wong 	struct xfs_inode	*ip,
2090f40aadb2SDave Chinner 	struct xfs_perag	*pag,
2091f2fc16a3SDarrick J. Wong 	xfs_agino_t		next_agino,
2092f2fc16a3SDarrick J. Wong 	xfs_agino_t		*old_next_agino)
2093f2fc16a3SDarrick J. Wong {
2094f2fc16a3SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2095f2fc16a3SDarrick J. Wong 	struct xfs_dinode	*dip;
2096f2fc16a3SDarrick J. Wong 	struct xfs_buf		*ibp;
2097f2fc16a3SDarrick J. Wong 	xfs_agino_t		old_value;
2098f2fc16a3SDarrick J. Wong 	int			error;
2099f2fc16a3SDarrick J. Wong 
2100f40aadb2SDave Chinner 	ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
2101f2fc16a3SDarrick J. Wong 
2102af9dcddeSChristoph Hellwig 	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
2103f2fc16a3SDarrick J. Wong 	if (error)
2104f2fc16a3SDarrick J. Wong 		return error;
2105af9dcddeSChristoph Hellwig 	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
2106f2fc16a3SDarrick J. Wong 
2107f2fc16a3SDarrick J. Wong 	/* Make sure the old pointer isn't garbage. */
2108f2fc16a3SDarrick J. Wong 	old_value = be32_to_cpu(dip->di_next_unlinked);
2109f40aadb2SDave Chinner 	if (!xfs_verify_agino_or_null(mp, pag->pag_agno, old_value)) {
2110a5155b87SDarrick J. Wong 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2111a5155b87SDarrick J. Wong 				sizeof(*dip), __this_address);
2112f2fc16a3SDarrick J. Wong 		error = -EFSCORRUPTED;
2113f2fc16a3SDarrick J. Wong 		goto out;
2114f2fc16a3SDarrick J. Wong 	}
2115f2fc16a3SDarrick J. Wong 
2116f2fc16a3SDarrick J. Wong 	/*
2117f2fc16a3SDarrick J. Wong 	 * Since we're updating a linked list, we should never find that the
2118f2fc16a3SDarrick J. Wong 	 * current pointer is the same as the new value, unless we're
2119f2fc16a3SDarrick J. Wong 	 * terminating the list.
2120f2fc16a3SDarrick J. Wong 	 */
2121f2fc16a3SDarrick J. Wong 	*old_next_agino = old_value;
2122f2fc16a3SDarrick J. Wong 	if (old_value == next_agino) {
2123a5155b87SDarrick J. Wong 		if (next_agino != NULLAGINO) {
2124a5155b87SDarrick J. Wong 			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2125a5155b87SDarrick J. Wong 					dip, sizeof(*dip), __this_address);
2126f2fc16a3SDarrick J. Wong 			error = -EFSCORRUPTED;
2127a5155b87SDarrick J. Wong 		}
2128f2fc16a3SDarrick J. Wong 		goto out;
2129f2fc16a3SDarrick J. Wong 	}
2130f2fc16a3SDarrick J. Wong 
2131f2fc16a3SDarrick J. Wong 	/* Ok, update the new pointer. */
2132f40aadb2SDave Chinner 	xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
2133f2fc16a3SDarrick J. Wong 			ibp, dip, &ip->i_imap, next_agino);
2134f2fc16a3SDarrick J. Wong 	return 0;
2135f2fc16a3SDarrick J. Wong out:
2136f2fc16a3SDarrick J. Wong 	xfs_trans_brelse(tp, ibp);
2137f2fc16a3SDarrick J. Wong 	return error;
2138f2fc16a3SDarrick J. Wong }
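/*
 * Editor's note on the checks above: di_next_unlinked must always be a
 * valid agino or NULLAGINO, and rewriting the pointer with its current
 * value is only tolerated when that value is NULLAGINO (re-terminating
 * the list is a no-op); a repeated non-NULL value would mean a cycle or
 * a double insert, so it is flagged as corruption.
 */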
2139f2fc16a3SDarrick J. Wong 
21409a4a5118SDarrick J. Wong /*
2141c4a6bf7fSDarrick J. Wong  * This is called when the inode's link count has gone to 0 or we are creating
2142c4a6bf7fSDarrick J. Wong  * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
214354d7b5c1SDave Chinner  *
214454d7b5c1SDave Chinner  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
214554d7b5c1SDave Chinner  * list when the inode is freed.
21461da177e4SLinus Torvalds  */
214754d7b5c1SDave Chinner STATIC int
21481da177e4SLinus Torvalds xfs_iunlink(
214954d7b5c1SDave Chinner 	struct xfs_trans	*tp,
215054d7b5c1SDave Chinner 	struct xfs_inode	*ip)
21511da177e4SLinus Torvalds {
21525837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2153f40aadb2SDave Chinner 	struct xfs_perag	*pag;
21545837f625SDarrick J. Wong 	struct xfs_agi		*agi;
21555837f625SDarrick J. Wong 	struct xfs_buf		*agibp;
215686bfd375SDarrick J. Wong 	xfs_agino_t		next_agino;
21575837f625SDarrick J. Wong 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
21585837f625SDarrick J. Wong 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
21591da177e4SLinus Torvalds 	int			error;
21601da177e4SLinus Torvalds 
2161c4a6bf7fSDarrick J. Wong 	ASSERT(VFS_I(ip)->i_nlink == 0);
2162c19b3b05SDave Chinner 	ASSERT(VFS_I(ip)->i_mode != 0);
21634664c66cSDarrick J. Wong 	trace_xfs_iunlink(ip);
21641da177e4SLinus Torvalds 
2165f40aadb2SDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2166f40aadb2SDave Chinner 
21675837f625SDarrick J. Wong 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2168f40aadb2SDave Chinner 	error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
2169859d7182SVlad Apostolov 	if (error)
2170f40aadb2SDave Chinner 		goto out;
2171370c782bSChristoph Hellwig 	agi = agibp->b_addr;
21725e1be0fbSChristoph Hellwig 
21731da177e4SLinus Torvalds 	/*
217486bfd375SDarrick J. Wong 	 * Get the index into the agi hash table for the list this inode will
217586bfd375SDarrick J. Wong 	 * go on.  Make sure the pointer isn't garbage and that this inode
217686bfd375SDarrick J. Wong 	 * isn't already on the list.
21771da177e4SLinus Torvalds 	 */
217886bfd375SDarrick J. Wong 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
217986bfd375SDarrick J. Wong 	if (next_agino == agino ||
2180f40aadb2SDave Chinner 	    !xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)) {
21818d57c216SDarrick J. Wong 		xfs_buf_mark_corrupt(agibp);
2182f40aadb2SDave Chinner 		error = -EFSCORRUPTED;
2183f40aadb2SDave Chinner 		goto out;
2184a5155b87SDarrick J. Wong 	}
21851da177e4SLinus Torvalds 
218686bfd375SDarrick J. Wong 	if (next_agino != NULLAGINO) {
2187f2fc16a3SDarrick J. Wong 		xfs_agino_t		old_agino;
2188f2fc16a3SDarrick J. Wong 
21891da177e4SLinus Torvalds 		/*
2190f2fc16a3SDarrick J. Wong 		 * There is already another inode in the bucket, so point this
2191f2fc16a3SDarrick J. Wong 		 * inode to the current head of the list.
21921da177e4SLinus Torvalds 		 */
2193f40aadb2SDave Chinner 		error = xfs_iunlink_update_inode(tp, ip, pag, next_agino,
2194f2fc16a3SDarrick J. Wong 				&old_agino);
2195c319b58bSVlad Apostolov 		if (error)
2196f40aadb2SDave Chinner 			goto out;
2197f2fc16a3SDarrick J. Wong 		ASSERT(old_agino == NULLAGINO);
21989b247179SDarrick J. Wong 
21999b247179SDarrick J. Wong 		/*
22009b247179SDarrick J. Wong 		 * agino has been unlinked, add a backref from the next inode
22019b247179SDarrick J. Wong 		 * back to agino.
22029b247179SDarrick J. Wong 		 */
2203f40aadb2SDave Chinner 		error = xfs_iunlink_add_backref(pag, agino, next_agino);
22049b247179SDarrick J. Wong 		if (error)
2205f40aadb2SDave Chinner 			goto out;
22061da177e4SLinus Torvalds 	}
22071da177e4SLinus Torvalds 
22089a4a5118SDarrick J. Wong 	/* Point the head of the list to point to this inode. */
2209f40aadb2SDave Chinner 	error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
2210f40aadb2SDave Chinner out:
2211f40aadb2SDave Chinner 	xfs_perag_put(pag);
2212f40aadb2SDave Chinner 	return error;
22131da177e4SLinus Torvalds }
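/*
 * Illustrative sketch (editor's addition): xfs_iunlink() is a push onto the
 * head of a singly linked list rooted in the AGI bucket, roughly
 *
 *	ip->di_next_unlinked = bucket_head;	// xfs_iunlink_update_inode()
 *	bucket_head = ip;			// xfs_iunlink_update_bucket()
 *
 * plus an in-memory backref (next_agino -> agino) so a later removal can
 * find an inode's predecessor without walking the whole bucket.
 */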
22141da177e4SLinus Torvalds 
221523ffa52cSDarrick J. Wong /* Return the imap, dinode pointer, and buffer for an inode. */
221623ffa52cSDarrick J. Wong STATIC int
221723ffa52cSDarrick J. Wong xfs_iunlink_map_ino(
221823ffa52cSDarrick J. Wong 	struct xfs_trans	*tp,
221923ffa52cSDarrick J. Wong 	xfs_agnumber_t		agno,
222023ffa52cSDarrick J. Wong 	xfs_agino_t		agino,
222123ffa52cSDarrick J. Wong 	struct xfs_imap		*imap,
222223ffa52cSDarrick J. Wong 	struct xfs_dinode	**dipp,
222323ffa52cSDarrick J. Wong 	struct xfs_buf		**bpp)
222423ffa52cSDarrick J. Wong {
222523ffa52cSDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
222623ffa52cSDarrick J. Wong 	int			error;
222723ffa52cSDarrick J. Wong 
222823ffa52cSDarrick J. Wong 	imap->im_blkno = 0;
222923ffa52cSDarrick J. Wong 	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
223023ffa52cSDarrick J. Wong 	if (error) {
223123ffa52cSDarrick J. Wong 		xfs_warn(mp, "%s: xfs_imap returned error %d.",
223223ffa52cSDarrick J. Wong 				__func__, error);
223323ffa52cSDarrick J. Wong 		return error;
223423ffa52cSDarrick J. Wong 	}
223523ffa52cSDarrick J. Wong 
2236af9dcddeSChristoph Hellwig 	error = xfs_imap_to_bp(mp, tp, imap, bpp);
223723ffa52cSDarrick J. Wong 	if (error) {
223823ffa52cSDarrick J. Wong 		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
223923ffa52cSDarrick J. Wong 				__func__, error);
224023ffa52cSDarrick J. Wong 		return error;
224123ffa52cSDarrick J. Wong 	}
224223ffa52cSDarrick J. Wong 
2243af9dcddeSChristoph Hellwig 	*dipp = xfs_buf_offset(*bpp, imap->im_boffset);
224423ffa52cSDarrick J. Wong 	return 0;
224523ffa52cSDarrick J. Wong }
224623ffa52cSDarrick J. Wong 
224723ffa52cSDarrick J. Wong /*
224823ffa52cSDarrick J. Wong  * Walk the unlinked chain from @head_agino until we find the inode that
224923ffa52cSDarrick J. Wong  * points to @target_agino.  Return the inode number, map, dinode pointer,
225023ffa52cSDarrick J. Wong  * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
225123ffa52cSDarrick J. Wong  *
225223ffa52cSDarrick J. Wong  * @tp, @pag, @head_agino, and @target_agino are input parameters.
225323ffa52cSDarrick J. Wong  * @agino, @imap, @dipp, and @bpp are all output parameters.
225423ffa52cSDarrick J. Wong  *
225523ffa52cSDarrick J. Wong  * Do not call this function if @target_agino is the head of the list.
225623ffa52cSDarrick J. Wong  */
225723ffa52cSDarrick J. Wong STATIC int
225823ffa52cSDarrick J. Wong xfs_iunlink_map_prev(
225923ffa52cSDarrick J. Wong 	struct xfs_trans	*tp,
2260f40aadb2SDave Chinner 	struct xfs_perag	*pag,
226123ffa52cSDarrick J. Wong 	xfs_agino_t		head_agino,
226223ffa52cSDarrick J. Wong 	xfs_agino_t		target_agino,
226323ffa52cSDarrick J. Wong 	xfs_agino_t		*agino,
226423ffa52cSDarrick J. Wong 	struct xfs_imap		*imap,
226523ffa52cSDarrick J. Wong 	struct xfs_dinode	**dipp,
2266f40aadb2SDave Chinner 	struct xfs_buf		**bpp)
226723ffa52cSDarrick J. Wong {
226823ffa52cSDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
226923ffa52cSDarrick J. Wong 	xfs_agino_t		next_agino;
227023ffa52cSDarrick J. Wong 	int			error;
227123ffa52cSDarrick J. Wong 
227223ffa52cSDarrick J. Wong 	ASSERT(head_agino != target_agino);
227323ffa52cSDarrick J. Wong 	*bpp = NULL;
227423ffa52cSDarrick J. Wong 
22759b247179SDarrick J. Wong 	/* See if our backref cache can find it faster. */
22769b247179SDarrick J. Wong 	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
22779b247179SDarrick J. Wong 	if (*agino != NULLAGINO) {
2278f40aadb2SDave Chinner 		error = xfs_iunlink_map_ino(tp, pag->pag_agno, *agino, imap,
2279f40aadb2SDave Chinner 				dipp, bpp);
22809b247179SDarrick J. Wong 		if (error)
22819b247179SDarrick J. Wong 			return error;
22829b247179SDarrick J. Wong 
22839b247179SDarrick J. Wong 		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
22849b247179SDarrick J. Wong 			return 0;
22859b247179SDarrick J. Wong 
22869b247179SDarrick J. Wong 		/*
22879b247179SDarrick J. Wong 		 * If we get here the cache contents were corrupt, so drop the
22889b247179SDarrick J. Wong 		 * buffer and fall back to walking the bucket list.
22899b247179SDarrick J. Wong 		 */
22909b247179SDarrick J. Wong 		xfs_trans_brelse(tp, *bpp);
22919b247179SDarrick J. Wong 		*bpp = NULL;
22929b247179SDarrick J. Wong 		WARN_ON_ONCE(1);
22939b247179SDarrick J. Wong 	}
22949b247179SDarrick J. Wong 
2295f40aadb2SDave Chinner 	trace_xfs_iunlink_map_prev_fallback(mp, pag->pag_agno);
22969b247179SDarrick J. Wong 
22979b247179SDarrick J. Wong 	/* Otherwise, walk the entire bucket until we find it. */
229823ffa52cSDarrick J. Wong 	next_agino = head_agino;
229923ffa52cSDarrick J. Wong 	while (next_agino != target_agino) {
230023ffa52cSDarrick J. Wong 		xfs_agino_t	unlinked_agino;
230123ffa52cSDarrick J. Wong 
230223ffa52cSDarrick J. Wong 		if (*bpp)
230323ffa52cSDarrick J. Wong 			xfs_trans_brelse(tp, *bpp);
230423ffa52cSDarrick J. Wong 
230523ffa52cSDarrick J. Wong 		*agino = next_agino;
2306f40aadb2SDave Chinner 		error = xfs_iunlink_map_ino(tp, pag->pag_agno, next_agino, imap,
2307f40aadb2SDave Chinner 				dipp, bpp);
230823ffa52cSDarrick J. Wong 		if (error)
230923ffa52cSDarrick J. Wong 			return error;
231023ffa52cSDarrick J. Wong 
231123ffa52cSDarrick J. Wong 		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
231223ffa52cSDarrick J. Wong 		/*
231323ffa52cSDarrick J. Wong 		 * Make sure this pointer is valid and isn't an obvious
231423ffa52cSDarrick J. Wong 		 * infinite loop.
231523ffa52cSDarrick J. Wong 		 */
2316f40aadb2SDave Chinner 		if (!xfs_verify_agino(mp, pag->pag_agno, unlinked_agino) ||
231723ffa52cSDarrick J. Wong 		    next_agino == unlinked_agino) {
231823ffa52cSDarrick J. Wong 			XFS_CORRUPTION_ERROR(__func__,
231923ffa52cSDarrick J. Wong 					XFS_ERRLEVEL_LOW, mp,
232023ffa52cSDarrick J. Wong 					*dipp, sizeof(**dipp));
232123ffa52cSDarrick J. Wong 			error = -EFSCORRUPTED;
232223ffa52cSDarrick J. Wong 			return error;
232323ffa52cSDarrick J. Wong 		}
232423ffa52cSDarrick J. Wong 		next_agino = unlinked_agino;
232523ffa52cSDarrick J. Wong 	}
232623ffa52cSDarrick J. Wong 
232723ffa52cSDarrick J. Wong 	return 0;
232823ffa52cSDarrick J. Wong }
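/*
 * Editor's illustration: the backref cache turns the O(n) bucket walk above
 * into a single lookup in the common case - for target T it returns the
 * predecessor P with P->next_unlinked == T, and the full walk only runs
 * when the cache misses or its answer fails verification.
 */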
232923ffa52cSDarrick J. Wong 
23301da177e4SLinus Torvalds /*
23311da177e4SLinus Torvalds  * Pull the on-disk inode from the AGI unlinked list.
23321da177e4SLinus Torvalds  */
23331da177e4SLinus Torvalds STATIC int
23341da177e4SLinus Torvalds xfs_iunlink_remove(
23355837f625SDarrick J. Wong 	struct xfs_trans	*tp,
2336f40aadb2SDave Chinner 	struct xfs_perag	*pag,
23375837f625SDarrick J. Wong 	struct xfs_inode	*ip)
23381da177e4SLinus Torvalds {
23395837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
23405837f625SDarrick J. Wong 	struct xfs_agi		*agi;
23415837f625SDarrick J. Wong 	struct xfs_buf		*agibp;
23425837f625SDarrick J. Wong 	struct xfs_buf		*last_ibp;
23435837f625SDarrick J. Wong 	struct xfs_dinode	*last_dip = NULL;
23445837f625SDarrick J. Wong 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
23451da177e4SLinus Torvalds 	xfs_agino_t		next_agino;
2346b1d2a068SDarrick J. Wong 	xfs_agino_t		head_agino;
23475837f625SDarrick J. Wong 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
23481da177e4SLinus Torvalds 	int			error;
23491da177e4SLinus Torvalds 
23504664c66cSDarrick J. Wong 	trace_xfs_iunlink_remove(ip);
23514664c66cSDarrick J. Wong 
23525837f625SDarrick J. Wong 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2353f40aadb2SDave Chinner 	error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
23545e1be0fbSChristoph Hellwig 	if (error)
23551da177e4SLinus Torvalds 		return error;
2356370c782bSChristoph Hellwig 	agi = agibp->b_addr;
23575e1be0fbSChristoph Hellwig 
23581da177e4SLinus Torvalds 	/*
235986bfd375SDarrick J. Wong 	 * Get the index into the agi hash table for the list this inode is
236086bfd375SDarrick J. Wong 	 * on.  Make sure the head pointer isn't garbage.
23611da177e4SLinus Torvalds 	 */
2362b1d2a068SDarrick J. Wong 	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2363f40aadb2SDave Chinner 	if (!xfs_verify_agino(mp, pag->pag_agno, head_agino)) {
2364d2e73665SDarrick J. Wong 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2365d2e73665SDarrick J. Wong 				agi, sizeof(*agi));
2366d2e73665SDarrick J. Wong 		return -EFSCORRUPTED;
2367d2e73665SDarrick J. Wong 	}
23681da177e4SLinus Torvalds 
23691da177e4SLinus Torvalds 	/*
2370b1d2a068SDarrick J. Wong 	 * Set our inode's next_unlinked pointer to NULL and then return
2371b1d2a068SDarrick J. Wong 	 * the old pointer value so that we can update whatever was previous
2372b1d2a068SDarrick J. Wong 	 * to us in the list to point to whatever was next in the list.
23731da177e4SLinus Torvalds 	 */
2374f40aadb2SDave Chinner 	error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino);
2375f2fc16a3SDarrick J. Wong 	if (error)
23761da177e4SLinus Torvalds 		return error;
23779a4a5118SDarrick J. Wong 
23789b247179SDarrick J. Wong 	/*
23799b247179SDarrick J. Wong 	 * If there was a backref pointing from the next inode back to this
23809b247179SDarrick J. Wong 	 * one, remove it because we've removed this inode from the list.
23819b247179SDarrick J. Wong 	 *
23829b247179SDarrick J. Wong 	 * Later, if this inode was in the middle of the list we'll update
23839b247179SDarrick J. Wong 	 * this inode's backref to point from the next inode.
23849b247179SDarrick J. Wong 	 */
23859b247179SDarrick J. Wong 	if (next_agino != NULLAGINO) {
2386f40aadb2SDave Chinner 		error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO);
23879b247179SDarrick J. Wong 		if (error)
238892a00544SGao Xiang 			return error;
23899b247179SDarrick J. Wong 	}
23909b247179SDarrick J. Wong 
239192a00544SGao Xiang 	if (head_agino != agino) {
2392f2fc16a3SDarrick J. Wong 		struct xfs_imap	imap;
2393f2fc16a3SDarrick J. Wong 		xfs_agino_t	prev_agino;
2394f2fc16a3SDarrick J. Wong 
239523ffa52cSDarrick J. Wong 		/* We need to search the list for the inode being freed. */
2396f40aadb2SDave Chinner 		error = xfs_iunlink_map_prev(tp, pag, head_agino, agino,
2397f40aadb2SDave Chinner 				&prev_agino, &imap, &last_dip, &last_ibp);
239823ffa52cSDarrick J. Wong 		if (error)
239992a00544SGao Xiang 			return error;
2400475ee413SChristoph Hellwig 
2401f2fc16a3SDarrick J. Wong 		/* Point the previous inode on the list to the next inode. */
2402f40aadb2SDave Chinner 		xfs_iunlink_update_dinode(tp, pag, prev_agino, last_ibp,
2403f2fc16a3SDarrick J. Wong 				last_dip, &imap, next_agino);
24049b247179SDarrick J. Wong 
24059b247179SDarrick J. Wong 		/*
24069b247179SDarrick J. Wong 		 * Now we deal with the backref for this inode.  If this inode
24079b247179SDarrick J. Wong 		 * pointed at a real inode, change the backref that pointed to
24089b247179SDarrick J. Wong 		 * us to point to our old next.  If this inode was the end of
24099b247179SDarrick J. Wong 		 * the list, delete the backref that pointed to us.  Note that
24109b247179SDarrick J. Wong 		 * change_backref takes care of deleting the backref if
24119b247179SDarrick J. Wong 		 * next_agino is NULLAGINO.
24129b247179SDarrick J. Wong 		 */
241392a00544SGao Xiang 		return xfs_iunlink_change_backref(agibp->b_pag, agino,
241492a00544SGao Xiang 				next_agino);
24151da177e4SLinus Torvalds 	}
24169b247179SDarrick J. Wong 
241792a00544SGao Xiang 	/* Point the head of the list to the next unlinked inode. */
2418f40aadb2SDave Chinner 	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
241992a00544SGao Xiang 			next_agino);
24201da177e4SLinus Torvalds }
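/*
 * Illustrative sketch (editor's addition): after the victim's own pointer
 * is cleared, removal splices the list around it in one of two ways:
 *
 *	if (head_agino == agino)		// victim was the bucket head
 *		bucket_head = next_agino;	// xfs_iunlink_update_bucket()
 *	else					// victim was mid-list
 *		prev->next_unlinked = next_agino; // xfs_iunlink_update_dinode()
 *
 * with the backref cache updated to match in both cases.
 */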
24211da177e4SLinus Torvalds 
24225b3eed75SDave Chinner /*
242371e3e356SDave Chinner  * Look up the specified inode number and, if it is not already marked
242471e3e356SDave Chinner  * XFS_ISTALE, mark it stale. We should only find clean inodes in this
242571e3e356SDave Chinner  * lookup that aren't already stale.
24265806165aSDave Chinner  */
242771e3e356SDave Chinner static void
242871e3e356SDave Chinner xfs_ifree_mark_inode_stale(
2429f40aadb2SDave Chinner 	struct xfs_perag	*pag,
24305806165aSDave Chinner 	struct xfs_inode	*free_ip,
2431d9fdd0adSBrian Foster 	xfs_ino_t		inum)
24325806165aSDave Chinner {
2433f40aadb2SDave Chinner 	struct xfs_mount	*mp = pag->pag_mount;
243471e3e356SDave Chinner 	struct xfs_inode_log_item *iip;
24355806165aSDave Chinner 	struct xfs_inode	*ip;
24365806165aSDave Chinner 
24375806165aSDave Chinner retry:
24385806165aSDave Chinner 	rcu_read_lock();
24395806165aSDave Chinner 	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
24405806165aSDave Chinner 
24415806165aSDave Chinner 	/* Inode not in memory, nothing to do */
244271e3e356SDave Chinner 	if (!ip) {
244371e3e356SDave Chinner 		rcu_read_unlock();
244471e3e356SDave Chinner 		return;
244571e3e356SDave Chinner 	}
24465806165aSDave Chinner 
24475806165aSDave Chinner 	/*
24485806165aSDave Chinner 	 * Because this is an RCU-protected lookup, we could find a recently
24495806165aSDave Chinner 	 * freed or even reallocated inode during the lookup. We need to check
24505806165aSDave Chinner 	 * under the i_flags_lock for a valid inode here. Skip it if it is not
24515806165aSDave Chinner 	 * valid, the wrong inode, or stale.
24525806165aSDave Chinner 	 */
24535806165aSDave Chinner 	spin_lock(&ip->i_flags_lock);
2454718ecc50SDave Chinner 	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2455718ecc50SDave Chinner 		goto out_iflags_unlock;
24565806165aSDave Chinner 
24575806165aSDave Chinner 	/*
24585806165aSDave Chinner 	 * We don't try to lock/unlock the current inode, but we _cannot_ skip
24595806165aSDave Chinner 	 * the other inodes that we did not find in the list attached to the
24605806165aSDave Chinner 	 * buffer and that are not already marked stale. If we can't lock one,
24615806165aSDave Chinner 	 * back off and retry.
24625806165aSDave Chinner 	 */
24635806165aSDave Chinner 	if (ip != free_ip) {
24645806165aSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
246571e3e356SDave Chinner 			spin_unlock(&ip->i_flags_lock);
24665806165aSDave Chinner 			rcu_read_unlock();
24675806165aSDave Chinner 			delay(1);
24685806165aSDave Chinner 			goto retry;
24695806165aSDave Chinner 		}
24705806165aSDave Chinner 	}
247171e3e356SDave Chinner 	ip->i_flags |= XFS_ISTALE;
24725806165aSDave Chinner 
247371e3e356SDave Chinner 	/*
2474718ecc50SDave Chinner 	 * If the inode is flushing, it is already attached to the buffer.  All
247571e3e356SDave Chinner 	 * we need to do here is mark the inode stale so buffer IO completion
247671e3e356SDave Chinner 	 * will remove it from the AIL.
247771e3e356SDave Chinner 	 */
247871e3e356SDave Chinner 	iip = ip->i_itemp;
2479718ecc50SDave Chinner 	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
248071e3e356SDave Chinner 		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
248171e3e356SDave Chinner 		ASSERT(iip->ili_last_fields);
248271e3e356SDave Chinner 		goto out_iunlock;
248371e3e356SDave Chinner 	}
24845806165aSDave Chinner 
24855806165aSDave Chinner 	/*
248648d55e2aSDave Chinner 	 * Inodes not attached to the buffer can be released immediately.
248748d55e2aSDave Chinner 	 * Everything else has to go through xfs_iflush_abort() on journal
248848d55e2aSDave Chinner 	 * commit as the flock synchronises removal of the inode from the
248948d55e2aSDave Chinner 	 * cluster buffer against inode reclaim.
24905806165aSDave Chinner 	 */
2491718ecc50SDave Chinner 	if (!iip || list_empty(&iip->ili_item.li_bio_list))
249271e3e356SDave Chinner 		goto out_iunlock;
2493718ecc50SDave Chinner 
2494718ecc50SDave Chinner 	__xfs_iflags_set(ip, XFS_IFLUSHING);
2495718ecc50SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2496718ecc50SDave Chinner 	rcu_read_unlock();
24975806165aSDave Chinner 
249871e3e356SDave Chinner 	/* we have a dirty inode in memory that has not yet been flushed. */
249971e3e356SDave Chinner 	spin_lock(&iip->ili_lock);
250071e3e356SDave Chinner 	iip->ili_last_fields = iip->ili_fields;
250171e3e356SDave Chinner 	iip->ili_fields = 0;
250271e3e356SDave Chinner 	iip->ili_fsync_fields = 0;
250371e3e356SDave Chinner 	spin_unlock(&iip->ili_lock);
250471e3e356SDave Chinner 	ASSERT(iip->ili_last_fields);
250571e3e356SDave Chinner 
2506718ecc50SDave Chinner 	if (ip != free_ip)
2507718ecc50SDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2508718ecc50SDave Chinner 	return;
2509718ecc50SDave Chinner 
251071e3e356SDave Chinner out_iunlock:
251171e3e356SDave Chinner 	if (ip != free_ip)
251271e3e356SDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2513718ecc50SDave Chinner out_iflags_unlock:
2514718ecc50SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2515718ecc50SDave Chinner 	rcu_read_unlock();
25165806165aSDave Chinner }
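/*
 * Editor's note: the function above follows the usual RCU inode cache
 * idiom - look up under rcu_read_lock(), revalidate i_ino and flags under
 * i_flags_lock (the inode may have been freed or reallocated between the
 * lookup and the lock), and back off with delay(1) and retry rather than
 * spinning when the ILOCK cannot be taken.
 */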
25175806165aSDave Chinner 
25185806165aSDave Chinner /*
25190b8182dbSZhi Yong Wu  * A big issue when freeing the inode cluster is that we _cannot_ skip any
25205b3eed75SDave Chinner  * inodes that are in memory - they all must be marked stale and attached to
25215b3eed75SDave Chinner  * the cluster buffer.
25225b3eed75SDave Chinner  */
2523f40aadb2SDave Chinner static int
25241da177e4SLinus Torvalds xfs_ifree_cluster(
252571e3e356SDave Chinner 	struct xfs_trans	*tp,
2526f40aadb2SDave Chinner 	struct xfs_perag	*pag,
2527f40aadb2SDave Chinner 	struct xfs_inode	*free_ip,
252809b56604SBrian Foster 	struct xfs_icluster	*xic)
25291da177e4SLinus Torvalds {
253071e3e356SDave Chinner 	struct xfs_mount	*mp = free_ip->i_mount;
253171e3e356SDave Chinner 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
253271e3e356SDave Chinner 	struct xfs_buf		*bp;
253371e3e356SDave Chinner 	xfs_daddr_t		blkno;
253471e3e356SDave Chinner 	xfs_ino_t		inum = xic->first_ino;
25351da177e4SLinus Torvalds 	int			nbufs;
25365b257b4aSDave Chinner 	int			i, j;
25373cdaa189SBrian Foster 	int			ioffset;
2538ce92464cSDarrick J. Wong 	int			error;
25391da177e4SLinus Torvalds 
2540ef325959SDarrick J. Wong 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
25411da177e4SLinus Torvalds 
2542ef325959SDarrick J. Wong 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
254309b56604SBrian Foster 		/*
254409b56604SBrian Foster 		 * The allocation bitmap tells us which inodes of the chunk were
254509b56604SBrian Foster 		 * physically allocated. Skip the cluster if an inode falls into
254609b56604SBrian Foster 		 * a sparse region.
254709b56604SBrian Foster 		 */
25483cdaa189SBrian Foster 		ioffset = inum - xic->first_ino;
25493cdaa189SBrian Foster 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2550ef325959SDarrick J. Wong 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
255109b56604SBrian Foster 			continue;
255209b56604SBrian Foster 		}
255309b56604SBrian Foster 
25541da177e4SLinus Torvalds 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
25551da177e4SLinus Torvalds 					 XFS_INO_TO_AGBNO(mp, inum));
25561da177e4SLinus Torvalds 
25571da177e4SLinus Torvalds 		/*
25585b257b4aSDave Chinner 		 * We obtain and lock the backing buffer first here to ensure
2559718ecc50SDave Chinner 		 * that dirty inodes attached to it remain in the flushing
2560718ecc50SDave Chinner 		 * state while we mark them stale.
2561718ecc50SDave Chinner 		 *
25625b257b4aSDave Chinner 		 * If we scan the in-memory inodes first, then buffer IO can
25635b257b4aSDave Chinner 		 * complete before we get a lock on it, and hence we may fail
25645b257b4aSDave Chinner 		 * to mark all the active inodes on the buffer stale.
25651da177e4SLinus Torvalds 		 */
2566ce92464cSDarrick J. Wong 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2567ef325959SDarrick J. Wong 				mp->m_bsize * igeo->blocks_per_cluster,
2568ce92464cSDarrick J. Wong 				XBF_UNMAPPED, &bp);
256971e3e356SDave Chinner 		if (error)
2570ce92464cSDarrick J. Wong 			return error;
2571b0f539deSDave Chinner 
2572b0f539deSDave Chinner 		/*
2573b0f539deSDave Chinner 		 * This buffer may not have been correctly initialised as we
2574b0f539deSDave Chinner 		 * didn't read it from disk. That's not important because we are
2575b0f539deSDave Chinner 		 * only using it to mark the buffer as stale in the log, and to
2576b0f539deSDave Chinner 		 * attach stale cached inodes on it. That means it will never be
2577b0f539deSDave Chinner 		 * dispatched for IO. If it is, we want to know about it, and we
2578b0f539deSDave Chinner 		 * want it to fail. We can achieve this by adding a write
2579b0f539deSDave Chinner 		 * verifier to the buffer.
2580b0f539deSDave Chinner 		 */
25811813dd64SDave Chinner 		bp->b_ops = &xfs_inode_buf_ops;
2582b0f539deSDave Chinner 
25835b257b4aSDave Chinner 		/*
258471e3e356SDave Chinner 		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
258571e3e356SDave Chinner 		 * too. This requires lookups, and will skip inodes that we've
258671e3e356SDave Chinner 		 * already marked XFS_ISTALE.
25875b257b4aSDave Chinner 		 */
258871e3e356SDave Chinner 		for (i = 0; i < igeo->inodes_per_cluster; i++)
2589f40aadb2SDave Chinner 			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
25901da177e4SLinus Torvalds 
25911da177e4SLinus Torvalds 		xfs_trans_stale_inode_buf(tp, bp);
25921da177e4SLinus Torvalds 		xfs_trans_binval(tp, bp);
25931da177e4SLinus Torvalds 	}
25942a30f36dSChandra Seetharaman 	return 0;
25951da177e4SLinus Torvalds }
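/*
 * Worked example (editor's addition, assuming 4k blocks, 512 byte inodes
 * and an 8k inode cluster for illustration): a 64 inode chunk spans
 * ialloc_blks = 8 blocks, blocks_per_cluster = 2 and inodes_per_cluster =
 * 16, so nbufs = 8 / 2 = 4 cluster buffers are invalidated per chunk and
 * inum advances by 16 on each pass of the loop above.
 */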
25961da177e4SLinus Torvalds 
25971da177e4SLinus Torvalds /*
25981da177e4SLinus Torvalds  * This is called to return an inode to the inode free list.
25991da177e4SLinus Torvalds  * The inode should already be truncated to 0 length and have
26001da177e4SLinus Torvalds  * no pages associated with it.  This routine also assumes that
26011da177e4SLinus Torvalds  * the inode is already a part of the transaction.
26021da177e4SLinus Torvalds  *
26031da177e4SLinus Torvalds  * The on-disk copy of the inode will have been added to the list
26041da177e4SLinus Torvalds  * of unlinked inodes in the AGI. We need to remove the inode from
26051da177e4SLinus Torvalds  * that list atomically with respect to freeing it here.
26061da177e4SLinus Torvalds  */
26071da177e4SLinus Torvalds int
26081da177e4SLinus Torvalds xfs_ifree(
26090e0417f3SBrian Foster 	struct xfs_trans	*tp,
26100e0417f3SBrian Foster 	struct xfs_inode	*ip)
26111da177e4SLinus Torvalds {
2612f40aadb2SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
2613f40aadb2SDave Chinner 	struct xfs_perag	*pag;
261409b56604SBrian Foster 	struct xfs_icluster	xic = { 0 };
26151319ebefSDave Chinner 	struct xfs_inode_log_item *iip = ip->i_itemp;
2616f40aadb2SDave Chinner 	int			error;
26171da177e4SLinus Torvalds 
2618579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
261954d7b5c1SDave Chinner 	ASSERT(VFS_I(ip)->i_nlink == 0);
2620daf83964SChristoph Hellwig 	ASSERT(ip->i_df.if_nextents == 0);
262113d2c10bSChristoph Hellwig 	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
26226e73a545SChristoph Hellwig 	ASSERT(ip->i_nblocks == 0);
26231da177e4SLinus Torvalds 
2624f40aadb2SDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2625f40aadb2SDave Chinner 
26261da177e4SLinus Torvalds 	/*
26271da177e4SLinus Torvalds 	 * Pull the on-disk inode from the AGI unlinked list.
26281da177e4SLinus Torvalds 	 */
2629f40aadb2SDave Chinner 	error = xfs_iunlink_remove(tp, pag, ip);
26301baaed8fSDave Chinner 	if (error)
2631f40aadb2SDave Chinner 		goto out;
26321da177e4SLinus Torvalds 
2633f40aadb2SDave Chinner 	error = xfs_difree(tp, pag, ip->i_ino, &xic);
26341baaed8fSDave Chinner 	if (error)
2635f40aadb2SDave Chinner 		goto out;
26361baaed8fSDave Chinner 
2637b2c20045SChristoph Hellwig 	/*
2638b2c20045SChristoph Hellwig 	 * Free any local-format data sitting around before we reset the
2639b2c20045SChristoph Hellwig 	 * data fork to extents format.  Note that the attr fork data has
2640b2c20045SChristoph Hellwig 	 * already been freed by xfs_attr_inactive.
2641b2c20045SChristoph Hellwig 	 */
2642f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2643b2c20045SChristoph Hellwig 		kmem_free(ip->i_df.if_u1.if_data);
2644b2c20045SChristoph Hellwig 		ip->i_df.if_u1.if_data = NULL;
2645b2c20045SChristoph Hellwig 		ip->i_df.if_bytes = 0;
2646b2c20045SChristoph Hellwig 	}
264798c4f78dSDarrick J. Wong 
2648c19b3b05SDave Chinner 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2649db07349dSChristoph Hellwig 	ip->i_diflags = 0;
2650f40aadb2SDave Chinner 	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
26517821ea30SChristoph Hellwig 	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2652f7e67b20SChristoph Hellwig 	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
26539b3beb02SChristoph Hellwig 	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
26549b3beb02SChristoph Hellwig 		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2655dc1baa71SEric Sandeen 
2656dc1baa71SEric Sandeen 	/* Don't attempt to replay owner changes for a deleted inode */
26571319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
26581319ebefSDave Chinner 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
26591319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
2660dc1baa71SEric Sandeen 
26611da177e4SLinus Torvalds 	/*
26621da177e4SLinus Torvalds 	 * Bump the generation count so no one will be confused
26631da177e4SLinus Torvalds 	 * by reincarnations of this inode.
26641da177e4SLinus Torvalds 	 */
26659e9a2674SDave Chinner 	VFS_I(ip)->i_generation++;
26661da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
26671da177e4SLinus Torvalds 
266809b56604SBrian Foster 	if (xic.deleted)
2669f40aadb2SDave Chinner 		error = xfs_ifree_cluster(tp, pag, ip, &xic);
2670f40aadb2SDave Chinner out:
2671f40aadb2SDave Chinner 	xfs_perag_put(pag);
26722a30f36dSChandra Seetharaman 	return error;
26731da177e4SLinus Torvalds }
26741da177e4SLinus Torvalds 
26751da177e4SLinus Torvalds /*
267660ec6783SChristoph Hellwig  * This is called to unpin an inode.  The caller must have the inode locked
267760ec6783SChristoph Hellwig  * in at least shared mode so that the buffer cannot be subsequently pinned
267860ec6783SChristoph Hellwig  * once someone is waiting for it to be unpinned.
26791da177e4SLinus Torvalds  */
268060ec6783SChristoph Hellwig static void
2681f392e631SChristoph Hellwig xfs_iunpin(
268260ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
2683a3f74ffbSDavid Chinner {
2684579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2685a3f74ffbSDavid Chinner 
26864aaf15d1SDave Chinner 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
26874aaf15d1SDave Chinner 
2688a3f74ffbSDavid Chinner 	/* Give the log a push to start the unpinning I/O */
26895f9b4b0dSDave Chinner 	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2691a3f74ffbSDavid Chinner }
2692a3f74ffbSDavid Chinner 
2693f392e631SChristoph Hellwig static void
2694f392e631SChristoph Hellwig __xfs_iunpin_wait(
2695f392e631SChristoph Hellwig 	struct xfs_inode	*ip)
2696f392e631SChristoph Hellwig {
2697f392e631SChristoph Hellwig 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2698f392e631SChristoph Hellwig 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2699f392e631SChristoph Hellwig 
2700f392e631SChristoph Hellwig 	xfs_iunpin(ip);
2701f392e631SChristoph Hellwig 
2702f392e631SChristoph Hellwig 	do {
270321417136SIngo Molnar 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2704f392e631SChristoph Hellwig 		if (xfs_ipincount(ip))
2705f392e631SChristoph Hellwig 			io_schedule();
2706f392e631SChristoph Hellwig 	} while (xfs_ipincount(ip));
270721417136SIngo Molnar 	finish_wait(wq, &wait.wq_entry);
2708f392e631SChristoph Hellwig }
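/*
 * Editor's note: this is the standard wait-on-bit sleep pattern.  The
 * matching waker is expected to do wake_up_bit(&ip->i_flags,
 * __XFS_IPINNED_BIT) when the pin count drops to zero, so the loop above
 * sleeps in io_schedule() and rechecks xfs_ipincount() on each wakeup.
 */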
2709f392e631SChristoph Hellwig 
2710777df5afSDave Chinner void
27111da177e4SLinus Torvalds xfs_iunpin_wait(
271260ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
27131da177e4SLinus Torvalds {
2714f392e631SChristoph Hellwig 	if (xfs_ipincount(ip))
2715f392e631SChristoph Hellwig 		__xfs_iunpin_wait(ip);
27161da177e4SLinus Torvalds }
27171da177e4SLinus Torvalds 
271827320369SDave Chinner /*
271927320369SDave Chinner  * Removing an inode from the namespace involves removing the directory entry
272027320369SDave Chinner  * and dropping the link count on the inode. Removing the directory entry can
272127320369SDave Chinner  * result in locking an AGF (directory blocks were freed) and removing a link
272227320369SDave Chinner  * count can result in placing the inode on an unlinked list which results in
272327320369SDave Chinner  * locking an AGI.
272427320369SDave Chinner  *
272527320369SDave Chinner  * The big problem here is that we have an ordering constraint on AGF and AGI
272627320369SDave Chinner  * locking - inode allocation locks the AGI, then can allocate a new extent for
272727320369SDave Chinner  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
272827320369SDave Chinner  * removes the inode from the unlinked list, requiring that we lock the AGI
272927320369SDave Chinner  * first, and then freeing the inode can result in an inode chunk being freed
273027320369SDave Chinner  * and hence freeing disk space requiring that we lock an AGF.
273127320369SDave Chinner  *
273227320369SDave Chinner  * Hence the ordering that is imposed by other parts of the code is AGI before
273327320369SDave Chinner  * AGF. This means we cannot remove the directory entry before we drop the inode
273427320369SDave Chinner  * reference count and put it on the unlinked list as this results in a lock
273527320369SDave Chinner  * order of AGF then AGI, and this can deadlock against inode allocation and
273627320369SDave Chinner  * freeing. Therefore we must drop the link counts before we remove the
273727320369SDave Chinner  * directory entry.
273827320369SDave Chinner  *
273927320369SDave Chinner  * This is still safe from a transactional point of view - it is not until we
2740310a75a3SDarrick J. Wong  * get to xfs_defer_finish() that we have the possibility of multiple
274127320369SDave Chinner  * transactions in this operation. Hence as long as we remove the directory
274227320369SDave Chinner  * entry and drop the link count in the first transaction of the remove
274327320369SDave Chinner  * operation, there are no transactional constraints on the ordering here.
274427320369SDave Chinner  */
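/*
 * Editor's illustration of the resulting order inside xfs_remove():
 *
 *	xfs_droplink(tp, ip);		// may lock the AGI (unlinked list)
 *	xfs_dir_removename(...);	// may lock the AGF (freeing dir blocks)
 *
 * which matches the global AGI-before-AGF ordering used by inode
 * allocation and freeing.
 */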
2745c24b5dfaSDave Chinner int
2746c24b5dfaSDave Chinner xfs_remove(
2747c24b5dfaSDave Chinner 	xfs_inode_t             *dp,
2748c24b5dfaSDave Chinner 	struct xfs_name		*name,
2749c24b5dfaSDave Chinner 	xfs_inode_t		*ip)
2750c24b5dfaSDave Chinner {
2751c24b5dfaSDave Chinner 	xfs_mount_t		*mp = dp->i_mount;
2752c24b5dfaSDave Chinner 	xfs_trans_t             *tp = NULL;
2753c19b3b05SDave Chinner 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2754c24b5dfaSDave Chinner 	int                     error = 0;
2755c24b5dfaSDave Chinner 	uint			resblks;
2756c24b5dfaSDave Chinner 
2757c24b5dfaSDave Chinner 	trace_xfs_remove(dp, name);
2758c24b5dfaSDave Chinner 
2759c24b5dfaSDave Chinner 	if (XFS_FORCED_SHUTDOWN(mp))
27602451337dSDave Chinner 		return -EIO;
2761c24b5dfaSDave Chinner 
2762c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(dp);
2763c24b5dfaSDave Chinner 	if (error)
2764c24b5dfaSDave Chinner 		goto std_return;
2765c24b5dfaSDave Chinner 
2766c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
2767c24b5dfaSDave Chinner 	if (error)
2768c24b5dfaSDave Chinner 		goto std_return;
2769c24b5dfaSDave Chinner 
2770c24b5dfaSDave Chinner 	/*
2771c24b5dfaSDave Chinner 	 * We try to get the real space reservation first, allowing for
2772c24b5dfaSDave Chinner 	 * directory btree deletion(s) implying possible bmap insert(s).  If
2773c24b5dfaSDave Chinner 	 * we can't get the space reservation then we use 0 instead, and avoid
2774c24b5dfaSDave Chinner 	 * the bmap btree insert(s) in the directory code: if a bmap insert
2775c24b5dfaSDave Chinner 	 * would otherwise be needed, the directory code instead trims the
2776c24b5dfaSDave Chinner 	 * LAST block from the directory.
2777c24b5dfaSDave Chinner 	 */
2779c24b5dfaSDave Chinner 	resblks = XFS_REMOVE_SPACE_RES(mp);
2780253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
27812451337dSDave Chinner 	if (error == -ENOSPC) {
2782c24b5dfaSDave Chinner 		resblks = 0;
2783253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2784253f4911SChristoph Hellwig 				&tp);
2785c24b5dfaSDave Chinner 	}
2786c24b5dfaSDave Chinner 	if (error) {
27872451337dSDave Chinner 		ASSERT(error != -ENOSPC);
2788253f4911SChristoph Hellwig 		goto std_return;
2789c24b5dfaSDave Chinner 	}
2790c24b5dfaSDave Chinner 
27917c2d238aSDarrick J. Wong 	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2792c24b5dfaSDave Chinner 
279365523218SChristoph Hellwig 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2794c24b5dfaSDave Chinner 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2795c24b5dfaSDave Chinner 
2796c24b5dfaSDave Chinner 	/*
2797c24b5dfaSDave Chinner 	 * If we're removing a directory perform some additional validation.
2798c24b5dfaSDave Chinner 	 */
2799c24b5dfaSDave Chinner 	if (is_dir) {
280054d7b5c1SDave Chinner 		ASSERT(VFS_I(ip)->i_nlink >= 2);
280154d7b5c1SDave Chinner 		if (VFS_I(ip)->i_nlink != 2) {
28022451337dSDave Chinner 			error = -ENOTEMPTY;
2803c24b5dfaSDave Chinner 			goto out_trans_cancel;
2804c24b5dfaSDave Chinner 		}
2805c24b5dfaSDave Chinner 		if (!xfs_dir_isempty(ip)) {
28062451337dSDave Chinner 			error = -ENOTEMPTY;
2807c24b5dfaSDave Chinner 			goto out_trans_cancel;
2808c24b5dfaSDave Chinner 		}
2809c24b5dfaSDave Chinner 
281027320369SDave Chinner 		/* Drop the link from ip's "..".  */
2811c24b5dfaSDave Chinner 		error = xfs_droplink(tp, dp);
2812c24b5dfaSDave Chinner 		if (error)
281327320369SDave Chinner 			goto out_trans_cancel;
2814c24b5dfaSDave Chinner 
281527320369SDave Chinner 		/* Drop the "." link from ip to self.  */
2816c24b5dfaSDave Chinner 		error = xfs_droplink(tp, ip);
2817c24b5dfaSDave Chinner 		if (error)
281827320369SDave Chinner 			goto out_trans_cancel;
28195838d035SDarrick J. Wong 
28205838d035SDarrick J. Wong 		/*
28215838d035SDarrick J. Wong 		 * Point the unlinked child directory's ".." entry to the root
28225838d035SDarrick J. Wong 		 * directory to eliminate back-references to inodes that may
28235838d035SDarrick J. Wong 		 * get freed before the child directory is closed.  If the fs
28245838d035SDarrick J. Wong 		 * gets shrunk, this can lead to dirent inode validation errors.
28255838d035SDarrick J. Wong 		 */
28265838d035SDarrick J. Wong 		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
28275838d035SDarrick J. Wong 			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
28285838d035SDarrick J. Wong 					tp->t_mountp->m_sb.sb_rootino, 0);
28295838d035SDarrick J. Wong 			if (error)
28305838d035SDarrick J. Wong 				goto out_trans_cancel;
28315838d035SDarrick J. Wong 		}
2832c24b5dfaSDave Chinner 	} else {
2833c24b5dfaSDave Chinner 		/*
2834c24b5dfaSDave Chinner 		 * When removing a non-directory we need to log the parent
2835c24b5dfaSDave Chinner 		 * inode here.  For a directory this is done implicitly
2836c24b5dfaSDave Chinner 		 * by the xfs_droplink call for the ".." entry.
2837c24b5dfaSDave Chinner 		 */
2838c24b5dfaSDave Chinner 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2839c24b5dfaSDave Chinner 	}
284027320369SDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2841c24b5dfaSDave Chinner 
284227320369SDave Chinner 	/* Drop the link from dp to ip. */
2843c24b5dfaSDave Chinner 	error = xfs_droplink(tp, ip);
2844c24b5dfaSDave Chinner 	if (error)
284527320369SDave Chinner 		goto out_trans_cancel;
2846c24b5dfaSDave Chinner 
2847381eee69SBrian Foster 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
284827320369SDave Chinner 	if (error) {
28492451337dSDave Chinner 		ASSERT(error != -ENOENT);
2850c8eac49eSBrian Foster 		goto out_trans_cancel;
285127320369SDave Chinner 	}
285227320369SDave Chinner 
2853c24b5dfaSDave Chinner 	/*
2854c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
2855c24b5dfaSDave Chinner 	 * remove transaction goes to disk before returning to
2856c24b5dfaSDave Chinner 	 * the user.
2857c24b5dfaSDave Chinner 	 */
2858c24b5dfaSDave Chinner 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2859c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
2860c24b5dfaSDave Chinner 
286170393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
2862c24b5dfaSDave Chinner 	if (error)
2863c24b5dfaSDave Chinner 		goto std_return;
2864c24b5dfaSDave Chinner 
28652cd2ef6aSChristoph Hellwig 	if (is_dir && xfs_inode_is_filestream(ip))
2866c24b5dfaSDave Chinner 		xfs_filestream_deassociate(ip);
2867c24b5dfaSDave Chinner 
2868c24b5dfaSDave Chinner 	return 0;
2869c24b5dfaSDave Chinner 
2870c24b5dfaSDave Chinner  out_trans_cancel:
28714906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
2872c24b5dfaSDave Chinner  std_return:
2873c24b5dfaSDave Chinner 	return error;
2874c24b5dfaSDave Chinner }
2875c24b5dfaSDave Chinner 
2876f6bba201SDave Chinner /*
2877f6bba201SDave Chinner  * Enter all inodes for a rename transaction into a sorted array.
2878f6bba201SDave Chinner  */
287995afcf5cSDave Chinner #define __XFS_SORT_INODES	5
2880f6bba201SDave Chinner STATIC void
2881f6bba201SDave Chinner xfs_sort_for_rename(
288295afcf5cSDave Chinner 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
288395afcf5cSDave Chinner 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
288495afcf5cSDave Chinner 	struct xfs_inode	*ip1,	/* in: inode of old entry */
288595afcf5cSDave Chinner 	struct xfs_inode	*ip2,	/* in: inode of new entry */
288695afcf5cSDave Chinner 	struct xfs_inode	*wip,	/* in: whiteout inode */
288795afcf5cSDave Chinner 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
288895afcf5cSDave Chinner 	int			*num_inodes)  /* in/out: inodes in array */
2889f6bba201SDave Chinner {
2890f6bba201SDave Chinner 	int			i, j;
2891f6bba201SDave Chinner 
289295afcf5cSDave Chinner 	ASSERT(*num_inodes == __XFS_SORT_INODES);
289395afcf5cSDave Chinner 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
289495afcf5cSDave Chinner 
2895f6bba201SDave Chinner 	/*
2896f6bba201SDave Chinner 	 * i_tab contains a list of pointers to inodes.  We initialize
2897f6bba201SDave Chinner 	 * the table here and sort it.  We will then use it to
2898f6bba201SDave Chinner 	 * order the acquisition of the inode locks.
2899f6bba201SDave Chinner 	 *
2900f6bba201SDave Chinner 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2901f6bba201SDave Chinner 	 */
290295afcf5cSDave Chinner 	i = 0;
290395afcf5cSDave Chinner 	i_tab[i++] = dp1;
290495afcf5cSDave Chinner 	i_tab[i++] = dp2;
290595afcf5cSDave Chinner 	i_tab[i++] = ip1;
290695afcf5cSDave Chinner 	if (ip2)
290795afcf5cSDave Chinner 		i_tab[i++] = ip2;
290895afcf5cSDave Chinner 	if (wip)
290995afcf5cSDave Chinner 		i_tab[i++] = wip;
291095afcf5cSDave Chinner 	*num_inodes = i;
2911f6bba201SDave Chinner 
2912f6bba201SDave Chinner 	/*
2913f6bba201SDave Chinner 	 * Sort the elements via bubble sort.  (Remember, there are at
291495afcf5cSDave Chinner 	 * most 5 elements to sort, so this is adequate.)
2915f6bba201SDave Chinner 	 */
2916f6bba201SDave Chinner 	for (i = 0; i < *num_inodes; i++) {
2917f6bba201SDave Chinner 		for (j = 1; j < *num_inodes; j++) {
2918f6bba201SDave Chinner 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
291995afcf5cSDave Chinner 				struct xfs_inode *temp = i_tab[j];
2920f6bba201SDave Chinner 				i_tab[j] = i_tab[j-1];
2921f6bba201SDave Chinner 				i_tab[j-1] = temp;
2922f6bba201SDave Chinner 			}
2923f6bba201SDave Chinner 		}
2924f6bba201SDave Chinner 	}
2925f6bba201SDave Chinner }
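/*
 * Worked example (editor's addition, inode numbers hypothetical): a
 * cross-directory RENAME_WHITEOUT might build i_tab as {dp1=17, dp2=9,
 * ip1=42, wip=12}; after the bubble sort it is {9, 12, 17, 42}, so
 * xfs_lock_inodes() always acquires ILOCKs in ascending inode number
 * order no matter how the caller passed the inodes in.
 */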
2926f6bba201SDave Chinner 
2927310606b0SDave Chinner static int
2928310606b0SDave Chinner xfs_finish_rename(
2929c9cfdb38SBrian Foster 	struct xfs_trans	*tp)
2930310606b0SDave Chinner {
2931310606b0SDave Chinner 	/*
2932310606b0SDave Chinner 	 * If this is a synchronous mount, make sure that the rename transaction
2933310606b0SDave Chinner 	 * goes to disk before returning to the user.
2934310606b0SDave Chinner 	 */
2935310606b0SDave Chinner 	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2936310606b0SDave Chinner 		xfs_trans_set_sync(tp);
2937310606b0SDave Chinner 
293870393313SChristoph Hellwig 	return xfs_trans_commit(tp);
2939310606b0SDave Chinner }
2940310606b0SDave Chinner 
2941f6bba201SDave Chinner /*
2942d31a1825SCarlos Maiolino  * xfs_cross_rename()
2943d31a1825SCarlos Maiolino  *
29440145225eSBhaskar Chowdhury  * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
2945d31a1825SCarlos Maiolino  */
2946d31a1825SCarlos Maiolino STATIC int
2947d31a1825SCarlos Maiolino xfs_cross_rename(
2948d31a1825SCarlos Maiolino 	struct xfs_trans	*tp,
2949d31a1825SCarlos Maiolino 	struct xfs_inode	*dp1,
2950d31a1825SCarlos Maiolino 	struct xfs_name		*name1,
2951d31a1825SCarlos Maiolino 	struct xfs_inode	*ip1,
2952d31a1825SCarlos Maiolino 	struct xfs_inode	*dp2,
2953d31a1825SCarlos Maiolino 	struct xfs_name		*name2,
2954d31a1825SCarlos Maiolino 	struct xfs_inode	*ip2,
2955d31a1825SCarlos Maiolino 	int			spaceres)
2956d31a1825SCarlos Maiolino {
2957d31a1825SCarlos Maiolino 	int		error = 0;
2958d31a1825SCarlos Maiolino 	int		ip1_flags = 0;
2959d31a1825SCarlos Maiolino 	int		ip2_flags = 0;
2960d31a1825SCarlos Maiolino 	int		dp2_flags = 0;
2961d31a1825SCarlos Maiolino 
2962d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in first parent */
2963381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2964d31a1825SCarlos Maiolino 	if (error)
2965eeacd321SDave Chinner 		goto out_trans_abort;
2966d31a1825SCarlos Maiolino 
2967d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in second parent */
2968381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2969d31a1825SCarlos Maiolino 	if (error)
2970eeacd321SDave Chinner 		goto out_trans_abort;
2971d31a1825SCarlos Maiolino 
2972d31a1825SCarlos Maiolino 	/*
2973d31a1825SCarlos Maiolino 	 * If we're renaming one or more directories across different parents,
2974d31a1825SCarlos Maiolino 	 * update the respective ".." entries (and link counts) to match the new
2975d31a1825SCarlos Maiolino 	 * parents.
2976d31a1825SCarlos Maiolino 	 */
2977d31a1825SCarlos Maiolino 	if (dp1 != dp2) {
2978d31a1825SCarlos Maiolino 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2979d31a1825SCarlos Maiolino 
2980c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2981d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2982381eee69SBrian Foster 						dp1->i_ino, spaceres);
2983d31a1825SCarlos Maiolino 			if (error)
2984eeacd321SDave Chinner 				goto out_trans_abort;
2985d31a1825SCarlos Maiolino 
2986d31a1825SCarlos Maiolino 			/* transfer ip2 ".." reference to dp1 */
2987c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2988d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp2);
2989d31a1825SCarlos Maiolino 				if (error)
2990eeacd321SDave Chinner 					goto out_trans_abort;
299191083269SEric Sandeen 				xfs_bumplink(tp, dp1);
2992d31a1825SCarlos Maiolino 			}
2993d31a1825SCarlos Maiolino 
2994d31a1825SCarlos Maiolino 			/*
2995d31a1825SCarlos Maiolino 			 * Although ip1 isn't changed here, userspace needs
2996d31a1825SCarlos Maiolino 			 * to be notified about the change, so that
2997d31a1825SCarlos Maiolino 			 * applications relying on it (like backup tools)
2998d31a1825SCarlos Maiolino 			 * will properly notice the change.
2999d31a1825SCarlos Maiolino 			 */
3000d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_CHG;
3001d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3002d31a1825SCarlos Maiolino 		}
3003d31a1825SCarlos Maiolino 
3004c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
3005d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
3006381eee69SBrian Foster 						dp2->i_ino, spaceres);
3007d31a1825SCarlos Maiolino 			if (error)
3008eeacd321SDave Chinner 				goto out_trans_abort;
3009d31a1825SCarlos Maiolino 
3010d31a1825SCarlos Maiolino 			/* transfer ip1 ".." reference to dp2 */
3011c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
3012d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp1);
3013d31a1825SCarlos Maiolino 				if (error)
3014eeacd321SDave Chinner 					goto out_trans_abort;
301591083269SEric Sandeen 				xfs_bumplink(tp, dp2);
3016d31a1825SCarlos Maiolino 			}
3017d31a1825SCarlos Maiolino 
3018d31a1825SCarlos Maiolino 			/*
3019d31a1825SCarlos Maiolino 			 * Although ip2 isn't changed here, userspace needs
3020d31a1825SCarlos Maiolino 			 * to be notified about the change, so that
3021d31a1825SCarlos Maiolino 			 * applications relying on it (like backup tools)
3022d31a1825SCarlos Maiolino 			 * will properly notice the change.
3023d31a1825SCarlos Maiolino 			 */
3024d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3025d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_CHG;
3026d31a1825SCarlos Maiolino 		}
3027d31a1825SCarlos Maiolino 	}
3028d31a1825SCarlos Maiolino 
3029d31a1825SCarlos Maiolino 	if (ip1_flags) {
3030d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
3031d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3032d31a1825SCarlos Maiolino 	}
3033d31a1825SCarlos Maiolino 	if (ip2_flags) {
3034d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
3035d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3036d31a1825SCarlos Maiolino 	}
3037d31a1825SCarlos Maiolino 	if (dp2_flags) {
3038d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
3039d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3040d31a1825SCarlos Maiolino 	}
3041d31a1825SCarlos Maiolino 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3042d31a1825SCarlos Maiolino 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3043c9cfdb38SBrian Foster 	return xfs_finish_rename(tp);
3044eeacd321SDave Chinner 
3045eeacd321SDave Chinner out_trans_abort:
30464906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
3047d31a1825SCarlos Maiolino 	return error;
3048d31a1825SCarlos Maiolino }
3049d31a1825SCarlos Maiolino 
3050d31a1825SCarlos Maiolino /*
30517dcf5c3eSDave Chinner  * xfs_rename_alloc_whiteout()
30527dcf5c3eSDave Chinner  *
3053b63da6c8SRandy Dunlap  * Return a referenced, unlinked, unlocked inode that can be used as a
30547dcf5c3eSDave Chinner  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
30557dcf5c3eSDave Chinner  * crash between allocating the inode and linking it into the rename
30567dcf5c3eSDave Chinner  * transaction, recovery will free the inode and we won't leak it.
30577dcf5c3eSDave Chinner  */
30587dcf5c3eSDave Chinner static int
30597dcf5c3eSDave Chinner xfs_rename_alloc_whiteout(
3060f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
30617dcf5c3eSDave Chinner 	struct xfs_inode	*dp,
30627dcf5c3eSDave Chinner 	struct xfs_inode	**wip)
30637dcf5c3eSDave Chinner {
30647dcf5c3eSDave Chinner 	struct xfs_inode	*tmpfile;
30657dcf5c3eSDave Chinner 	int			error;
30667dcf5c3eSDave Chinner 
3067f736d93dSChristoph Hellwig 	error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
3068f736d93dSChristoph Hellwig 				   &tmpfile);
30697dcf5c3eSDave Chinner 	if (error)
30707dcf5c3eSDave Chinner 		return error;
30717dcf5c3eSDave Chinner 
307222419ac9SBrian Foster 	/*
307322419ac9SBrian Foster 	 * Prepare the tmpfile inode as if it were created through the VFS.
3074c4a6bf7fSDarrick J. Wong 	 * Complete the inode setup and flag it as linkable.  nlink is already
3075c4a6bf7fSDarrick J. Wong 	 * zero, so we can skip the drop_nlink.
307622419ac9SBrian Foster 	 */
30772b3d1d41SChristoph Hellwig 	xfs_setup_iops(tmpfile);
30787dcf5c3eSDave Chinner 	xfs_finish_inode_setup(tmpfile);
30797dcf5c3eSDave Chinner 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
30807dcf5c3eSDave Chinner 
30817dcf5c3eSDave Chinner 	*wip = tmpfile;
30827dcf5c3eSDave Chinner 	return 0;
30837dcf5c3eSDave Chinner }
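/*
 * Editor's note: a whiteout follows the VFS convention of a character
 * device with mode WHITEOUT_MODE and device number WHITEOUT_DEV (both 0),
 * hence the S_IFCHR tmpfile above and the XFS_DIR3_FT_CHRDEV dirent type
 * set by the caller.
 */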
30847dcf5c3eSDave Chinner 
30857dcf5c3eSDave Chinner /*
3086f6bba201SDave Chinner  * xfs_rename
3087f6bba201SDave Chinner  */
3088f6bba201SDave Chinner int
3089f6bba201SDave Chinner xfs_rename(
3090f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
30917dcf5c3eSDave Chinner 	struct xfs_inode	*src_dp,
3092f6bba201SDave Chinner 	struct xfs_name		*src_name,
30937dcf5c3eSDave Chinner 	struct xfs_inode	*src_ip,
30947dcf5c3eSDave Chinner 	struct xfs_inode	*target_dp,
3095f6bba201SDave Chinner 	struct xfs_name		*target_name,
30967dcf5c3eSDave Chinner 	struct xfs_inode	*target_ip,
3097d31a1825SCarlos Maiolino 	unsigned int		flags)
3098f6bba201SDave Chinner {
30997dcf5c3eSDave Chinner 	struct xfs_mount	*mp = src_dp->i_mount;
31007dcf5c3eSDave Chinner 	struct xfs_trans	*tp;
31017dcf5c3eSDave Chinner 	struct xfs_inode	*wip = NULL;		/* whiteout inode */
31027dcf5c3eSDave Chinner 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
31036da1b4b1SDarrick J. Wong 	int			i;
310495afcf5cSDave Chinner 	int			num_inodes = __XFS_SORT_INODES;
31052b93681fSDave Chinner 	bool			new_parent = (src_dp != target_dp);
3106c19b3b05SDave Chinner 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3107f6bba201SDave Chinner 	int			spaceres;
31087dcf5c3eSDave Chinner 	int			error;
3109f6bba201SDave Chinner 
3110f6bba201SDave Chinner 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3111f6bba201SDave Chinner 
3112eeacd321SDave Chinner 	if ((flags & RENAME_EXCHANGE) && !target_ip)
3113eeacd321SDave Chinner 		return -EINVAL;
3114f6bba201SDave Chinner 
31157dcf5c3eSDave Chinner 	/*
31167dcf5c3eSDave Chinner 	 * If we are doing a whiteout operation, allocate the whiteout inode
31177dcf5c3eSDave Chinner 	 * we will be leaving behind at the source name and ensure the type
31187dcf5c3eSDave Chinner 	 * is set appropriately.
31197dcf5c3eSDave Chinner 	 */
31207dcf5c3eSDave Chinner 	if (flags & RENAME_WHITEOUT) {
31217dcf5c3eSDave Chinner 		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3122f736d93dSChristoph Hellwig 		error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
31237dcf5c3eSDave Chinner 		if (error)
31247dcf5c3eSDave Chinner 			return error;
3125f6bba201SDave Chinner 
31267dcf5c3eSDave Chinner 	/* setup src dirent info as whiteout */
31277dcf5c3eSDave Chinner 		src_name->type = XFS_DIR3_FT_CHRDEV;
31287dcf5c3eSDave Chinner 	}
31297dcf5c3eSDave Chinner 
31307dcf5c3eSDave Chinner 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3131f6bba201SDave Chinner 				inodes, &num_inodes);
3132f6bba201SDave Chinner 
3133f6bba201SDave Chinner 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3134253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
31352451337dSDave Chinner 	if (error == -ENOSPC) {
3136f6bba201SDave Chinner 		spaceres = 0;
3137253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3138253f4911SChristoph Hellwig 				&tp);
3139f6bba201SDave Chinner 	}
3140445883e8SDave Chinner 	if (error)
3141253f4911SChristoph Hellwig 		goto out_release_wip;
3142f6bba201SDave Chinner 
3143f6bba201SDave Chinner 	/*
3144f6bba201SDave Chinner 	 * Attach the dquots to the inodes
3145f6bba201SDave Chinner 	 */
3146f6bba201SDave Chinner 	error = xfs_qm_vop_rename_dqattach(inodes);
3147445883e8SDave Chinner 	if (error)
3148445883e8SDave Chinner 		goto out_trans_cancel;
3149f6bba201SDave Chinner 
3150f6bba201SDave Chinner 	/*
3151f6bba201SDave Chinner 	 * Lock all the participating inodes. Depending upon whether
3152f6bba201SDave Chinner 	 * the target_name exists in the target directory, and
3153f6bba201SDave Chinner 	 * whether the target directory is the same as the source
3154f6bba201SDave Chinner 	 * directory, we can lock from 2 to 4 inodes.
3155f6bba201SDave Chinner 	 */
3156f6bba201SDave Chinner 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3157f6bba201SDave Chinner 
3158f6bba201SDave Chinner 	/*
3159f6bba201SDave Chinner 	 * Join all the inodes to the transaction. From this point on,
3160f6bba201SDave Chinner 	 * we can rely on either trans_commit or trans_cancel to unlock
3161f6bba201SDave Chinner 	 * them.
3162f6bba201SDave Chinner 	 */
316365523218SChristoph Hellwig 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3164f6bba201SDave Chinner 	if (new_parent)
316565523218SChristoph Hellwig 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3166f6bba201SDave Chinner 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3167f6bba201SDave Chinner 	if (target_ip)
3168f6bba201SDave Chinner 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
31697dcf5c3eSDave Chinner 	if (wip)
31707dcf5c3eSDave Chinner 		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3171f6bba201SDave Chinner 
3172f6bba201SDave Chinner 	/*
3173f6bba201SDave Chinner 	 * If we are using project inheritance, we only allow renames
3174f6bba201SDave Chinner 	 * into our tree when the project IDs are the same; else the
3175f6bba201SDave Chinner 	 * tree quota mechanism would be circumvented.
3176f6bba201SDave Chinner 	 */
3177db07349dSChristoph Hellwig 	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
3178ceaf603cSChristoph Hellwig 		     target_dp->i_projid != src_ip->i_projid)) {
31792451337dSDave Chinner 		error = -EXDEV;
3180445883e8SDave Chinner 		goto out_trans_cancel;
3181f6bba201SDave Chinner 	}
3182f6bba201SDave Chinner 
3183eeacd321SDave Chinner 	/* RENAME_EXCHANGE is unique from here on. */
3184eeacd321SDave Chinner 	if (flags & RENAME_EXCHANGE)
3185eeacd321SDave Chinner 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3186d31a1825SCarlos Maiolino 					target_dp, target_name, target_ip,
3187f16dea54SBrian Foster 					spaceres);
3188d31a1825SCarlos Maiolino 
3189d31a1825SCarlos Maiolino 	/*
3190bc56ad8cSkaixuxia 	 * Check for expected errors before we dirty the transaction
3191bc56ad8cSkaixuxia 	 * so we can return an error without a transaction abort.
319202092a2fSChandan Babu R 	 *
319302092a2fSChandan Babu R 	 * Extent count overflow check:
319402092a2fSChandan Babu R 	 *
319502092a2fSChandan Babu R 	 * From the perspective of src_dp, a rename operation is essentially a
319602092a2fSChandan Babu R 	 * directory entry remove operation. Hence the only place where we check
319702092a2fSChandan Babu R 	 * for extent count overflow for src_dp is in
319802092a2fSChandan Babu R 	 * xfs_bmap_del_extent_real(). xfs_bmap_del_extent_real() returns
319902092a2fSChandan Babu R 	 * -ENOSPC when it detects a possible extent count overflow and in
320002092a2fSChandan Babu R 	 * response, the higher layers of directory handling code do the
320102092a2fSChandan Babu R 	 * following:
320202092a2fSChandan Babu R 	 * 1. Data/Free blocks: XFS lets these blocks linger until a
320302092a2fSChandan Babu R 	 *    future remove operation removes them.
320402092a2fSChandan Babu R 	 * 2. Dabtree blocks: XFS swaps the blocks with the last block in the
320502092a2fSChandan Babu R 	 *    Leaf space and unmaps the last block.
320602092a2fSChandan Babu R 	 *
320702092a2fSChandan Babu R 	 * For target_dp, there are two cases depending on whether the
320802092a2fSChandan Babu R 	 * destination directory entry exists or not.
320902092a2fSChandan Babu R 	 *
321002092a2fSChandan Babu R 	 * When the destination directory entry does not exist (i.e. target_ip ==
321102092a2fSChandan Babu R 	 * NULL), the extent count overflow check is performed only when the
321202092a2fSChandan Babu R 	 * transaction has a non-zero sized space reservation associated with it.
321302092a2fSChandan Babu R 	 * With a zero-sized space reservation, XFS allows a rename operation to
321402092a2fSChandan Babu R 	 * continue only when the directory has sufficient free space in its
321502092a2fSChandan Babu R 	 * data/leaf/free space blocks to hold the new entry.
321602092a2fSChandan Babu R 	 *
321702092a2fSChandan Babu R 	 * When the destination directory entry exists (i.e. target_ip != NULL), all
321802092a2fSChandan Babu R 	 * we need to do is change the inode number associated with the already
321902092a2fSChandan Babu R 	 * existing entry. Hence there is no need to perform an extent count
322002092a2fSChandan Babu R 	 * overflow check.
3221f6bba201SDave Chinner 	 */
3222f6bba201SDave Chinner 	if (target_ip == NULL) {
3223f6bba201SDave Chinner 		/*
3224f6bba201SDave Chinner 		 * If there's no space reservation, check the entry will
3225f6bba201SDave Chinner 		 * fit before actually inserting it.
3226f6bba201SDave Chinner 		 */
322794f3cad5SEric Sandeen 		if (!spaceres) {
322894f3cad5SEric Sandeen 			error = xfs_dir_canenter(tp, target_dp, target_name);
3229f6bba201SDave Chinner 			if (error)
3230445883e8SDave Chinner 				goto out_trans_cancel;
323102092a2fSChandan Babu R 		} else {
323202092a2fSChandan Babu R 			error = xfs_iext_count_may_overflow(target_dp,
323302092a2fSChandan Babu R 					XFS_DATA_FORK,
323402092a2fSChandan Babu R 					XFS_IEXT_DIR_MANIP_CNT(mp));
323502092a2fSChandan Babu R 			if (error)
323602092a2fSChandan Babu R 				goto out_trans_cancel;
323794f3cad5SEric Sandeen 		}
3238bc56ad8cSkaixuxia 	} else {
3239bc56ad8cSkaixuxia 		/*
3240bc56ad8cSkaixuxia 		 * If target exists and it's a directory, check whether
3241bc56ad8cSkaixuxia 		 * it can be destroyed.
3242bc56ad8cSkaixuxia 		 */
3243bc56ad8cSkaixuxia 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3244bc56ad8cSkaixuxia 		    (!xfs_dir_isempty(target_ip) ||
3245bc56ad8cSkaixuxia 		     (VFS_I(target_ip)->i_nlink > 2))) {
3246bc56ad8cSkaixuxia 			error = -EEXIST;
3247bc56ad8cSkaixuxia 			goto out_trans_cancel;
3248bc56ad8cSkaixuxia 		}
3249bc56ad8cSkaixuxia 	}
3250bc56ad8cSkaixuxia 
3251bc56ad8cSkaixuxia 	/*
32526da1b4b1SDarrick J. Wong 	 * Lock the AGI buffers we need to handle bumping the nlink of the
32536da1b4b1SDarrick J. Wong 	 * whiteout inode off the unlinked list and to handle dropping the
32546da1b4b1SDarrick J. Wong 	 * nlink of the target inode.  Per locking order rules, do this in
32556da1b4b1SDarrick J. Wong 	 * increasing AG order and before directory block allocation tries to
32566da1b4b1SDarrick J. Wong 	 * grab AGFs because we grab AGIs before AGFs.
32576da1b4b1SDarrick J. Wong 	 *
32586da1b4b1SDarrick J. Wong 	 * The (vfs) caller must ensure that if src is a directory then
32596da1b4b1SDarrick J. Wong 	 * target_ip is either null or an empty directory.
32606da1b4b1SDarrick J. Wong 	 */
32616da1b4b1SDarrick J. Wong 	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
32626da1b4b1SDarrick J. Wong 		if (inodes[i] == wip ||
32636da1b4b1SDarrick J. Wong 		    (inodes[i] == target_ip &&
32646da1b4b1SDarrick J. Wong 		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
32656da1b4b1SDarrick J. Wong 			struct xfs_buf	*bp;
32666da1b4b1SDarrick J. Wong 			xfs_agnumber_t	agno;
32676da1b4b1SDarrick J. Wong 
32686da1b4b1SDarrick J. Wong 			agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
32696da1b4b1SDarrick J. Wong 			error = xfs_read_agi(mp, tp, agno, &bp);
32706da1b4b1SDarrick J. Wong 			if (error)
32716da1b4b1SDarrick J. Wong 				goto out_trans_cancel;
32726da1b4b1SDarrick J. Wong 		}
32736da1b4b1SDarrick J. Wong 	}
32746da1b4b1SDarrick J. Wong 
32756da1b4b1SDarrick J. Wong 	/*
3276bc56ad8cSkaixuxia 	 * Directory entry creation below may acquire the AGF. Remove
3277bc56ad8cSkaixuxia 	 * the whiteout from the unlinked list first to preserve correct
3278bc56ad8cSkaixuxia 	 * AGI/AGF locking order. This dirties the transaction so failures
3279bc56ad8cSkaixuxia 	 * after this point will abort and log recovery will clean up the
3280bc56ad8cSkaixuxia 	 * mess.
3281bc56ad8cSkaixuxia 	 *
3282bc56ad8cSkaixuxia 	 * For whiteouts, we need to bump the link count on the whiteout
3283bc56ad8cSkaixuxia 	 * inode. After this point, we have a real link, so clear the tmpfile
3284bc56ad8cSkaixuxia 	 * state flag from the inode so it doesn't accidentally get misused
3285bc56ad8cSkaixuxia 	 * in the future.
3286bc56ad8cSkaixuxia 	 */
3287bc56ad8cSkaixuxia 	if (wip) {
3288f40aadb2SDave Chinner 		struct xfs_perag	*pag;
3289f40aadb2SDave Chinner 
3290bc56ad8cSkaixuxia 		ASSERT(VFS_I(wip)->i_nlink == 0);
3291f40aadb2SDave Chinner 
3292f40aadb2SDave Chinner 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3293f40aadb2SDave Chinner 		error = xfs_iunlink_remove(tp, pag, wip);
3294f40aadb2SDave Chinner 		xfs_perag_put(pag);
3295bc56ad8cSkaixuxia 		if (error)
3296bc56ad8cSkaixuxia 			goto out_trans_cancel;
3297bc56ad8cSkaixuxia 
3298bc56ad8cSkaixuxia 		xfs_bumplink(tp, wip);
3299bc56ad8cSkaixuxia 		VFS_I(wip)->i_state &= ~I_LINKABLE;
3300bc56ad8cSkaixuxia 	}
3301bc56ad8cSkaixuxia 
3302bc56ad8cSkaixuxia 	/*
3303bc56ad8cSkaixuxia 	 * Set up the target.
3304bc56ad8cSkaixuxia 	 */
3305bc56ad8cSkaixuxia 	if (target_ip == NULL) {
3306f6bba201SDave Chinner 		/*
3307f6bba201SDave Chinner 		 * If target does not exist and the rename crosses
3308f6bba201SDave Chinner 		 * directories, adjust the target directory link count
3309f6bba201SDave Chinner 		 * to account for the ".." reference from the new entry.
3310f6bba201SDave Chinner 		 */
3311f6bba201SDave Chinner 		error = xfs_dir_createname(tp, target_dp, target_name,
3312381eee69SBrian Foster 					   src_ip->i_ino, spaceres);
3313f6bba201SDave Chinner 		if (error)
3314c8eac49eSBrian Foster 			goto out_trans_cancel;
3315f6bba201SDave Chinner 
3316f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
3317f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3318f6bba201SDave Chinner 
3319f6bba201SDave Chinner 		if (new_parent && src_is_directory) {
332091083269SEric Sandeen 			xfs_bumplink(tp, target_dp);
3321f6bba201SDave Chinner 		}
3322f6bba201SDave Chinner 	} else { /* target_ip != NULL */
3323f6bba201SDave Chinner 		/*
3324f6bba201SDave Chinner 		 * Link the source inode under the target name.
3325f6bba201SDave Chinner 		 * If the source inode is a directory and we are moving
3326f6bba201SDave Chinner 		 * it across directories, its ".." entry will be
3327f6bba201SDave Chinner 		 * inconsistent until we replace that down below.
3328f6bba201SDave Chinner 		 *
3329f6bba201SDave Chinner 		 * In case there is already an entry with the same
3330f6bba201SDave Chinner 		 * name at the destination directory, remove it first.
3331f6bba201SDave Chinner 		 */
3332f6bba201SDave Chinner 		error = xfs_dir_replace(tp, target_dp, target_name,
3333381eee69SBrian Foster 					src_ip->i_ino, spaceres);
3334f6bba201SDave Chinner 		if (error)
3335c8eac49eSBrian Foster 			goto out_trans_cancel;
3336f6bba201SDave Chinner 
3337f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
3338f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3339f6bba201SDave Chinner 
3340f6bba201SDave Chinner 		/*
3341f6bba201SDave Chinner 		 * Decrement the link count on the target since the target
3342f6bba201SDave Chinner 		 * dir no longer points to it.
3343f6bba201SDave Chinner 		 */
3344f6bba201SDave Chinner 		error = xfs_droplink(tp, target_ip);
3345f6bba201SDave Chinner 		if (error)
3346c8eac49eSBrian Foster 			goto out_trans_cancel;
3347f6bba201SDave Chinner 
3348f6bba201SDave Chinner 		if (src_is_directory) {
3349f6bba201SDave Chinner 			/*
3350f6bba201SDave Chinner 			 * Drop the link from the old "." entry.
3351f6bba201SDave Chinner 			 */
3352f6bba201SDave Chinner 			error = xfs_droplink(tp, target_ip);
3353f6bba201SDave Chinner 			if (error)
3354c8eac49eSBrian Foster 				goto out_trans_cancel;
3355f6bba201SDave Chinner 		}
3356f6bba201SDave Chinner 	} /* target_ip != NULL */
3357f6bba201SDave Chinner 
3358f6bba201SDave Chinner 	/*
3359f6bba201SDave Chinner 	 * Remove the source.
3360f6bba201SDave Chinner 	 */
3361f6bba201SDave Chinner 	if (new_parent && src_is_directory) {
3362f6bba201SDave Chinner 		/*
3363f6bba201SDave Chinner 		 * Rewrite the ".." entry to point to the new
3364f6bba201SDave Chinner 		 * directory.
3365f6bba201SDave Chinner 		 */
3366f6bba201SDave Chinner 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3367381eee69SBrian Foster 					target_dp->i_ino, spaceres);
33682451337dSDave Chinner 		ASSERT(error != -EEXIST);
3369f6bba201SDave Chinner 		if (error)
3370c8eac49eSBrian Foster 			goto out_trans_cancel;
3371f6bba201SDave Chinner 	}
3372f6bba201SDave Chinner 
3373f6bba201SDave Chinner 	/*
3374f6bba201SDave Chinner 	 * We always want to hit the ctime on the source inode.
3375f6bba201SDave Chinner 	 *
3376f6bba201SDave Chinner 	 * This isn't strictly required by the standards since the source
3377f6bba201SDave Chinner 	 * inode isn't really being changed, but old unix file systems did
3378f6bba201SDave Chinner 	 * it and some incremental backup programs won't work without it.
3379f6bba201SDave Chinner 	 */
3380f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3381f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3382f6bba201SDave Chinner 
3383f6bba201SDave Chinner 	/*
3384f6bba201SDave Chinner 	 * Adjust the link count on src_dp.  This is necessary when
3385f6bba201SDave Chinner 	 * renaming a directory, either within one parent when
3386f6bba201SDave Chinner 	 * the target existed, or across two parent directories.
3387f6bba201SDave Chinner 	 */
3388f6bba201SDave Chinner 	if (src_is_directory && (new_parent || target_ip != NULL)) {
3389f6bba201SDave Chinner 
3390f6bba201SDave Chinner 		/*
3391f6bba201SDave Chinner 		 * Decrement link count on src_directory since the
3392f6bba201SDave Chinner 		 * entry that's moved no longer points to it.
3393f6bba201SDave Chinner 		 */
3394f6bba201SDave Chinner 		error = xfs_droplink(tp, src_dp);
3395f6bba201SDave Chinner 		if (error)
3396c8eac49eSBrian Foster 			goto out_trans_cancel;
3397f6bba201SDave Chinner 	}
3398f6bba201SDave Chinner 
33997dcf5c3eSDave Chinner 	/*
34007dcf5c3eSDave Chinner 	 * For whiteouts, we only need to update the source dirent with the
34017dcf5c3eSDave Chinner 	 * inode number of the whiteout inode rather than removing it
34027dcf5c3eSDave Chinner 	 * altogether.
34037dcf5c3eSDave Chinner 	 */
34047dcf5c3eSDave Chinner 	if (wip) {
34057dcf5c3eSDave Chinner 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3406381eee69SBrian Foster 					spaceres);
340702092a2fSChandan Babu R 	} else {
340802092a2fSChandan Babu R 		/*
340902092a2fSChandan Babu R 		 * NOTE: We don't need to check for extent count overflow here
341002092a2fSChandan Babu R 		 * because the dir remove name code will leave the dir block in
341102092a2fSChandan Babu R 		 * place if the extent count would overflow.
341202092a2fSChandan Babu R 		 */
3413f6bba201SDave Chinner 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3414381eee69SBrian Foster 					   spaceres);
341502092a2fSChandan Babu R 	}
341602092a2fSChandan Babu R 
3417f6bba201SDave Chinner 	if (error)
3418c8eac49eSBrian Foster 		goto out_trans_cancel;
3419f6bba201SDave Chinner 
3420f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3421f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3422f6bba201SDave Chinner 	if (new_parent)
3423f6bba201SDave Chinner 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3424f6bba201SDave Chinner 
3425c9cfdb38SBrian Foster 	error = xfs_finish_rename(tp);
34267dcf5c3eSDave Chinner 	if (wip)
342744a8736bSDarrick J. Wong 		xfs_irele(wip);
34287dcf5c3eSDave Chinner 	return error;
3429f6bba201SDave Chinner 
3430445883e8SDave Chinner out_trans_cancel:
34314906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
3432253f4911SChristoph Hellwig out_release_wip:
34337dcf5c3eSDave Chinner 	if (wip)
343444a8736bSDarrick J. Wong 		xfs_irele(wip);
3435f6bba201SDave Chinner 	return error;
3436f6bba201SDave Chinner }
3437f6bba201SDave Chinner 
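/*
 * Illustrative sketch, not part of the original source: a minimal way a
 * VFS-level caller could drive xfs_rename().  example_rename() is a
 * hypothetical helper name; the real call site is the rename handler in
 * xfs_iops.c, which also derives the dirent ftype from the inode mode
 * where needed instead of the XFS_DIR3_FT_UNKNOWN used here.
 */
static inline int
example_rename(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*src_dp,
	struct dentry		*src_dentry,
	struct xfs_inode	*target_dp,
	struct dentry		*target_dentry,
	unsigned int		flags)
{
	struct xfs_name		sname = {
		.name	= src_dentry->d_name.name,
		.len	= src_dentry->d_name.len,
		.type	= XFS_DIR3_FT_UNKNOWN,
	};
	struct xfs_name		tname = {
		.name	= target_dentry->d_name.name,
		.len	= target_dentry->d_name.len,
		.type	= XFS_DIR3_FT_UNKNOWN,
	};
	struct inode		*new_inode = d_inode(target_dentry);

	/* a NULL target_ip tells xfs_rename() the target name is new */
	return xfs_rename(mnt_userns, src_dp, &sname,
			  XFS_I(d_inode(src_dentry)), target_dp, &tname,
			  new_inode ? XFS_I(new_inode) : NULL, flags);
}
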
3438e6187b34SDave Chinner static int
3439e6187b34SDave Chinner xfs_iflush(
344093848a99SChristoph Hellwig 	struct xfs_inode	*ip,
344193848a99SChristoph Hellwig 	struct xfs_buf		*bp)
34421da177e4SLinus Torvalds {
344393848a99SChristoph Hellwig 	struct xfs_inode_log_item *iip = ip->i_itemp;
344493848a99SChristoph Hellwig 	struct xfs_dinode	*dip;
344593848a99SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
3446f2019299SBrian Foster 	int			error;
34471da177e4SLinus Torvalds 
3448579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3449718ecc50SDave Chinner 	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3450f7e67b20SChristoph Hellwig 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3451daf83964SChristoph Hellwig 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
345290c60e16SDave Chinner 	ASSERT(iip->ili_item.li_buf == bp);
34531da177e4SLinus Torvalds 
345488ee2df7SChristoph Hellwig 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
34551da177e4SLinus Torvalds 
3456f2019299SBrian Foster 	/*
3457f2019299SBrian Foster 	 * We don't flush the inode if any of the following checks fail, but we
3458f2019299SBrian Foster 	 * do still update the log item and attach to the backing buffer as if
3459f2019299SBrian Foster 	 * the flush happened. This is a formality to facilitate predictable
3460f2019299SBrian Foster 	 * error handling as the caller will shut down and fail the buffer.
3461f2019299SBrian Foster 	 */
3462f2019299SBrian Foster 	error = -EFSCORRUPTED;
346369ef921bSChristoph Hellwig 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
34649e24cfd0SDarrick J. Wong 			       mp, XFS_ERRTAG_IFLUSH_1)) {
34656a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3466c9690043SDarrick J. Wong 			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
34676a19d939SDave Chinner 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3468f2019299SBrian Foster 		goto flush_out;
34691da177e4SLinus Torvalds 	}
3470c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode)) {
34711da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
3472f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3473f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
34749e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_3)) {
34756a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3476c9690043SDarrick J. Wong 				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
34776a19d939SDave Chinner 				__func__, ip->i_ino, ip);
3478f2019299SBrian Foster 			goto flush_out;
34791da177e4SLinus Torvalds 		}
3480c19b3b05SDave Chinner 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
34811da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
3482f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3483f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3484f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
34859e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_4)) {
34866a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3487c9690043SDarrick J. Wong 				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
34886a19d939SDave Chinner 				__func__, ip->i_ino, ip);
3489f2019299SBrian Foster 			goto flush_out;
34901da177e4SLinus Torvalds 		}
34911da177e4SLinus Torvalds 	}
3492daf83964SChristoph Hellwig 	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
34936e73a545SChristoph Hellwig 				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
34946a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
34956a19d939SDave Chinner 			"%s: detected corrupt incore inode %Lu, "
3496c9690043SDarrick J. Wong 			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
34976a19d939SDave Chinner 			__func__, ip->i_ino,
3498daf83964SChristoph Hellwig 			ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
34996e73a545SChristoph Hellwig 			ip->i_nblocks, ip);
3500f2019299SBrian Foster 		goto flush_out;
35011da177e4SLinus Torvalds 	}
35027821ea30SChristoph Hellwig 	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
35039e24cfd0SDarrick J. Wong 				mp, XFS_ERRTAG_IFLUSH_6)) {
35046a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3505c9690043SDarrick J. Wong 			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
35067821ea30SChristoph Hellwig 			__func__, ip->i_ino, ip->i_forkoff, ip);
3507f2019299SBrian Foster 		goto flush_out;
35081da177e4SLinus Torvalds 	}
3509e60896d8SDave Chinner 
35101da177e4SLinus Torvalds 	/*
3511965e0a1aSChristoph Hellwig 	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3512965e0a1aSChristoph Hellwig 	 * count for correct sequencing.  We bump the flush iteration count so
3513965e0a1aSChristoph Hellwig 	 * we can detect flushes which postdate a log record during recovery.
3514965e0a1aSChristoph Hellwig 	 * This is redundant as we now log every change and hence this can't
3515965e0a1aSChristoph Hellwig 	 * happen, but we still need to do it to ensure backwards compatibility
3516965e0a1aSChristoph Hellwig 	 * with old kernels that predate logging all inode changes.
35171da177e4SLinus Torvalds 	 */
35186471e9c5SChristoph Hellwig 	if (!xfs_sb_version_has_v3inode(&mp->m_sb))
3519965e0a1aSChristoph Hellwig 		ip->i_flushiter++;
35201da177e4SLinus Torvalds 
35210f45a1b2SChristoph Hellwig 	/*
35220f45a1b2SChristoph Hellwig 	 * If there are inline format data / attr forks attached to this inode,
35230f45a1b2SChristoph Hellwig 	 * make sure they are not corrupt.
35240f45a1b2SChristoph Hellwig 	 */
3525f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
35260f45a1b2SChristoph Hellwig 	    xfs_ifork_verify_local_data(ip))
35270f45a1b2SChristoph Hellwig 		goto flush_out;
3528f7e67b20SChristoph Hellwig 	if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
35290f45a1b2SChristoph Hellwig 	    xfs_ifork_verify_local_attr(ip))
3530f2019299SBrian Foster 		goto flush_out;
3531005c5db8SDarrick J. Wong 
35321da177e4SLinus Torvalds 	/*
35333987848cSDave Chinner 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
35343987848cSDave Chinner 	 * copy out the core of the inode, because if the inode is dirty at all
35353987848cSDave Chinner 	 * the core must be.
35361da177e4SLinus Torvalds 	 */
353793f958f9SDave Chinner 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
35381da177e4SLinus Torvalds 
35391da177e4SLinus Torvalds 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3540ee7b83fdSChristoph Hellwig 	if (!xfs_sb_version_has_v3inode(&mp->m_sb)) {
3541965e0a1aSChristoph Hellwig 		if (ip->i_flushiter == DI_MAX_FLUSH)
3542965e0a1aSChristoph Hellwig 			ip->i_flushiter = 0;
3543ee7b83fdSChristoph Hellwig 	}
35441da177e4SLinus Torvalds 
3545005c5db8SDarrick J. Wong 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3546005c5db8SDarrick J. Wong 	if (XFS_IFORK_Q(ip))
3547005c5db8SDarrick J. Wong 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
35481da177e4SLinus Torvalds 
35491da177e4SLinus Torvalds 	/*
3550f5d8d5c4SChristoph Hellwig 	 * We've recorded everything logged in the inode, so we'd like to clear
3551f5d8d5c4SChristoph Hellwig 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3552f5d8d5c4SChristoph Hellwig 	 * However, we can't stop logging all this information until the data
3553f5d8d5c4SChristoph Hellwig 	 * we've copied into the disk buffer is written to disk.  If we did, we
3554f5d8d5c4SChristoph Hellwig 	 * might overwrite the copy of the inode in the log with all the data
3555f5d8d5c4SChristoph Hellwig 	 * after re-logging only part of it, and in the face of a crash we
3556f5d8d5c4SChristoph Hellwig 	 * wouldn't have all the data we need to recover.
35571da177e4SLinus Torvalds 	 *
3558f5d8d5c4SChristoph Hellwig 	 * What we do is move the bits to the ili_last_fields field.  When
3559f5d8d5c4SChristoph Hellwig 	 * logging the inode, these bits are moved back to the ili_fields field.
3560664ffb8aSChristoph Hellwig 	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3561664ffb8aSChristoph Hellwig 	 * we know that the information those bits represent is permanently on
3562f5d8d5c4SChristoph Hellwig 	 * disk.  As long as the flush completes before the inode is logged
3563f5d8d5c4SChristoph Hellwig 	 * again, then both ili_fields and ili_last_fields will be cleared.
35641da177e4SLinus Torvalds 	 */
3565f2019299SBrian Foster 	error = 0;
3566f2019299SBrian Foster flush_out:
35671319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
3568f5d8d5c4SChristoph Hellwig 	iip->ili_last_fields = iip->ili_fields;
3569f5d8d5c4SChristoph Hellwig 	iip->ili_fields = 0;
3570fc0561ceSDave Chinner 	iip->ili_fsync_fields = 0;
35711319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
35721da177e4SLinus Torvalds 
35731319ebefSDave Chinner 	/*
35741319ebefSDave Chinner 	 * Store the current LSN of the inode so that we can tell whether the
3575664ffb8aSChristoph Hellwig 	 * item has moved in the AIL from xfs_buf_inode_iodone().
35761319ebefSDave Chinner 	 */
35777b2e2a31SDavid Chinner 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
35787b2e2a31SDavid Chinner 				&iip->ili_item.li_lsn);
35791da177e4SLinus Torvalds 
358093848a99SChristoph Hellwig 	/* generate the checksum. */
358193848a99SChristoph Hellwig 	xfs_dinode_calc_crc(mp, dip);
3582f2019299SBrian Foster 	return error;
35831da177e4SLinus Torvalds }
358444a8736bSDarrick J. Wong 
3585e6187b34SDave Chinner /*
3586e6187b34SDave Chinner  * Non-blocking flush of dirty inode metadata into the backing buffer.
3587e6187b34SDave Chinner  *
3588e6187b34SDave Chinner  * The caller must have a reference to the inode and hold the cluster buffer
3589e6187b34SDave Chinner  * locked. The function will walk across all the inodes on the cluster buffer it
3590e6187b34SDave Chinner  * locked. The function will walk all the inodes attached to the cluster buffer
3591e6187b34SDave Chinner  * that it can find and lock without blocking, and flush them to the buffer.
35925717ea4dSDave Chinner  * On successful flushing of at least one inode, the caller must write out the
35935717ea4dSDave Chinner  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
35945717ea4dSDave Chinner  * the caller needs to release the buffer. On failure, the filesystem will be
35955717ea4dSDave Chinner  * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
35965717ea4dSDave Chinner  * will be returned.
3597e6187b34SDave Chinner  */
3598e6187b34SDave Chinner int
3599e6187b34SDave Chinner xfs_iflush_cluster(
3600e6187b34SDave Chinner 	struct xfs_buf		*bp)
3601e6187b34SDave Chinner {
36025717ea4dSDave Chinner 	struct xfs_mount	*mp = bp->b_mount;
36035717ea4dSDave Chinner 	struct xfs_log_item	*lip, *n;
36045717ea4dSDave Chinner 	struct xfs_inode	*ip;
36055717ea4dSDave Chinner 	struct xfs_inode_log_item *iip;
3606e6187b34SDave Chinner 	int			clcount = 0;
36075717ea4dSDave Chinner 	int			error = 0;
3608e6187b34SDave Chinner 
3609e6187b34SDave Chinner 	/*
36105717ea4dSDave Chinner 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
36115717ea4dSDave Chinner 	 * can remove the current item from the list.
3612e6187b34SDave Chinner 	 */
36135717ea4dSDave Chinner 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
36145717ea4dSDave Chinner 		iip = (struct xfs_inode_log_item *)lip;
36155717ea4dSDave Chinner 		ip = iip->ili_inode;
36165717ea4dSDave Chinner 
36175717ea4dSDave Chinner 		/*
36185717ea4dSDave Chinner 		 * Quick and dirty check to avoid locks if possible.
36195717ea4dSDave Chinner 		 */
3620718ecc50SDave Chinner 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
36215717ea4dSDave Chinner 			continue;
36225717ea4dSDave Chinner 		if (xfs_ipincount(ip))
36235717ea4dSDave Chinner 			continue;
36245717ea4dSDave Chinner 
36255717ea4dSDave Chinner 		/*
36265717ea4dSDave Chinner 		 * The inode is still attached to the buffer, which means it is
36275717ea4dSDave Chinner 		 * dirty but reclaim might try to grab it. Check carefully for
36285717ea4dSDave Chinner 		 * that, and grab the ilock while still holding the i_flags_lock
36295717ea4dSDave Chinner 		 * to guarantee reclaim will not be able to reclaim this inode
36305717ea4dSDave Chinner 		 * once we drop the i_flags_lock.
36315717ea4dSDave Chinner 		 */
36325717ea4dSDave Chinner 		spin_lock(&ip->i_flags_lock);
36335717ea4dSDave Chinner 		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3634718ecc50SDave Chinner 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
36355717ea4dSDave Chinner 			spin_unlock(&ip->i_flags_lock);
3636e6187b34SDave Chinner 			continue;
3637e6187b34SDave Chinner 		}
3638e6187b34SDave Chinner 
3639e6187b34SDave Chinner 		/*
36405717ea4dSDave Chinner 		 * ILOCK will pin the inode against reclaim and prevent
36415717ea4dSDave Chinner 		 * concurrent transactions modifying the inode while we are
3642718ecc50SDave Chinner 		 * flushing the inode. If we get the lock, set the flushing
3643718ecc50SDave Chinner 		 * state before we drop the i_flags_lock.
3644e6187b34SDave Chinner 		 */
36455717ea4dSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
36465717ea4dSDave Chinner 			spin_unlock(&ip->i_flags_lock);
36475717ea4dSDave Chinner 			continue;
36485717ea4dSDave Chinner 		}
3649718ecc50SDave Chinner 		__xfs_iflags_set(ip, XFS_IFLUSHING);
36505717ea4dSDave Chinner 		spin_unlock(&ip->i_flags_lock);
36515717ea4dSDave Chinner 
36525717ea4dSDave Chinner 		/*
36535717ea4dSDave Chinner 		 * Abort flushing this inode if we are shut down because the
36545717ea4dSDave Chinner 		 * inode may not currently be in the AIL. This can occur when
36555717ea4dSDave Chinner 		 * log I/O failure unpins the inode without inserting into the
36565717ea4dSDave Chinner 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
36575717ea4dSDave Chinner 		 * that otherwise looks like it should be flushed.
36585717ea4dSDave Chinner 		 */
36595717ea4dSDave Chinner 		if (XFS_FORCED_SHUTDOWN(mp)) {
36605717ea4dSDave Chinner 			xfs_iunpin_wait(ip);
36615717ea4dSDave Chinner 			xfs_iflush_abort(ip);
36625717ea4dSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
36635717ea4dSDave Chinner 			error = -EIO;
36645717ea4dSDave Chinner 			continue;
36655717ea4dSDave Chinner 		}
36665717ea4dSDave Chinner 
36675717ea4dSDave Chinner 		/* don't block waiting on a log force to unpin dirty inodes */
36685717ea4dSDave Chinner 		if (xfs_ipincount(ip)) {
3669718ecc50SDave Chinner 			xfs_iflags_clear(ip, XFS_IFLUSHING);
36705717ea4dSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
36715717ea4dSDave Chinner 			continue;
36725717ea4dSDave Chinner 		}
36735717ea4dSDave Chinner 
36745717ea4dSDave Chinner 		if (!xfs_inode_clean(ip))
36755717ea4dSDave Chinner 			error = xfs_iflush(ip, bp);
36765717ea4dSDave Chinner 		else
3677718ecc50SDave Chinner 			xfs_iflags_clear(ip, XFS_IFLUSHING);
36785717ea4dSDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
36795717ea4dSDave Chinner 		if (error)
3680e6187b34SDave Chinner 			break;
3681e6187b34SDave Chinner 		clcount++;
3682e6187b34SDave Chinner 	}
3683e6187b34SDave Chinner 
3684e6187b34SDave Chinner 	if (error) {
3685e6187b34SDave Chinner 		bp->b_flags |= XBF_ASYNC;
3686e6187b34SDave Chinner 		xfs_buf_ioend_fail(bp);
3687e6187b34SDave Chinner 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3688e6187b34SDave Chinner 		return error;
3689e6187b34SDave Chinner 	}
3690e6187b34SDave Chinner 
36915717ea4dSDave Chinner 	if (!clcount)
36925717ea4dSDave Chinner 		return -EAGAIN;
36935717ea4dSDave Chinner 
36945717ea4dSDave Chinner 	XFS_STATS_INC(mp, xs_icluster_flushcnt);
36955717ea4dSDave Chinner 	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
36965717ea4dSDave Chinner 	return 0;
36975717ea4dSDave Chinner 
36995717ea4dSDave Chinner 
370044a8736bSDarrick J. Wong /* Release an inode. */
370144a8736bSDarrick J. Wong void
370244a8736bSDarrick J. Wong xfs_irele(
370344a8736bSDarrick J. Wong 	struct xfs_inode	*ip)
370444a8736bSDarrick J. Wong {
370544a8736bSDarrick J. Wong 	trace_xfs_irele(ip, _RET_IP_);
370644a8736bSDarrick J. Wong 	iput(VFS_I(ip));
370744a8736bSDarrick J. Wong }
370854fbdd10SChristoph Hellwig 
370954fbdd10SChristoph Hellwig /*
371054fbdd10SChristoph Hellwig  * Ensure all committed transactions touching the inode are written to the log.
371154fbdd10SChristoph Hellwig  */
371254fbdd10SChristoph Hellwig int
371354fbdd10SChristoph Hellwig xfs_log_force_inode(
371454fbdd10SChristoph Hellwig 	struct xfs_inode	*ip)
371554fbdd10SChristoph Hellwig {
37165f9b4b0dSDave Chinner 	xfs_csn_t		seq = 0;
371754fbdd10SChristoph Hellwig 
371854fbdd10SChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_SHARED);
371954fbdd10SChristoph Hellwig 	if (xfs_ipincount(ip))
37205f9b4b0dSDave Chinner 		seq = ip->i_itemp->ili_commit_seq;
372154fbdd10SChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
372254fbdd10SChristoph Hellwig 
37235f9b4b0dSDave Chinner 	if (!seq)
372454fbdd10SChristoph Hellwig 		return 0;
37255f9b4b0dSDave Chinner 	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
372654fbdd10SChristoph Hellwig }
3727e2aaee9cSDarrick J. Wong 
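/*
 * Illustrative sketch, not part of the original source: how an
 * fsync-style path might use xfs_log_force_inode() to make committed
 * metadata changes durable.  example_fsync_metadata() is a hypothetical
 * name for illustration.
 */
static inline int
example_fsync_metadata(
	struct xfs_inode	*ip)
{
	/*
	 * If the inode is pinned by committed-but-unstable changes, this
	 * forces and waits for the log up to the inode's commit sequence;
	 * otherwise it is a no-op that returns 0.
	 */
	return xfs_log_force_inode(ip);
}
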
3728e2aaee9cSDarrick J. Wong /*
3729e2aaee9cSDarrick J. Wong  * Grab the exclusive iolock for a data copy from src to dest, making sure to
3730e2aaee9cSDarrick J. Wong  * abide by the vfs locking order (lowest pointer value goes first) and break
3731e2aaee9cSDarrick J. Wong  * the layout leases before proceeding.  The loop is needed because we cannot
3732e2aaee9cSDarrick J. Wong  * call the blocking break_layout() with the iolocks held, and therefore have to
3733e2aaee9cSDarrick J. Wong  * back out both locks.
3734e2aaee9cSDarrick J. Wong  */
3735e2aaee9cSDarrick J. Wong static int
3736e2aaee9cSDarrick J. Wong xfs_iolock_two_inodes_and_break_layout(
3737e2aaee9cSDarrick J. Wong 	struct inode		*src,
3738e2aaee9cSDarrick J. Wong 	struct inode		*dest)
3739e2aaee9cSDarrick J. Wong {
3740e2aaee9cSDarrick J. Wong 	int			error;
3741e2aaee9cSDarrick J. Wong 
3742e2aaee9cSDarrick J. Wong 	if (src > dest)
3743e2aaee9cSDarrick J. Wong 		swap(src, dest);
3744e2aaee9cSDarrick J. Wong 
3745e2aaee9cSDarrick J. Wong retry:
3746e2aaee9cSDarrick J. Wong 	/* Wait to break both inodes' layouts before we start locking. */
3747e2aaee9cSDarrick J. Wong 	error = break_layout(src, true);
3748e2aaee9cSDarrick J. Wong 	if (error)
3749e2aaee9cSDarrick J. Wong 		return error;
3750e2aaee9cSDarrick J. Wong 	if (src != dest) {
3751e2aaee9cSDarrick J. Wong 		error = break_layout(dest, true);
3752e2aaee9cSDarrick J. Wong 		if (error)
3753e2aaee9cSDarrick J. Wong 			return error;
3754e2aaee9cSDarrick J. Wong 	}
3755e2aaee9cSDarrick J. Wong 
3756e2aaee9cSDarrick J. Wong 	/* Lock one inode and make sure nobody got in and leased it. */
3757e2aaee9cSDarrick J. Wong 	inode_lock(src);
3758e2aaee9cSDarrick J. Wong 	error = break_layout(src, false);
3759e2aaee9cSDarrick J. Wong 	if (error) {
3760e2aaee9cSDarrick J. Wong 		inode_unlock(src);
3761e2aaee9cSDarrick J. Wong 		if (error == -EWOULDBLOCK)
3762e2aaee9cSDarrick J. Wong 			goto retry;
3763e2aaee9cSDarrick J. Wong 		return error;
3764e2aaee9cSDarrick J. Wong 	}
3765e2aaee9cSDarrick J. Wong 
3766e2aaee9cSDarrick J. Wong 	if (src == dest)
3767e2aaee9cSDarrick J. Wong 		return 0;
3768e2aaee9cSDarrick J. Wong 
3769e2aaee9cSDarrick J. Wong 	/* Lock the other inode and make sure nobody got in and leased it. */
3770e2aaee9cSDarrick J. Wong 	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3771e2aaee9cSDarrick J. Wong 	error = break_layout(dest, false);
3772e2aaee9cSDarrick J. Wong 	if (error) {
3773e2aaee9cSDarrick J. Wong 		inode_unlock(src);
3774e2aaee9cSDarrick J. Wong 		inode_unlock(dest);
3775e2aaee9cSDarrick J. Wong 		if (error == -EWOULDBLOCK)
3776e2aaee9cSDarrick J. Wong 			goto retry;
3777e2aaee9cSDarrick J. Wong 		return error;
3778e2aaee9cSDarrick J. Wong 	}
3779e2aaee9cSDarrick J. Wong 
3780e2aaee9cSDarrick J. Wong 	return 0;
3781e2aaee9cSDarrick J. Wong }
3782e2aaee9cSDarrick J. Wong 
3783e2aaee9cSDarrick J. Wong /*
3784e2aaee9cSDarrick J. Wong  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3785e2aaee9cSDarrick J. Wong  * mmap activity.
3786e2aaee9cSDarrick J. Wong  */
3787e2aaee9cSDarrick J. Wong int
3788e2aaee9cSDarrick J. Wong xfs_ilock2_io_mmap(
3789e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip1,
3790e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip2)
3791e2aaee9cSDarrick J. Wong {
3792e2aaee9cSDarrick J. Wong 	int			ret;
3793e2aaee9cSDarrick J. Wong 
3794e2aaee9cSDarrick J. Wong 	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3795e2aaee9cSDarrick J. Wong 	if (ret)
3796e2aaee9cSDarrick J. Wong 		return ret;
3797e2aaee9cSDarrick J. Wong 	if (ip1 == ip2)
3798e2aaee9cSDarrick J. Wong 		xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3799e2aaee9cSDarrick J. Wong 	else
3800e2aaee9cSDarrick J. Wong 		xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
3801e2aaee9cSDarrick J. Wong 				    ip2, XFS_MMAPLOCK_EXCL);
3802e2aaee9cSDarrick J. Wong 	return 0;
3803e2aaee9cSDarrick J. Wong }
3804e2aaee9cSDarrick J. Wong 
3805e2aaee9cSDarrick J. Wong /* Unlock both inodes to allow IO and mmap activity. */
3806e2aaee9cSDarrick J. Wong void
3807e2aaee9cSDarrick J. Wong xfs_iunlock2_io_mmap(
3808e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip1,
3809e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip2)
3810e2aaee9cSDarrick J. Wong {
3811e2aaee9cSDarrick J. Wong 	bool			same_inode = (ip1 == ip2);
3812e2aaee9cSDarrick J. Wong 
3813e2aaee9cSDarrick J. Wong 	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3814e2aaee9cSDarrick J. Wong 	if (!same_inode)
3815e2aaee9cSDarrick J. Wong 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3816e2aaee9cSDarrick J. Wong 	inode_unlock(VFS_I(ip2));
3817e2aaee9cSDarrick J. Wong 	if (!same_inode)
3818e2aaee9cSDarrick J. Wong 		inode_unlock(VFS_I(ip1));
3819e2aaee9cSDarrick J. Wong }
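
/*
 * Illustrative sketch, not part of the original source: the expected
 * pairing of the two helpers above around an inode-to-inode data
 * operation such as a reflink or dedupe request.  example_remap() is a
 * hypothetical name for illustration.
 */
static inline int
example_remap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			error;

	error = xfs_ilock2_io_mmap(ip1, ip2);
	if (error)
		return error;

	/* ... do the copy/remap work with file I/O and mmap quiesced ... */

	xfs_iunlock2_io_mmap(ip1, ip2);
	return 0;
}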
3820