xref: /openbmc/linux/fs/xfs/xfs_inode.c (revision f12b96683d6976a3a07fdf3323277c79dbe8f6ab)
10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
33e57ecf6SOlaf Weber  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
47b718769SNathan Scott  * All Rights Reserved.
51da177e4SLinus Torvalds  */
6f0e28280SJeff Layton #include <linux/iversion.h>
740ebd81dSRobert P. J. Day 
81da177e4SLinus Torvalds #include "xfs.h"
9a844f451SNathan Scott #include "xfs_fs.h"
1070a9883cSDave Chinner #include "xfs_shared.h"
11239880efSDave Chinner #include "xfs_format.h"
12239880efSDave Chinner #include "xfs_log_format.h"
13239880efSDave Chinner #include "xfs_trans_resv.h"
141da177e4SLinus Torvalds #include "xfs_mount.h"
153ab78df2SDarrick J. Wong #include "xfs_defer.h"
16a4fbe6abSDave Chinner #include "xfs_inode.h"
17c24b5dfaSDave Chinner #include "xfs_dir2.h"
18c24b5dfaSDave Chinner #include "xfs_attr.h"
19239880efSDave Chinner #include "xfs_trans_space.h"
20239880efSDave Chinner #include "xfs_trans.h"
211da177e4SLinus Torvalds #include "xfs_buf_item.h"
22a844f451SNathan Scott #include "xfs_inode_item.h"
23784eb7d8SDave Chinner #include "xfs_iunlink_item.h"
24a844f451SNathan Scott #include "xfs_ialloc.h"
25a844f451SNathan Scott #include "xfs_bmap.h"
2668988114SDave Chinner #include "xfs_bmap_util.h"
27e9e899a2SDarrick J. Wong #include "xfs_errortag.h"
281da177e4SLinus Torvalds #include "xfs_error.h"
291da177e4SLinus Torvalds #include "xfs_quota.h"
302a82b8beSDavid Chinner #include "xfs_filestream.h"
310b1b213fSChristoph Hellwig #include "xfs_trace.h"
3233479e05SDave Chinner #include "xfs_icache.h"
33c24b5dfaSDave Chinner #include "xfs_symlink.h"
34239880efSDave Chinner #include "xfs_trans_priv.h"
35239880efSDave Chinner #include "xfs_log.h"
36a4fbe6abSDave Chinner #include "xfs_bmap_btree.h"
37aa8968f2SDarrick J. Wong #include "xfs_reflink.h"
389bbafc71SDave Chinner #include "xfs_ag.h"
3901728b44SDave Chinner #include "xfs_log_priv.h"
401da177e4SLinus Torvalds 
41182696fbSDarrick J. Wong struct kmem_cache *xfs_inode_cache;
421da177e4SLinus Torvalds 
431da177e4SLinus Torvalds /*
448f04c47aSChristoph Hellwig  * Used in xfs_itruncate_extents().  This is the maximum number of extents
451da177e4SLinus Torvalds  * freed from a file in a single transaction.
461da177e4SLinus Torvalds  */
471da177e4SLinus Torvalds #define	XFS_ITRUNC_MAX_EXTENTS	2
481da177e4SLinus Torvalds 
4954d7b5c1SDave Chinner STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
50f40aadb2SDave Chinner STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
51f40aadb2SDave Chinner 	struct xfs_inode *);
52ab297431SZhi Yong Wu 
532a0ec1d9SDave Chinner /*
542a0ec1d9SDave Chinner  * Helper function to extract the extent size hint from an inode.
552a0ec1d9SDave Chinner  */
562a0ec1d9SDave Chinner xfs_extlen_t
572a0ec1d9SDave Chinner xfs_get_extsz_hint(
582a0ec1d9SDave Chinner 	struct xfs_inode	*ip)
592a0ec1d9SDave Chinner {
60bdb2ed2dSChristoph Hellwig 	/*
61bdb2ed2dSChristoph Hellwig 	 * No point in aligning allocations if we need to COW to actually
62bdb2ed2dSChristoph Hellwig 	 * write to them.
63bdb2ed2dSChristoph Hellwig 	 */
64bdb2ed2dSChristoph Hellwig 	if (xfs_is_always_cow_inode(ip))
65bdb2ed2dSChristoph Hellwig 		return 0;
66db07349dSChristoph Hellwig 	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
67031474c2SChristoph Hellwig 		return ip->i_extsize;
682a0ec1d9SDave Chinner 	if (XFS_IS_REALTIME_INODE(ip))
692a0ec1d9SDave Chinner 		return ip->i_mount->m_sb.sb_rextsize;
702a0ec1d9SDave Chinner 	return 0;
712a0ec1d9SDave Chinner }
722a0ec1d9SDave Chinner 
73fa96acadSDave Chinner /*
74f7ca3522SDarrick J. Wong  * Helper function to extract CoW extent size hint from inode.
75f7ca3522SDarrick J. Wong  * Between the extent size hint and the CoW extent size hint, we
76e153aa79SDarrick J. Wong  * return the greater of the two.  If the value is zero (automatic),
77e153aa79SDarrick J. Wong  * use the default size.
78f7ca3522SDarrick J. Wong  */
79f7ca3522SDarrick J. Wong xfs_extlen_t
80f7ca3522SDarrick J. Wong xfs_get_cowextsz_hint(
81f7ca3522SDarrick J. Wong 	struct xfs_inode	*ip)
82f7ca3522SDarrick J. Wong {
83f7ca3522SDarrick J. Wong 	xfs_extlen_t		a, b;
84f7ca3522SDarrick J. Wong 
85f7ca3522SDarrick J. Wong 	a = 0;
863e09ab8fSChristoph Hellwig 	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
87b33ce57dSChristoph Hellwig 		a = ip->i_cowextsize;
88f7ca3522SDarrick J. Wong 	b = xfs_get_extsz_hint(ip);
89f7ca3522SDarrick J. Wong 
90e153aa79SDarrick J. Wong 	a = max(a, b);
91e153aa79SDarrick J. Wong 	if (a == 0)
92e153aa79SDarrick J. Wong 		return XFS_DEFAULT_COWEXTSZ_HINT;
93f7ca3522SDarrick J. Wong 	return a;
94f7ca3522SDarrick J. Wong }
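
/*
 * Worked example (illustrative only, not from this file): for an inode with
 * the COWEXTSIZE flag set, i_cowextsize of 32 blocks and an extent size hint
 * of 64 blocks, xfs_get_cowextsz_hint() returns 64 (the greater of the two);
 * if both hints are zero it falls back to XFS_DEFAULT_COWEXTSZ_HINT.
 */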
95f7ca3522SDarrick J. Wong 
96f7ca3522SDarrick J. Wong /*
97efa70be1SChristoph Hellwig  * These two are wrapper routines around the xfs_ilock() routine used to
98efa70be1SChristoph Hellwig  * centralize some grungy code.  They are used in places that wish to lock the
99efa70be1SChristoph Hellwig  * inode solely for reading the extents.  The reason these places can't just
100efa70be1SChristoph Hellwig  * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
101efa70be1SChristoph Hellwig  * reading in of the extents from disk for a file in b-tree format.  If the
102efa70be1SChristoph Hellwig  * inode is in b-tree format, then we need to lock the inode exclusively until
103efa70be1SChristoph Hellwig  * the extents are read in.  Locking it exclusively all the time would limit
104efa70be1SChristoph Hellwig  * our parallelism unnecessarily, though.  What we do instead is check to see
105efa70be1SChristoph Hellwig  * if the extents have been read in yet, and only lock the inode exclusively
106efa70be1SChristoph Hellwig  * if they have not.
107fa96acadSDave Chinner  *
108efa70be1SChristoph Hellwig  * The functions return a value which should be given to the corresponding
10901f4f327SChristoph Hellwig  * xfs_iunlock() call.
110fa96acadSDave Chinner  */
111fa96acadSDave Chinner uint
112309ecac8SChristoph Hellwig xfs_ilock_data_map_shared(
113309ecac8SChristoph Hellwig 	struct xfs_inode	*ip)
114fa96acadSDave Chinner {
115309ecac8SChristoph Hellwig 	uint			lock_mode = XFS_ILOCK_SHARED;
116fa96acadSDave Chinner 
117b2197a36SChristoph Hellwig 	if (xfs_need_iread_extents(&ip->i_df))
118fa96acadSDave Chinner 		lock_mode = XFS_ILOCK_EXCL;
119fa96acadSDave Chinner 	xfs_ilock(ip, lock_mode);
120fa96acadSDave Chinner 	return lock_mode;
121fa96acadSDave Chinner }
122fa96acadSDave Chinner 
123efa70be1SChristoph Hellwig uint
124efa70be1SChristoph Hellwig xfs_ilock_attr_map_shared(
125efa70be1SChristoph Hellwig 	struct xfs_inode	*ip)
126fa96acadSDave Chinner {
127efa70be1SChristoph Hellwig 	uint			lock_mode = XFS_ILOCK_SHARED;
128efa70be1SChristoph Hellwig 
129932b42c6SDarrick J. Wong 	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
130efa70be1SChristoph Hellwig 		lock_mode = XFS_ILOCK_EXCL;
131efa70be1SChristoph Hellwig 	xfs_ilock(ip, lock_mode);
132efa70be1SChristoph Hellwig 	return lock_mode;
133fa96acadSDave Chinner }
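
/*
 * Minimal caller sketch (illustrative only): the returned lock mode must be
 * handed back to xfs_iunlock(), because the helper may have taken the ILOCK
 * exclusively in order to read the extents in:
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	... walk the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */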
134fa96acadSDave Chinner 
135fa96acadSDave Chinner /*
136ca76a761SKaixu Xia  * You can't set both SHARED and EXCL for the same lock,
137ca76a761SKaixu Xia  * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
138ca76a761SKaixu Xia  * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
139ca76a761SKaixu Xia  * to set in lock_flags.
140ca76a761SKaixu Xia  */
141ca76a761SKaixu Xia static inline void
142ca76a761SKaixu Xia xfs_lock_flags_assert(
143ca76a761SKaixu Xia 	uint		lock_flags)
144ca76a761SKaixu Xia {
145ca76a761SKaixu Xia 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
146ca76a761SKaixu Xia 		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
147ca76a761SKaixu Xia 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
148ca76a761SKaixu Xia 		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
149ca76a761SKaixu Xia 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
150ca76a761SKaixu Xia 		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
151ca76a761SKaixu Xia 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
152ca76a761SKaixu Xia 	ASSERT(lock_flags != 0);
153ca76a761SKaixu Xia }
154ca76a761SKaixu Xia 
155ca76a761SKaixu Xia /*
15665523218SChristoph Hellwig  * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
1572433480aSJan Kara  * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
15865523218SChristoph Hellwig  * various combinations of the locks to be obtained.
159fa96acadSDave Chinner  *
160653c60b6SDave Chinner  * The 3 locks should always be ordered so that the IO lock is obtained first,
161653c60b6SDave Chinner  * the mmap lock second and the ilock last in order to prevent deadlock.
162fa96acadSDave Chinner  *
163653c60b6SDave Chinner  * Basic locking order:
164653c60b6SDave Chinner  *
1652433480aSJan Kara  * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
166653c60b6SDave Chinner  *
167c1e8d7c6SMichel Lespinasse  * mmap_lock locking order:
168653c60b6SDave Chinner  *
169c1e8d7c6SMichel Lespinasse  * i_rwsem -> page lock -> mmap_lock
1702433480aSJan Kara  * mmap_lock -> invalidate_lock -> page_lock
171653c60b6SDave Chinner  *
172c1e8d7c6SMichel Lespinasse  * The difference in mmap_lock locking order means that we cannot hold the
1732433480aSJan Kara  * invalidate_lock over syscall-based read(2)/write(2) IO. These IO paths
1742433480aSJan Kara  * can fault in pages during copy in/out (for buffered IO) or require the
1752433480aSJan Kara  * mmap_lock in get_user_pages() to map the user pages into the kernel address
1762433480aSJan Kara  * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
1772433480aSJan Kara  * fault because page faults already hold the mmap_lock.
178653c60b6SDave Chinner  *
179653c60b6SDave Chinner  * Hence to serialise fully against both syscall and mmap based IO, we need to
1802433480aSJan Kara  * take both the i_rwsem and the invalidate_lock. These locks should *only* be
1812433480aSJan Kara  * both taken in places where we need to invalidate the page cache in a race
182653c60b6SDave Chinner  * free manner (e.g. truncate, hole punch and other extent manipulation
183653c60b6SDave Chinner  * functions).
184fa96acadSDave Chinner  */
185fa96acadSDave Chinner void
186fa96acadSDave Chinner xfs_ilock(
187fa96acadSDave Chinner 	xfs_inode_t		*ip,
188fa96acadSDave Chinner 	uint			lock_flags)
189fa96acadSDave Chinner {
190fa96acadSDave Chinner 	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
191fa96acadSDave Chinner 
192ca76a761SKaixu Xia 	xfs_lock_flags_assert(lock_flags);
193fa96acadSDave Chinner 
19465523218SChristoph Hellwig 	if (lock_flags & XFS_IOLOCK_EXCL) {
19565523218SChristoph Hellwig 		down_write_nested(&VFS_I(ip)->i_rwsem,
19665523218SChristoph Hellwig 				  XFS_IOLOCK_DEP(lock_flags));
19765523218SChristoph Hellwig 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
19865523218SChristoph Hellwig 		down_read_nested(&VFS_I(ip)->i_rwsem,
19965523218SChristoph Hellwig 				 XFS_IOLOCK_DEP(lock_flags));
20065523218SChristoph Hellwig 	}
201fa96acadSDave Chinner 
2022433480aSJan Kara 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
2032433480aSJan Kara 		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
2042433480aSJan Kara 				  XFS_MMAPLOCK_DEP(lock_flags));
2052433480aSJan Kara 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
2062433480aSJan Kara 		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
2072433480aSJan Kara 				 XFS_MMAPLOCK_DEP(lock_flags));
2082433480aSJan Kara 	}
209653c60b6SDave Chinner 
210fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
211fa96acadSDave Chinner 		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
212fa96acadSDave Chinner 	else if (lock_flags & XFS_ILOCK_SHARED)
213fa96acadSDave Chinner 		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
214fa96acadSDave Chinner }
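
/*
 * Hedged usage sketch (illustrative, not a caller in this file): an extent
 * manipulation path takes the locks in the documented order and drops them
 * with the same flags:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	... invalidate the page cache, then take XFS_ILOCK_EXCL under the
 *	... transaction that modifies the extent map ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */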
215fa96acadSDave Chinner 
216fa96acadSDave Chinner /*
217fa96acadSDave Chinner  * This is just like xfs_ilock(), except that the caller
218fa96acadSDave Chinner  * is guaranteed not to sleep.  It returns 1 if it gets
219fa96acadSDave Chinner  * the requested locks and 0 otherwise.  If the IO lock is
220fa96acadSDave Chinner  * obtained but the inode lock cannot be, then the IO lock
221fa96acadSDave Chinner  * is dropped before returning.
222fa96acadSDave Chinner  *
223fa96acadSDave Chinner  * ip -- the inode being locked
224fa96acadSDave Chinner  * lock_flags -- this parameter indicates which of the inode's locks
225fa96acadSDave Chinner  *       are to be locked.  See the comment for xfs_ilock() for a list
226fa96acadSDave Chinner  *	 of valid values.
227fa96acadSDave Chinner  */
228fa96acadSDave Chinner int
229fa96acadSDave Chinner xfs_ilock_nowait(
230fa96acadSDave Chinner 	xfs_inode_t		*ip,
231fa96acadSDave Chinner 	uint			lock_flags)
232fa96acadSDave Chinner {
233fa96acadSDave Chinner 	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
234fa96acadSDave Chinner 
235ca76a761SKaixu Xia 	xfs_lock_flags_assert(lock_flags);
236fa96acadSDave Chinner 
237fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL) {
23865523218SChristoph Hellwig 		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
239fa96acadSDave Chinner 			goto out;
240fa96acadSDave Chinner 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
24165523218SChristoph Hellwig 		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
242fa96acadSDave Chinner 			goto out;
243fa96acadSDave Chinner 	}
244653c60b6SDave Chinner 
245653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
2462433480aSJan Kara 		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
247653c60b6SDave Chinner 			goto out_undo_iolock;
248653c60b6SDave Chinner 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
2492433480aSJan Kara 		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
250653c60b6SDave Chinner 			goto out_undo_iolock;
251653c60b6SDave Chinner 	}
252653c60b6SDave Chinner 
253fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL) {
254fa96acadSDave Chinner 		if (!mrtryupdate(&ip->i_lock))
255653c60b6SDave Chinner 			goto out_undo_mmaplock;
256fa96acadSDave Chinner 	} else if (lock_flags & XFS_ILOCK_SHARED) {
257fa96acadSDave Chinner 		if (!mrtryaccess(&ip->i_lock))
258653c60b6SDave Chinner 			goto out_undo_mmaplock;
259fa96acadSDave Chinner 	}
260fa96acadSDave Chinner 	return 1;
261fa96acadSDave Chinner 
262653c60b6SDave Chinner out_undo_mmaplock:
263653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
2642433480aSJan Kara 		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
265653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
2662433480aSJan Kara 		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
267fa96acadSDave Chinner out_undo_iolock:
268fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
26965523218SChristoph Hellwig 		up_write(&VFS_I(ip)->i_rwsem);
270fa96acadSDave Chinner 	else if (lock_flags & XFS_IOLOCK_SHARED)
27165523218SChristoph Hellwig 		up_read(&VFS_I(ip)->i_rwsem);
272fa96acadSDave Chinner out:
273fa96acadSDave Chinner 	return 0;
274fa96acadSDave Chinner }
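
/*
 * Hypothetical non-blocking caller pattern (illustrative only): back off with
 * -EAGAIN instead of sleeping when the lock cannot be taken immediately:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
 *		return -EAGAIN;
 */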
275fa96acadSDave Chinner 
276fa96acadSDave Chinner /*
277fa96acadSDave Chinner  * xfs_iunlock() is used to drop the inode locks acquired with
278fa96acadSDave Chinner  * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
279fa96acadSDave Chinner  * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
280fa96acadSDave Chinner  * that we know which locks to drop.
281fa96acadSDave Chinner  *
282fa96acadSDave Chinner  * ip -- the inode being unlocked
283fa96acadSDave Chinner  * lock_flags -- this parameter indicates which of the inode's locks
284fa96acadSDave Chinner  *       are to be unlocked.  See the comment for xfs_ilock() for a list
285fa96acadSDave Chinner  *	 of valid values for this parameter.
286fa96acadSDave Chinner  *
287fa96acadSDave Chinner  */
288fa96acadSDave Chinner void
289fa96acadSDave Chinner xfs_iunlock(
290fa96acadSDave Chinner 	xfs_inode_t		*ip,
291fa96acadSDave Chinner 	uint			lock_flags)
292fa96acadSDave Chinner {
293ca76a761SKaixu Xia 	xfs_lock_flags_assert(lock_flags);
294fa96acadSDave Chinner 
295fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
29665523218SChristoph Hellwig 		up_write(&VFS_I(ip)->i_rwsem);
297fa96acadSDave Chinner 	else if (lock_flags & XFS_IOLOCK_SHARED)
29865523218SChristoph Hellwig 		up_read(&VFS_I(ip)->i_rwsem);
299fa96acadSDave Chinner 
300653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
3012433480aSJan Kara 		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
302653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
3032433480aSJan Kara 		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
304653c60b6SDave Chinner 
305fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
306fa96acadSDave Chinner 		mrunlock_excl(&ip->i_lock);
307fa96acadSDave Chinner 	else if (lock_flags & XFS_ILOCK_SHARED)
308fa96acadSDave Chinner 		mrunlock_shared(&ip->i_lock);
309fa96acadSDave Chinner 
310fa96acadSDave Chinner 	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
311fa96acadSDave Chinner }
312fa96acadSDave Chinner 
313fa96acadSDave Chinner /*
314fa96acadSDave Chinner  * Give up write locks.  The I/O lock cannot be held nested
315fa96acadSDave Chinner  * if it is being demoted.
316fa96acadSDave Chinner  */
317fa96acadSDave Chinner void
318fa96acadSDave Chinner xfs_ilock_demote(
319fa96acadSDave Chinner 	xfs_inode_t		*ip,
320fa96acadSDave Chinner 	uint			lock_flags)
321fa96acadSDave Chinner {
322653c60b6SDave Chinner 	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
323653c60b6SDave Chinner 	ASSERT((lock_flags &
324653c60b6SDave Chinner 		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
325fa96acadSDave Chinner 
326fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
327fa96acadSDave Chinner 		mrdemote(&ip->i_lock);
328653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
3292433480aSJan Kara 		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
330fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
33165523218SChristoph Hellwig 		downgrade_write(&VFS_I(ip)->i_rwsem);
332fa96acadSDave Chinner 
333fa96acadSDave Chinner 	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
334fa96acadSDave Chinner }
335fa96acadSDave Chinner 
336742ae1e3SDave Chinner #if defined(DEBUG) || defined(XFS_WARN)
337e31cbde7SPavel Reichl static inline bool
338e31cbde7SPavel Reichl __xfs_rwsem_islocked(
339e31cbde7SPavel Reichl 	struct rw_semaphore	*rwsem,
340e31cbde7SPavel Reichl 	bool			shared)
341e31cbde7SPavel Reichl {
342e31cbde7SPavel Reichl 	if (!debug_locks)
343e31cbde7SPavel Reichl 		return rwsem_is_locked(rwsem);
344e31cbde7SPavel Reichl 
345e31cbde7SPavel Reichl 	if (!shared)
346e31cbde7SPavel Reichl 		return lockdep_is_held_type(rwsem, 0);
347e31cbde7SPavel Reichl 
348e31cbde7SPavel Reichl 	/*
349e31cbde7SPavel Reichl 	 * We are checking that the lock is held at least in shared
350e31cbde7SPavel Reichl 	 * mode but don't care that it might be held exclusively
351e31cbde7SPavel Reichl 	 * (i.e. shared | excl). Hence we check if the lock is held
352e31cbde7SPavel Reichl 	 * in any mode rather than an explicit shared mode.
353e31cbde7SPavel Reichl 	 */
354e31cbde7SPavel Reichl 	return lockdep_is_held_type(rwsem, -1);
355e31cbde7SPavel Reichl }
356e31cbde7SPavel Reichl 
357e31cbde7SPavel Reichl bool
358fa96acadSDave Chinner xfs_isilocked(
359e31cbde7SPavel Reichl 	struct xfs_inode	*ip,
360fa96acadSDave Chinner 	uint			lock_flags)
361fa96acadSDave Chinner {
362fa96acadSDave Chinner 	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
363fa96acadSDave Chinner 		if (!(lock_flags & XFS_ILOCK_SHARED))
364fa96acadSDave Chinner 			return !!ip->i_lock.mr_writer;
365fa96acadSDave Chinner 		return rwsem_is_locked(&ip->i_lock.mr_lock);
366fa96acadSDave Chinner 	}
367fa96acadSDave Chinner 
368653c60b6SDave Chinner 	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
36982af8806SKaixu Xia 		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
37082af8806SKaixu Xia 				(lock_flags & XFS_MMAPLOCK_SHARED));
371653c60b6SDave Chinner 	}
372653c60b6SDave Chinner 
373fa96acadSDave Chinner 	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
374e31cbde7SPavel Reichl 		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
375e31cbde7SPavel Reichl 				(lock_flags & XFS_IOLOCK_SHARED));
376fa96acadSDave Chinner 	}
377fa96acadSDave Chinner 
378fa96acadSDave Chinner 	ASSERT(0);
379e31cbde7SPavel Reichl 	return false;
380fa96acadSDave Chinner }
381fa96acadSDave Chinner #endif
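
/*
 * Typical (illustrative) use of the check above is in debug assertions, e.g.:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */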
382fa96acadSDave Chinner 
383b6a9947eSDave Chinner /*
384b6a9947eSDave Chinner  * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
385b6a9947eSDave Chinner  * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
386b6a9947eSDave Chinner  * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
387b6a9947eSDave Chinner  * errors and warnings.
388b6a9947eSDave Chinner  */
389b6a9947eSDave Chinner #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
3903403ccc0SDave Chinner static bool
3913403ccc0SDave Chinner xfs_lockdep_subclass_ok(
3923403ccc0SDave Chinner 	int subclass)
3933403ccc0SDave Chinner {
3943403ccc0SDave Chinner 	return subclass < MAX_LOCKDEP_SUBCLASSES;
3953403ccc0SDave Chinner }
3963403ccc0SDave Chinner #else
3973403ccc0SDave Chinner #define xfs_lockdep_subclass_ok(subclass)	(true)
3983403ccc0SDave Chinner #endif
3993403ccc0SDave Chinner 
400c24b5dfaSDave Chinner /*
401653c60b6SDave Chinner  * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
4020952c818SDave Chinner  * value. This can be called for any type of inode lock combination, including
4030952c818SDave Chinner  * parent locking. Care must be taken to ensure we don't overrun the subclass
4040952c818SDave Chinner  * storage fields in the class mask we build.
405c24b5dfaSDave Chinner  */
406a1033753SDave Chinner static inline uint
407a1033753SDave Chinner xfs_lock_inumorder(
408a1033753SDave Chinner 	uint	lock_mode,
409a1033753SDave Chinner 	uint	subclass)
410c24b5dfaSDave Chinner {
411a1033753SDave Chinner 	uint	class = 0;
4120952c818SDave Chinner 
4130952c818SDave Chinner 	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
4140952c818SDave Chinner 			      XFS_ILOCK_RTSUM)));
4153403ccc0SDave Chinner 	ASSERT(xfs_lockdep_subclass_ok(subclass));
4160952c818SDave Chinner 
417653c60b6SDave Chinner 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
4180952c818SDave Chinner 		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
4190952c818SDave Chinner 		class += subclass << XFS_IOLOCK_SHIFT;
420653c60b6SDave Chinner 	}
421653c60b6SDave Chinner 
422653c60b6SDave Chinner 	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
4230952c818SDave Chinner 		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
4240952c818SDave Chinner 		class += subclass << XFS_MMAPLOCK_SHIFT;
425653c60b6SDave Chinner 	}
426653c60b6SDave Chinner 
4270952c818SDave Chinner 	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
4280952c818SDave Chinner 		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
4290952c818SDave Chinner 		class += subclass << XFS_ILOCK_SHIFT;
4300952c818SDave Chinner 	}
431c24b5dfaSDave Chinner 
4320952c818SDave Chinner 	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
433c24b5dfaSDave Chinner }
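
/*
 * For example (illustrative): xfs_lock_inumorder(XFS_ILOCK_EXCL, 2) returns
 * XFS_ILOCK_EXCL with lockdep subclass 2 shifted into the XFS_ILOCK_SHIFT
 * bits, which is what xfs_lock_inodes() passes when locking the third inode
 * in its array.
 */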
434c24b5dfaSDave Chinner 
435c24b5dfaSDave Chinner /*
43695afcf5cSDave Chinner  * The following routine will lock n inodes in exclusive mode.  We assume the
43795afcf5cSDave Chinner  * caller calls us with the inodes in i_ino order.
438c24b5dfaSDave Chinner  *
43995afcf5cSDave Chinner  * We need to detect deadlock where an inode that we lock is in the AIL and we
44095afcf5cSDave Chinner  * start waiting for another inode that is locked by a thread in a long running
44195afcf5cSDave Chinner  * transaction (such as truncate). This can result in deadlock since the long
44295afcf5cSDave Chinner  * running trans might need to wait for the inode we just locked in order to
44395afcf5cSDave Chinner  * push the tail and free space in the log.
4440952c818SDave Chinner  *
4450952c818SDave Chinner  * xfs_lock_inodes() can only be used to lock one type of lock at a time -
4460952c818SDave Chinner  * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
4470952c818SDave Chinner  * lock more than one at a time, lockdep will report false positives saying we
4480952c818SDave Chinner  * have violated locking orders.
449c24b5dfaSDave Chinner  */
4500d5a75e9SEric Sandeen static void
451c24b5dfaSDave Chinner xfs_lock_inodes(
452efe2330fSChristoph Hellwig 	struct xfs_inode	**ips,
453c24b5dfaSDave Chinner 	int			inodes,
454c24b5dfaSDave Chinner 	uint			lock_mode)
455c24b5dfaSDave Chinner {
456a1033753SDave Chinner 	int			attempts = 0;
457a1033753SDave Chinner 	uint			i;
458a1033753SDave Chinner 	int			j;
459a1033753SDave Chinner 	bool			try_lock;
460efe2330fSChristoph Hellwig 	struct xfs_log_item	*lp;
461c24b5dfaSDave Chinner 
4620952c818SDave Chinner 	/*
4630952c818SDave Chinner 	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
4640952c818SDave Chinner 	 * support an arbitrary depth of locking here, but absolute limits on
465b63da6c8SRandy Dunlap 	 * inodes depend on the type of locking and the limits placed by
4660952c818SDave Chinner 	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
4670952c818SDave Chinner 	 * the asserts.
4680952c818SDave Chinner 	 */
46995afcf5cSDave Chinner 	ASSERT(ips && inodes >= 2 && inodes <= 5);
4700952c818SDave Chinner 	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
4710952c818SDave Chinner 			    XFS_ILOCK_EXCL));
4720952c818SDave Chinner 	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
4730952c818SDave Chinner 			      XFS_ILOCK_SHARED)));
4740952c818SDave Chinner 	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
4750952c818SDave Chinner 		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
4760952c818SDave Chinner 	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
4770952c818SDave Chinner 		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
4780952c818SDave Chinner 
4790952c818SDave Chinner 	if (lock_mode & XFS_IOLOCK_EXCL) {
4800952c818SDave Chinner 		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
4810952c818SDave Chinner 	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
4820952c818SDave Chinner 		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
483c24b5dfaSDave Chinner 
484c24b5dfaSDave Chinner again:
485a1033753SDave Chinner 	try_lock = false;
486a1033753SDave Chinner 	i = 0;
487c24b5dfaSDave Chinner 	for (; i < inodes; i++) {
488c24b5dfaSDave Chinner 		ASSERT(ips[i]);
489c24b5dfaSDave Chinner 
490c24b5dfaSDave Chinner 		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
491c24b5dfaSDave Chinner 			continue;
492c24b5dfaSDave Chinner 
493c24b5dfaSDave Chinner 		/*
49495afcf5cSDave Chinner 		 * If try_lock is not set yet, make sure all locked inodes are
49595afcf5cSDave Chinner 		 * not in the AIL.  If any are, set try_lock to be used later.
496c24b5dfaSDave Chinner 		 */
497c24b5dfaSDave Chinner 		if (!try_lock) {
498c24b5dfaSDave Chinner 			for (j = (i - 1); j >= 0 && !try_lock; j--) {
499b3b14aacSChristoph Hellwig 				lp = &ips[j]->i_itemp->ili_item;
50022525c17SDave Chinner 				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
501a1033753SDave Chinner 					try_lock = true;
502c24b5dfaSDave Chinner 			}
503c24b5dfaSDave Chinner 		}
504c24b5dfaSDave Chinner 
505c24b5dfaSDave Chinner 		/*
506c24b5dfaSDave Chinner 		 * If any of the previous locks we have locked is in the AIL,
507c24b5dfaSDave Chinner 		 * we must TRY to get the second and subsequent locks. If
508c24b5dfaSDave Chinner 		 * we can't get any, we must release all we have
509c24b5dfaSDave Chinner 		 * and try again.
510c24b5dfaSDave Chinner 		 */
51195afcf5cSDave Chinner 		if (!try_lock) {
51295afcf5cSDave Chinner 			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
51395afcf5cSDave Chinner 			continue;
51495afcf5cSDave Chinner 		}
515c24b5dfaSDave Chinner 
51695afcf5cSDave Chinner 		/* try_lock means we have an inode locked that is in the AIL. */
517c24b5dfaSDave Chinner 		ASSERT(i != 0);
51895afcf5cSDave Chinner 		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
51995afcf5cSDave Chinner 			continue;
52095afcf5cSDave Chinner 
52195afcf5cSDave Chinner 		/*
52295afcf5cSDave Chinner 		 * Unlock all previous guys and try again.  xfs_iunlock will try
52395afcf5cSDave Chinner 		 * to push the tail if the inode is in the AIL.
52495afcf5cSDave Chinner 		 */
525c24b5dfaSDave Chinner 		attempts++;
526c24b5dfaSDave Chinner 		for (j = i - 1; j >= 0; j--) {
527c24b5dfaSDave Chinner 			/*
52895afcf5cSDave Chinner 			 * Check to see if we've already unlocked this one.  Not
52995afcf5cSDave Chinner 			 * the first one going back, and the inode ptr is the
53095afcf5cSDave Chinner 			 * same.
531c24b5dfaSDave Chinner 			 */
53295afcf5cSDave Chinner 			if (j != (i - 1) && ips[j] == ips[j + 1])
533c24b5dfaSDave Chinner 				continue;
534c24b5dfaSDave Chinner 
535c24b5dfaSDave Chinner 			xfs_iunlock(ips[j], lock_mode);
536c24b5dfaSDave Chinner 		}
537c24b5dfaSDave Chinner 
538c24b5dfaSDave Chinner 		if ((attempts % 5) == 0) {
539c24b5dfaSDave Chinner 			delay(1); /* Don't just spin the CPU */
540c24b5dfaSDave Chinner 		}
541c24b5dfaSDave Chinner 		goto again;
542c24b5dfaSDave Chinner 	}
543c24b5dfaSDave Chinner }
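
/*
 * Illustrative caller sketch (assumes an ips[] array already sorted by
 * i_ino, as the comment above requires):
 *
 *	xfs_lock_inodes(ips, num_inodes, XFS_ILOCK_EXCL);
 */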
544c24b5dfaSDave Chinner 
545c24b5dfaSDave Chinner /*
546d2c292d8SJan Kara  * xfs_lock_two_inodes() can only be used to lock the ilock. The iolock and
547d2c292d8SJan Kara  * mmaplock must be double-locked separately since we use i_rwsem and
548d2c292d8SJan Kara  * invalidate_lock for that. We now support taking one lock EXCL and the
549d2c292d8SJan Kara  * other SHARED.
550c24b5dfaSDave Chinner  */
551c24b5dfaSDave Chinner void
552c24b5dfaSDave Chinner xfs_lock_two_inodes(
5537c2d238aSDarrick J. Wong 	struct xfs_inode	*ip0,
5547c2d238aSDarrick J. Wong 	uint			ip0_mode,
5557c2d238aSDarrick J. Wong 	struct xfs_inode	*ip1,
5567c2d238aSDarrick J. Wong 	uint			ip1_mode)
557c24b5dfaSDave Chinner {
558c24b5dfaSDave Chinner 	int			attempts = 0;
559efe2330fSChristoph Hellwig 	struct xfs_log_item	*lp;
560c24b5dfaSDave Chinner 
5617c2d238aSDarrick J. Wong 	ASSERT(hweight32(ip0_mode) == 1);
5627c2d238aSDarrick J. Wong 	ASSERT(hweight32(ip1_mode) == 1);
5637c2d238aSDarrick J. Wong 	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
5647c2d238aSDarrick J. Wong 	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
565d2c292d8SJan Kara 	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
566d2c292d8SJan Kara 	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
567c24b5dfaSDave Chinner 	ASSERT(ip0->i_ino != ip1->i_ino);
568c24b5dfaSDave Chinner 
569c24b5dfaSDave Chinner 	if (ip0->i_ino > ip1->i_ino) {
5702a09b575SChangcheng Deng 		swap(ip0, ip1);
5712a09b575SChangcheng Deng 		swap(ip0_mode, ip1_mode);
572c24b5dfaSDave Chinner 	}
573c24b5dfaSDave Chinner 
574c24b5dfaSDave Chinner  again:
5757c2d238aSDarrick J. Wong 	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
576c24b5dfaSDave Chinner 
577c24b5dfaSDave Chinner 	/*
578c24b5dfaSDave Chinner 	 * If the first lock we have locked is in the AIL, we must TRY to get
579c24b5dfaSDave Chinner 	 * the second lock. If we can't get it, we must release the first one
580c24b5dfaSDave Chinner 	 * and try again.
581c24b5dfaSDave Chinner 	 */
582b3b14aacSChristoph Hellwig 	lp = &ip0->i_itemp->ili_item;
58322525c17SDave Chinner 	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
5847c2d238aSDarrick J. Wong 		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
5857c2d238aSDarrick J. Wong 			xfs_iunlock(ip0, ip0_mode);
586c24b5dfaSDave Chinner 			if ((++attempts % 5) == 0)
587c24b5dfaSDave Chinner 				delay(1); /* Don't just spin the CPU */
588c24b5dfaSDave Chinner 			goto again;
589c24b5dfaSDave Chinner 		}
590c24b5dfaSDave Chinner 	} else {
5917c2d238aSDarrick J. Wong 		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
592c24b5dfaSDave Chinner 	}
593c24b5dfaSDave Chinner }
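
/*
 * Illustrative call (not from this file): lock both inodes' ILOCKs in inode
 * number order for an extent-swap style operation:
 *
 *	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
 */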
594c24b5dfaSDave Chinner 
5951da177e4SLinus Torvalds uint
5961da177e4SLinus Torvalds xfs_ip2xflags(
59758f88ca2SDave Chinner 	struct xfs_inode	*ip)
5981da177e4SLinus Torvalds {
5994422501dSChristoph Hellwig 	uint			flags = 0;
6001da177e4SLinus Torvalds 
6014422501dSChristoph Hellwig 	if (ip->i_diflags & XFS_DIFLAG_ANY) {
6024422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
6034422501dSChristoph Hellwig 			flags |= FS_XFLAG_REALTIME;
6044422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
6054422501dSChristoph Hellwig 			flags |= FS_XFLAG_PREALLOC;
6064422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
6074422501dSChristoph Hellwig 			flags |= FS_XFLAG_IMMUTABLE;
6084422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_APPEND)
6094422501dSChristoph Hellwig 			flags |= FS_XFLAG_APPEND;
6104422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_SYNC)
6114422501dSChristoph Hellwig 			flags |= FS_XFLAG_SYNC;
6124422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
6134422501dSChristoph Hellwig 			flags |= FS_XFLAG_NOATIME;
6144422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
6154422501dSChristoph Hellwig 			flags |= FS_XFLAG_NODUMP;
6164422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
6174422501dSChristoph Hellwig 			flags |= FS_XFLAG_RTINHERIT;
6184422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
6194422501dSChristoph Hellwig 			flags |= FS_XFLAG_PROJINHERIT;
6204422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
6214422501dSChristoph Hellwig 			flags |= FS_XFLAG_NOSYMLINKS;
6224422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
6234422501dSChristoph Hellwig 			flags |= FS_XFLAG_EXTSIZE;
6244422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
6254422501dSChristoph Hellwig 			flags |= FS_XFLAG_EXTSZINHERIT;
6264422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
6274422501dSChristoph Hellwig 			flags |= FS_XFLAG_NODEFRAG;
6284422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
6294422501dSChristoph Hellwig 			flags |= FS_XFLAG_FILESTREAM;
6304422501dSChristoph Hellwig 	}
6314422501dSChristoph Hellwig 
6324422501dSChristoph Hellwig 	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
6334422501dSChristoph Hellwig 		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
6344422501dSChristoph Hellwig 			flags |= FS_XFLAG_DAX;
6354422501dSChristoph Hellwig 		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
6364422501dSChristoph Hellwig 			flags |= FS_XFLAG_COWEXTSIZE;
6374422501dSChristoph Hellwig 	}
6384422501dSChristoph Hellwig 
639932b42c6SDarrick J. Wong 	if (xfs_inode_has_attr_fork(ip))
6404422501dSChristoph Hellwig 		flags |= FS_XFLAG_HASATTR;
6414422501dSChristoph Hellwig 	return flags;
6421da177e4SLinus Torvalds }
6431da177e4SLinus Torvalds 
6441da177e4SLinus Torvalds /*
645c24b5dfaSDave Chinner  * Looks up an inode from "name". If ci_name is not NULL, then a CI match
646c24b5dfaSDave Chinner  * is allowed, otherwise it has to be an exact match. If a CI match is found,
647c24b5dfaSDave Chinner  * ci_name->name will point to the actual name (caller must free) or
648c24b5dfaSDave Chinner  * will be set to NULL if an exact match is found.
649c24b5dfaSDave Chinner  */
650c24b5dfaSDave Chinner int
651c24b5dfaSDave Chinner xfs_lookup(
652996b2329SDarrick J. Wong 	struct xfs_inode	*dp,
653996b2329SDarrick J. Wong 	const struct xfs_name	*name,
654996b2329SDarrick J. Wong 	struct xfs_inode	**ipp,
655c24b5dfaSDave Chinner 	struct xfs_name		*ci_name)
656c24b5dfaSDave Chinner {
657c24b5dfaSDave Chinner 	xfs_ino_t		inum;
658c24b5dfaSDave Chinner 	int			error;
659c24b5dfaSDave Chinner 
660c24b5dfaSDave Chinner 	trace_xfs_lookup(dp, name);
661c24b5dfaSDave Chinner 
66275c8c50fSDave Chinner 	if (xfs_is_shutdown(dp->i_mount))
6632451337dSDave Chinner 		return -EIO;
664c24b5dfaSDave Chinner 
665c24b5dfaSDave Chinner 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
666c24b5dfaSDave Chinner 	if (error)
667dbad7c99SDave Chinner 		goto out_unlock;
668c24b5dfaSDave Chinner 
669c24b5dfaSDave Chinner 	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
670c24b5dfaSDave Chinner 	if (error)
671c24b5dfaSDave Chinner 		goto out_free_name;
672c24b5dfaSDave Chinner 
673c24b5dfaSDave Chinner 	return 0;
674c24b5dfaSDave Chinner 
675c24b5dfaSDave Chinner out_free_name:
676c24b5dfaSDave Chinner 	if (ci_name)
677c24b5dfaSDave Chinner 		kmem_free(ci_name->name);
678dbad7c99SDave Chinner out_unlock:
679c24b5dfaSDave Chinner 	*ipp = NULL;
680c24b5dfaSDave Chinner 	return error;
681c24b5dfaSDave Chinner }
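
/*
 * Hedged caller sketch (illustrative only, xname and ci_name are local
 * variables of the hypothetical caller): when a ci_name is supplied and a
 * case-insensitive match is returned, the caller owns the returned name and
 * must free it:
 *
 *	error = xfs_lookup(dp, &xname, &ip, &ci_name);
 *	if (!error && ci_name.name)
 *		kmem_free(ci_name.name);
 */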
682c24b5dfaSDave Chinner 
6838a569d71SDarrick J. Wong /* Propagate di_flags from a parent inode to a child inode. */
6848a569d71SDarrick J. Wong static void
6858a569d71SDarrick J. Wong xfs_inode_inherit_flags(
6868a569d71SDarrick J. Wong 	struct xfs_inode	*ip,
6878a569d71SDarrick J. Wong 	const struct xfs_inode	*pip)
6888a569d71SDarrick J. Wong {
6898a569d71SDarrick J. Wong 	unsigned int		di_flags = 0;
690603f000bSDarrick J. Wong 	xfs_failaddr_t		failaddr;
6918a569d71SDarrick J. Wong 	umode_t			mode = VFS_I(ip)->i_mode;
6928a569d71SDarrick J. Wong 
6938a569d71SDarrick J. Wong 	if (S_ISDIR(mode)) {
694db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
6958a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_RTINHERIT;
696db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
6978a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
698031474c2SChristoph Hellwig 			ip->i_extsize = pip->i_extsize;
6998a569d71SDarrick J. Wong 		}
700db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
7018a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_PROJINHERIT;
7028a569d71SDarrick J. Wong 	} else if (S_ISREG(mode)) {
703db07349dSChristoph Hellwig 		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
70438c26bfdSDave Chinner 		    xfs_has_realtime(ip->i_mount))
7058a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_REALTIME;
706db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
7078a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_EXTSIZE;
708031474c2SChristoph Hellwig 			ip->i_extsize = pip->i_extsize;
7098a569d71SDarrick J. Wong 		}
7108a569d71SDarrick J. Wong 	}
711db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
7128a569d71SDarrick J. Wong 	    xfs_inherit_noatime)
7138a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NOATIME;
714db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
7158a569d71SDarrick J. Wong 	    xfs_inherit_nodump)
7168a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NODUMP;
717db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
7188a569d71SDarrick J. Wong 	    xfs_inherit_sync)
7198a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_SYNC;
720db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
7218a569d71SDarrick J. Wong 	    xfs_inherit_nosymlinks)
7228a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NOSYMLINKS;
723db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
7248a569d71SDarrick J. Wong 	    xfs_inherit_nodefrag)
7258a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NODEFRAG;
726db07349dSChristoph Hellwig 	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
7278a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_FILESTREAM;
7288a569d71SDarrick J. Wong 
729db07349dSChristoph Hellwig 	ip->i_diflags |= di_flags;
730603f000bSDarrick J. Wong 
731603f000bSDarrick J. Wong 	/*
732603f000bSDarrick J. Wong 	 * Inode verifiers on older kernels only check that the extent size
733603f000bSDarrick J. Wong 	 * hint is an integer multiple of the rt extent size on realtime files.
734603f000bSDarrick J. Wong 	 * They did not check the hint alignment on a directory with both
735603f000bSDarrick J. Wong 	 * rtinherit and extszinherit flags set.  If the misaligned hint is
736603f000bSDarrick J. Wong 	 * propagated from a directory into a new realtime file, new file
737603f000bSDarrick J. Wong 	 * allocations will fail due to math errors in the rt allocator and/or
738603f000bSDarrick J. Wong 	 * trip the verifiers.  Validate the hint settings in the new file so
739603f000bSDarrick J. Wong 	 * that we don't let broken hints propagate.
740603f000bSDarrick J. Wong 	 */
741603f000bSDarrick J. Wong 	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
742603f000bSDarrick J. Wong 			VFS_I(ip)->i_mode, ip->i_diflags);
743603f000bSDarrick J. Wong 	if (failaddr) {
744603f000bSDarrick J. Wong 		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
745603f000bSDarrick J. Wong 				   XFS_DIFLAG_EXTSZINHERIT);
746603f000bSDarrick J. Wong 		ip->i_extsize = 0;
747603f000bSDarrick J. Wong 	}
7488a569d71SDarrick J. Wong }
7498a569d71SDarrick J. Wong 
7508a569d71SDarrick J. Wong /* Propagate di_flags2 from a parent inode to a child inode. */
7518a569d71SDarrick J. Wong static void
7528a569d71SDarrick J. Wong xfs_inode_inherit_flags2(
7538a569d71SDarrick J. Wong 	struct xfs_inode	*ip,
7548a569d71SDarrick J. Wong 	const struct xfs_inode	*pip)
7558a569d71SDarrick J. Wong {
756603f000bSDarrick J. Wong 	xfs_failaddr_t		failaddr;
757603f000bSDarrick J. Wong 
7583e09ab8fSChristoph Hellwig 	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
7593e09ab8fSChristoph Hellwig 		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
760b33ce57dSChristoph Hellwig 		ip->i_cowextsize = pip->i_cowextsize;
7618a569d71SDarrick J. Wong 	}
7623e09ab8fSChristoph Hellwig 	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
7633e09ab8fSChristoph Hellwig 		ip->i_diflags2 |= XFS_DIFLAG2_DAX;
764603f000bSDarrick J. Wong 
765603f000bSDarrick J. Wong 	/* Don't let invalid cowextsize hints propagate. */
766603f000bSDarrick J. Wong 	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
767603f000bSDarrick J. Wong 			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
768603f000bSDarrick J. Wong 	if (failaddr) {
769603f000bSDarrick J. Wong 		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
770603f000bSDarrick J. Wong 		ip->i_cowextsize = 0;
771603f000bSDarrick J. Wong 	}
7728a569d71SDarrick J. Wong }
7738a569d71SDarrick J. Wong 
774c24b5dfaSDave Chinner /*
7751abcf261SDave Chinner  * Initialise a newly allocated inode and return the in-core inode to the
7761abcf261SDave Chinner  * caller locked exclusively.
7771da177e4SLinus Torvalds  */
778b652afd9SDave Chinner int
7791abcf261SDave Chinner xfs_init_new_inode(
780f2d40141SChristian Brauner 	struct mnt_idmap	*idmap,
7811abcf261SDave Chinner 	struct xfs_trans	*tp,
7821abcf261SDave Chinner 	struct xfs_inode	*pip,
7831abcf261SDave Chinner 	xfs_ino_t		ino,
784576b1d67SAl Viro 	umode_t			mode,
78531b084aeSNathan Scott 	xfs_nlink_t		nlink,
78666f36464SChristoph Hellwig 	dev_t			rdev,
7876743099cSArkadiusz Mi?kiewicz 	prid_t			prid,
788e6a688c3SDave Chinner 	bool			init_xattrs,
7891abcf261SDave Chinner 	struct xfs_inode	**ipp)
7901da177e4SLinus Torvalds {
79101ea173eSChristoph Hellwig 	struct inode		*dir = pip ? VFS_I(pip) : NULL;
79293848a99SChristoph Hellwig 	struct xfs_mount	*mp = tp->t_mountp;
7931abcf261SDave Chinner 	struct xfs_inode	*ip;
7941abcf261SDave Chinner 	unsigned int		flags;
7951da177e4SLinus Torvalds 	int			error;
79695582b00SDeepa Dinamani 	struct timespec64	tv;
7973987848cSDave Chinner 	struct inode		*inode;
7981da177e4SLinus Torvalds 
7991da177e4SLinus Torvalds 	/*
8008b26984dSDave Chinner 	 * Protect against obviously corrupt allocation btree records. Later
8018b26984dSDave Chinner 	 * xfs_iget checks will catch re-allocation of other active in-memory
8028b26984dSDave Chinner 	 * and on-disk inodes. If we don't catch reallocating the parent inode
8038b26984dSDave Chinner 	 * here we will deadlock in xfs_iget() so we have to do these checks
8048b26984dSDave Chinner 	 * first.
8058b26984dSDave Chinner 	 */
8068b26984dSDave Chinner 	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
8078b26984dSDave Chinner 		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
8088b26984dSDave Chinner 		return -EFSCORRUPTED;
8098b26984dSDave Chinner 	}
8108b26984dSDave Chinner 
8118b26984dSDave Chinner 	/*
8121abcf261SDave Chinner 	 * Get the in-core inode with the lock held exclusively to prevent
8131abcf261SDave Chinner 	 * others from looking at until we're done.
8141da177e4SLinus Torvalds 	 */
8151abcf261SDave Chinner 	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
816bf904248SDavid Chinner 	if (error)
8171da177e4SLinus Torvalds 		return error;
8181abcf261SDave Chinner 
8191da177e4SLinus Torvalds 	ASSERT(ip != NULL);
8203987848cSDave Chinner 	inode = VFS_I(ip);
82154d7b5c1SDave Chinner 	set_nlink(inode, nlink);
82266f36464SChristoph Hellwig 	inode->i_rdev = rdev;
823ceaf603cSChristoph Hellwig 	ip->i_projid = prid;
8241da177e4SLinus Torvalds 
8250560f31aSDave Chinner 	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
826c14329d3SChristian Brauner 		inode_fsuid_set(inode, idmap);
82701ea173eSChristoph Hellwig 		inode->i_gid = dir->i_gid;
82801ea173eSChristoph Hellwig 		inode->i_mode = mode;
8293d8f2821SChristoph Hellwig 	} else {
830f2d40141SChristian Brauner 		inode_init_owner(idmap, inode, dir, mode);
8311da177e4SLinus Torvalds 	}
8321da177e4SLinus Torvalds 
8331da177e4SLinus Torvalds 	/*
8341da177e4SLinus Torvalds 	 * If the group ID of the new file does not match the effective group
8351da177e4SLinus Torvalds 	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
8361da177e4SLinus Torvalds 	 * (and only if the irix_sgid_inherit compatibility variable is set).
8371da177e4SLinus Torvalds 	 */
83842b7cc11SChristian Brauner 	if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
839e67fe633SChristian Brauner 	    !vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
840c19b3b05SDave Chinner 		inode->i_mode &= ~S_ISGID;
8411da177e4SLinus Torvalds 
84213d2c10bSChristoph Hellwig 	ip->i_disk_size = 0;
843daf83964SChristoph Hellwig 	ip->i_df.if_nextents = 0;
8446e73a545SChristoph Hellwig 	ASSERT(ip->i_nblocks == 0);
845dff35fd4SChristoph Hellwig 
846a0a415e3SJeff Layton 	tv = inode_set_ctime_current(inode);
8473987848cSDave Chinner 	inode->i_mtime = tv;
8483987848cSDave Chinner 	inode->i_atime = tv;
849dff35fd4SChristoph Hellwig 
850031474c2SChristoph Hellwig 	ip->i_extsize = 0;
851db07349dSChristoph Hellwig 	ip->i_diflags = 0;
85293848a99SChristoph Hellwig 
85338c26bfdSDave Chinner 	if (xfs_has_v3inodes(mp)) {
854f0e28280SJeff Layton 		inode_set_iversion(inode, 1);
855b33ce57dSChristoph Hellwig 		ip->i_cowextsize = 0;
856e98d5e88SChristoph Hellwig 		ip->i_crtime = tv;
85793848a99SChristoph Hellwig 	}
85893848a99SChristoph Hellwig 
8591da177e4SLinus Torvalds 	flags = XFS_ILOG_CORE;
8601da177e4SLinus Torvalds 	switch (mode & S_IFMT) {
8611da177e4SLinus Torvalds 	case S_IFIFO:
8621da177e4SLinus Torvalds 	case S_IFCHR:
8631da177e4SLinus Torvalds 	case S_IFBLK:
8641da177e4SLinus Torvalds 	case S_IFSOCK:
865f7e67b20SChristoph Hellwig 		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
8661da177e4SLinus Torvalds 		flags |= XFS_ILOG_DEV;
8671da177e4SLinus Torvalds 		break;
8681da177e4SLinus Torvalds 	case S_IFREG:
8691da177e4SLinus Torvalds 	case S_IFDIR:
870db07349dSChristoph Hellwig 		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
8718a569d71SDarrick J. Wong 			xfs_inode_inherit_flags(ip, pip);
8723e09ab8fSChristoph Hellwig 		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
8738a569d71SDarrick J. Wong 			xfs_inode_inherit_flags2(ip, pip);
87453004ee7SGustavo A. R. Silva 		fallthrough;
8751da177e4SLinus Torvalds 	case S_IFLNK:
876f7e67b20SChristoph Hellwig 		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
877fcacbc3fSChristoph Hellwig 		ip->i_df.if_bytes = 0;
8786bdcf26aSChristoph Hellwig 		ip->i_df.if_u1.if_root = NULL;
8791da177e4SLinus Torvalds 		break;
8801da177e4SLinus Torvalds 	default:
8811da177e4SLinus Torvalds 		ASSERT(0);
8821da177e4SLinus Torvalds 	}
8831da177e4SLinus Torvalds 
8841da177e4SLinus Torvalds 	/*
885e6a688c3SDave Chinner 	 * If we need to create attributes immediately after allocating the
886e6a688c3SDave Chinner 	 * inode, initialise an empty attribute fork right now. We use the
887e6a688c3SDave Chinner 	 * default fork offset for attributes here as we don't know exactly what
888e6a688c3SDave Chinner 	 * size or how many attributes we might be adding. We can do this
889e6a688c3SDave Chinner 	 * safely here because we know the data fork is completely empty and
890e6a688c3SDave Chinner 	 * this saves us from needing to run a separate transaction to set the
891e6a688c3SDave Chinner 	 * fork offset in the immediate future.
892e6a688c3SDave Chinner 	 */
89338c26bfdSDave Chinner 	if (init_xattrs && xfs_has_attr(mp)) {
8947821ea30SChristoph Hellwig 		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
8952ed5b09bSDarrick J. Wong 		xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
896e6a688c3SDave Chinner 	}
897e6a688c3SDave Chinner 
898e6a688c3SDave Chinner 	/*
8991da177e4SLinus Torvalds 	 * Log the new values stuffed into the inode.
9001da177e4SLinus Torvalds 	 */
901ddc3415aSChristoph Hellwig 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
9021da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, flags);
9031da177e4SLinus Torvalds 
90458c90473SDave Chinner 	/* now that we have an i_mode we can setup the inode structure */
90541be8bedSChristoph Hellwig 	xfs_setup_inode(ip);
9061da177e4SLinus Torvalds 
9071da177e4SLinus Torvalds 	*ipp = ip;
9081da177e4SLinus Torvalds 	return 0;
9091da177e4SLinus Torvalds }
9101da177e4SLinus Torvalds 
911e546cb79SDave Chinner /*
91254d7b5c1SDave Chinner  * Decrement the link count on an inode & log the change.  If this causes the
91354d7b5c1SDave Chinner  * link count to go to zero, move the inode to the AGI unlinked list so that it can
91454d7b5c1SDave Chinner  * be freed when the last active reference goes away via xfs_inactive().
915e546cb79SDave Chinner  */
9160d5a75e9SEric Sandeen static int			/* error */
917e546cb79SDave Chinner xfs_droplink(
918e546cb79SDave Chinner 	xfs_trans_t *tp,
919e546cb79SDave Chinner 	xfs_inode_t *ip)
920e546cb79SDave Chinner {
921e546cb79SDave Chinner 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
922e546cb79SDave Chinner 
923e546cb79SDave Chinner 	drop_nlink(VFS_I(ip));
924e546cb79SDave Chinner 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
925e546cb79SDave Chinner 
92654d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink)
92754d7b5c1SDave Chinner 		return 0;
92854d7b5c1SDave Chinner 
92954d7b5c1SDave Chinner 	return xfs_iunlink(tp, ip);
930e546cb79SDave Chinner }
931e546cb79SDave Chinner 
932e546cb79SDave Chinner /*
933e546cb79SDave Chinner  * Increment the link count on an inode & log the change.
934e546cb79SDave Chinner  */
93591083269SEric Sandeen static void
936e546cb79SDave Chinner xfs_bumplink(
937e546cb79SDave Chinner 	xfs_trans_t *tp,
938e546cb79SDave Chinner 	xfs_inode_t *ip)
939e546cb79SDave Chinner {
940e546cb79SDave Chinner 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
941e546cb79SDave Chinner 
942e546cb79SDave Chinner 	inc_nlink(VFS_I(ip));
943e546cb79SDave Chinner 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
944e546cb79SDave Chinner }
945e546cb79SDave Chinner 
946c24b5dfaSDave Chinner int
947c24b5dfaSDave Chinner xfs_create(
948f2d40141SChristian Brauner 	struct mnt_idmap	*idmap,
949c24b5dfaSDave Chinner 	xfs_inode_t		*dp,
950c24b5dfaSDave Chinner 	struct xfs_name		*name,
951c24b5dfaSDave Chinner 	umode_t			mode,
95266f36464SChristoph Hellwig 	dev_t			rdev,
953e6a688c3SDave Chinner 	bool			init_xattrs,
954c24b5dfaSDave Chinner 	xfs_inode_t		**ipp)
955c24b5dfaSDave Chinner {
956c24b5dfaSDave Chinner 	int			is_dir = S_ISDIR(mode);
957c24b5dfaSDave Chinner 	struct xfs_mount	*mp = dp->i_mount;
958c24b5dfaSDave Chinner 	struct xfs_inode	*ip = NULL;
959c24b5dfaSDave Chinner 	struct xfs_trans	*tp = NULL;
960c24b5dfaSDave Chinner 	int			error;
961c24b5dfaSDave Chinner 	bool                    unlock_dp_on_error = false;
962c24b5dfaSDave Chinner 	prid_t			prid;
963c24b5dfaSDave Chinner 	struct xfs_dquot	*udqp = NULL;
964c24b5dfaSDave Chinner 	struct xfs_dquot	*gdqp = NULL;
965c24b5dfaSDave Chinner 	struct xfs_dquot	*pdqp = NULL;
966062647a8SBrian Foster 	struct xfs_trans_res	*tres;
967c24b5dfaSDave Chinner 	uint			resblks;
968b652afd9SDave Chinner 	xfs_ino_t		ino;
969c24b5dfaSDave Chinner 
970c24b5dfaSDave Chinner 	trace_xfs_create(dp, name);
971c24b5dfaSDave Chinner 
97275c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
9732451337dSDave Chinner 		return -EIO;
974c24b5dfaSDave Chinner 
975163467d3SZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
976c24b5dfaSDave Chinner 
977c24b5dfaSDave Chinner 	/*
978c24b5dfaSDave Chinner 	 * Make sure that we have allocated dquot(s) on disk.
979c24b5dfaSDave Chinner 	 */
980c14329d3SChristian Brauner 	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
981c14329d3SChristian Brauner 			mapped_fsgid(idmap, &init_user_ns), prid,
982c24b5dfaSDave Chinner 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
983c24b5dfaSDave Chinner 			&udqp, &gdqp, &pdqp);
984c24b5dfaSDave Chinner 	if (error)
985c24b5dfaSDave Chinner 		return error;
986c24b5dfaSDave Chinner 
987c24b5dfaSDave Chinner 	if (is_dir) {
988c24b5dfaSDave Chinner 		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
989062647a8SBrian Foster 		tres = &M_RES(mp)->tr_mkdir;
990c24b5dfaSDave Chinner 	} else {
991c24b5dfaSDave Chinner 		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
992062647a8SBrian Foster 		tres = &M_RES(mp)->tr_create;
993c24b5dfaSDave Chinner 	}
994c24b5dfaSDave Chinner 
995c24b5dfaSDave Chinner 	/*
996c24b5dfaSDave Chinner 	 * Initially assume that the file does not exist and
997c24b5dfaSDave Chinner 	 * reserve the resources for that case.  If that is not
998c24b5dfaSDave Chinner 	 * the case we'll drop the one we have and get a more
999c24b5dfaSDave Chinner 	 * appropriate transaction later.
1000c24b5dfaSDave Chinner 	 */
1001f2f7b9ffSDarrick J. Wong 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1002f2f7b9ffSDarrick J. Wong 			&tp);
10032451337dSDave Chinner 	if (error == -ENOSPC) {
1004c24b5dfaSDave Chinner 		/* flush outstanding delalloc blocks and retry */
1005c24b5dfaSDave Chinner 		xfs_flush_inodes(mp);
1006f2f7b9ffSDarrick J. Wong 		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1007f2f7b9ffSDarrick J. Wong 				resblks, &tp);
1008c24b5dfaSDave Chinner 	}
10094906e215SChristoph Hellwig 	if (error)
1010f2f7b9ffSDarrick J. Wong 		goto out_release_dquots;
1011c24b5dfaSDave Chinner 
101265523218SChristoph Hellwig 	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1013c24b5dfaSDave Chinner 	unlock_dp_on_error = true;
1014c24b5dfaSDave Chinner 
1015c24b5dfaSDave Chinner 	/*
1016c24b5dfaSDave Chinner 	 * entry pointing to it, but a directory also has the "." entry
1017c24b5dfaSDave Chinner 	 * entry pointing to them, but a directory also the "." entry
1018c24b5dfaSDave Chinner 	 * pointing to itself.
1019c24b5dfaSDave Chinner 	 */
1020b652afd9SDave Chinner 	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1021b652afd9SDave Chinner 	if (!error)
1022f2d40141SChristian Brauner 		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1023b652afd9SDave Chinner 				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
1024d6077aa3SJan Kara 	if (error)
1025c24b5dfaSDave Chinner 		goto out_trans_cancel;
1026c24b5dfaSDave Chinner 
1027c24b5dfaSDave Chinner 	/*
1028c24b5dfaSDave Chinner 	 * Now we join the directory inode to the transaction.  We do not do it
1029b652afd9SDave Chinner 	 * earlier because xfs_dialloc might commit the previous transaction
1030c24b5dfaSDave Chinner 	 * (and release all the locks).  An error from here on will result in
1031c24b5dfaSDave Chinner 	 * the transaction cancel unlocking dp so don't do it explicitly in the
1032c24b5dfaSDave Chinner 	 * error path.
1033c24b5dfaSDave Chinner 	 */
103465523218SChristoph Hellwig 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1035c24b5dfaSDave Chinner 	unlock_dp_on_error = false;
1036c24b5dfaSDave Chinner 
1037381eee69SBrian Foster 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
103863337b63SKaixu Xia 					resblks - XFS_IALLOC_SPACE_RES(mp));
1039c24b5dfaSDave Chinner 	if (error) {
10402451337dSDave Chinner 		ASSERT(error != -ENOSPC);
10414906e215SChristoph Hellwig 		goto out_trans_cancel;
1042c24b5dfaSDave Chinner 	}
1043c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1044c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1045c24b5dfaSDave Chinner 
1046c24b5dfaSDave Chinner 	if (is_dir) {
1047c24b5dfaSDave Chinner 		error = xfs_dir_init(tp, ip, dp);
1048c24b5dfaSDave Chinner 		if (error)
1049c8eac49eSBrian Foster 			goto out_trans_cancel;
1050c24b5dfaSDave Chinner 
105191083269SEric Sandeen 		xfs_bumplink(tp, dp);
1052c24b5dfaSDave Chinner 	}
1053c24b5dfaSDave Chinner 
1054c24b5dfaSDave Chinner 	/*
1055c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1056c24b5dfaSDave Chinner 	 * create transaction goes to disk before returning to
1057c24b5dfaSDave Chinner 	 * the user.
1058c24b5dfaSDave Chinner 	 */
10590560f31aSDave Chinner 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1060c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1061c24b5dfaSDave Chinner 
1062c24b5dfaSDave Chinner 	/*
1063c24b5dfaSDave Chinner 	 * Attach the dquot(s) to the inodes and modify them incore.
1064c24b5dfaSDave Chinner 	 * The ids of the inode couldn't have changed since the new
1065c24b5dfaSDave Chinner 	 * inode has been locked ever since it was created.
1066c24b5dfaSDave Chinner 	 */
1067c24b5dfaSDave Chinner 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1068c24b5dfaSDave Chinner 
106970393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1070c24b5dfaSDave Chinner 	if (error)
1071c24b5dfaSDave Chinner 		goto out_release_inode;
1072c24b5dfaSDave Chinner 
1073c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1074c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1075c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1076c24b5dfaSDave Chinner 
1077c24b5dfaSDave Chinner 	*ipp = ip;
1078c24b5dfaSDave Chinner 	return 0;
1079c24b5dfaSDave Chinner 
1080c24b5dfaSDave Chinner  out_trans_cancel:
10814906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1082c24b5dfaSDave Chinner  out_release_inode:
1083c24b5dfaSDave Chinner 	/*
108458c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
108558c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
108658c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
1087c24b5dfaSDave Chinner 	 */
108858c90473SDave Chinner 	if (ip) {
108958c90473SDave Chinner 		xfs_finish_inode_setup(ip);
109044a8736bSDarrick J. Wong 		xfs_irele(ip);
109158c90473SDave Chinner 	}
1092f2f7b9ffSDarrick J. Wong  out_release_dquots:
1093c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1094c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1095c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1096c24b5dfaSDave Chinner 
1097c24b5dfaSDave Chinner 	if (unlock_dp_on_error)
109865523218SChristoph Hellwig 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1099c24b5dfaSDave Chinner 	return error;
1100c24b5dfaSDave Chinner }
1101c24b5dfaSDave Chinner 
1102c24b5dfaSDave Chinner int
110399b6436bSZhi Yong Wu xfs_create_tmpfile(
1104f2d40141SChristian Brauner 	struct mnt_idmap	*idmap,
110599b6436bSZhi Yong Wu 	struct xfs_inode	*dp,
1106330033d6SBrian Foster 	umode_t			mode,
1107330033d6SBrian Foster 	struct xfs_inode	**ipp)
110899b6436bSZhi Yong Wu {
110999b6436bSZhi Yong Wu 	struct xfs_mount	*mp = dp->i_mount;
111099b6436bSZhi Yong Wu 	struct xfs_inode	*ip = NULL;
111199b6436bSZhi Yong Wu 	struct xfs_trans	*tp = NULL;
111299b6436bSZhi Yong Wu 	int			error;
111399b6436bSZhi Yong Wu 	prid_t                  prid;
111499b6436bSZhi Yong Wu 	struct xfs_dquot	*udqp = NULL;
111599b6436bSZhi Yong Wu 	struct xfs_dquot	*gdqp = NULL;
111699b6436bSZhi Yong Wu 	struct xfs_dquot	*pdqp = NULL;
111799b6436bSZhi Yong Wu 	struct xfs_trans_res	*tres;
111899b6436bSZhi Yong Wu 	uint			resblks;
1119b652afd9SDave Chinner 	xfs_ino_t		ino;
112099b6436bSZhi Yong Wu 
112175c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
11222451337dSDave Chinner 		return -EIO;
112399b6436bSZhi Yong Wu 
112499b6436bSZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
112599b6436bSZhi Yong Wu 
112699b6436bSZhi Yong Wu 	/*
112799b6436bSZhi Yong Wu 	 * Make sure that we have allocated dquot(s) on disk.
112899b6436bSZhi Yong Wu 	 */
1129c14329d3SChristian Brauner 	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
1130c14329d3SChristian Brauner 			mapped_fsgid(idmap, &init_user_ns), prid,
113199b6436bSZhi Yong Wu 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
113299b6436bSZhi Yong Wu 			&udqp, &gdqp, &pdqp);
113399b6436bSZhi Yong Wu 	if (error)
113499b6436bSZhi Yong Wu 		return error;
113599b6436bSZhi Yong Wu 
113699b6436bSZhi Yong Wu 	resblks = XFS_IALLOC_SPACE_RES(mp);
113799b6436bSZhi Yong Wu 	tres = &M_RES(mp)->tr_create_tmpfile;
1138253f4911SChristoph Hellwig 
1139f2f7b9ffSDarrick J. Wong 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1140f2f7b9ffSDarrick J. Wong 			&tp);
11414906e215SChristoph Hellwig 	if (error)
1142f2f7b9ffSDarrick J. Wong 		goto out_release_dquots;
114399b6436bSZhi Yong Wu 
1144b652afd9SDave Chinner 	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1145b652afd9SDave Chinner 	if (!error)
1146f2d40141SChristian Brauner 		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1147b652afd9SDave Chinner 				0, 0, prid, false, &ip);
1148d6077aa3SJan Kara 	if (error)
114999b6436bSZhi Yong Wu 		goto out_trans_cancel;
115099b6436bSZhi Yong Wu 
11510560f31aSDave Chinner 	if (xfs_has_wsync(mp))
115299b6436bSZhi Yong Wu 		xfs_trans_set_sync(tp);
115399b6436bSZhi Yong Wu 
115499b6436bSZhi Yong Wu 	/*
115599b6436bSZhi Yong Wu 	 * Attach the dquot(s) to the inodes and modify them incore.
115699b6436bSZhi Yong Wu 	 * The ids of the inode couldn't have changed since the new
115799b6436bSZhi Yong Wu 	 * inode has been locked ever since it was created.
115899b6436bSZhi Yong Wu 	 */
115999b6436bSZhi Yong Wu 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
116099b6436bSZhi Yong Wu 
116199b6436bSZhi Yong Wu 	error = xfs_iunlink(tp, ip);
116299b6436bSZhi Yong Wu 	if (error)
11634906e215SChristoph Hellwig 		goto out_trans_cancel;
116499b6436bSZhi Yong Wu 
116570393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
116699b6436bSZhi Yong Wu 	if (error)
116799b6436bSZhi Yong Wu 		goto out_release_inode;
116899b6436bSZhi Yong Wu 
116999b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
117099b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
117199b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
117299b6436bSZhi Yong Wu 
1173330033d6SBrian Foster 	*ipp = ip;
117499b6436bSZhi Yong Wu 	return 0;
117599b6436bSZhi Yong Wu 
117699b6436bSZhi Yong Wu  out_trans_cancel:
11774906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
117899b6436bSZhi Yong Wu  out_release_inode:
117999b6436bSZhi Yong Wu 	/*
118058c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
118158c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
118258c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
118399b6436bSZhi Yong Wu 	 */
118458c90473SDave Chinner 	if (ip) {
118558c90473SDave Chinner 		xfs_finish_inode_setup(ip);
118644a8736bSDarrick J. Wong 		xfs_irele(ip);
118758c90473SDave Chinner 	}
1188f2f7b9ffSDarrick J. Wong  out_release_dquots:
118999b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
119099b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
119199b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
119299b6436bSZhi Yong Wu 
119399b6436bSZhi Yong Wu 	return error;
119499b6436bSZhi Yong Wu }
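
/*
 * Note on the O_TMPFILE lifecycle as implemented in this file: the inode
 * created by xfs_create_tmpfile() above starts with a zero link count and
 * is parked on the AGI unlinked list by xfs_iunlink().  It either gains a
 * name later through xfs_link() below, which pulls it back off the
 * unlinked list while i_nlink is still zero, or it is torn down by the
 * xfs_inactive() path once the last reference to it is dropped.
 */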
119599b6436bSZhi Yong Wu 
119699b6436bSZhi Yong Wu int
1197c24b5dfaSDave Chinner xfs_link(
1198c24b5dfaSDave Chinner 	xfs_inode_t		*tdp,
1199c24b5dfaSDave Chinner 	xfs_inode_t		*sip,
1200c24b5dfaSDave Chinner 	struct xfs_name		*target_name)
1201c24b5dfaSDave Chinner {
1202c24b5dfaSDave Chinner 	xfs_mount_t		*mp = tdp->i_mount;
1203c24b5dfaSDave Chinner 	xfs_trans_t		*tp;
1204871b9316SDarrick J. Wong 	int			error, nospace_error = 0;
1205c24b5dfaSDave Chinner 	int			resblks;
1206c24b5dfaSDave Chinner 
1207c24b5dfaSDave Chinner 	trace_xfs_link(tdp, target_name);
1208c24b5dfaSDave Chinner 
1209c19b3b05SDave Chinner 	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1210c24b5dfaSDave Chinner 
121175c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
12122451337dSDave Chinner 		return -EIO;
1213c24b5dfaSDave Chinner 
1214c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(sip);
1215c24b5dfaSDave Chinner 	if (error)
1216c24b5dfaSDave Chinner 		goto std_return;
1217c24b5dfaSDave Chinner 
1218c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(tdp);
1219c24b5dfaSDave Chinner 	if (error)
1220c24b5dfaSDave Chinner 		goto std_return;
1221c24b5dfaSDave Chinner 
1222c24b5dfaSDave Chinner 	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1223871b9316SDarrick J. Wong 	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
1224871b9316SDarrick J. Wong 			&tp, &nospace_error);
12254906e215SChristoph Hellwig 	if (error)
1226253f4911SChristoph Hellwig 		goto std_return;
1227c24b5dfaSDave Chinner 
1228c24b5dfaSDave Chinner 	/*
1229c24b5dfaSDave Chinner 	 * If we are using project inheritance, we only allow hard link
1230c24b5dfaSDave Chinner 	 * creation in our tree when the project IDs are the same; else
1231c24b5dfaSDave Chinner 	 * the tree quota mechanism could be circumvented.
1232c24b5dfaSDave Chinner 	 */
1233db07349dSChristoph Hellwig 	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1234ceaf603cSChristoph Hellwig 		     tdp->i_projid != sip->i_projid)) {
12352451337dSDave Chinner 		error = -EXDEV;
1236c24b5dfaSDave Chinner 		goto error_return;
1237c24b5dfaSDave Chinner 	}
1238c24b5dfaSDave Chinner 
123994f3cad5SEric Sandeen 	if (!resblks) {
124094f3cad5SEric Sandeen 		error = xfs_dir_canenter(tp, tdp, target_name);
1241c24b5dfaSDave Chinner 		if (error)
1242c24b5dfaSDave Chinner 			goto error_return;
124394f3cad5SEric Sandeen 	}
1244c24b5dfaSDave Chinner 
124554d7b5c1SDave Chinner 	/*
124654d7b5c1SDave Chinner 	 * Handle initial link state of O_TMPFILE inode
124754d7b5c1SDave Chinner 	 */
124854d7b5c1SDave Chinner 	if (VFS_I(sip)->i_nlink == 0) {
1249f40aadb2SDave Chinner 		struct xfs_perag	*pag;
1250f40aadb2SDave Chinner 
1251f40aadb2SDave Chinner 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1252f40aadb2SDave Chinner 		error = xfs_iunlink_remove(tp, pag, sip);
1253f40aadb2SDave Chinner 		xfs_perag_put(pag);
1254ab297431SZhi Yong Wu 		if (error)
12554906e215SChristoph Hellwig 			goto error_return;
1256ab297431SZhi Yong Wu 	}
1257ab297431SZhi Yong Wu 
1258c24b5dfaSDave Chinner 	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1259381eee69SBrian Foster 				   resblks);
1260c24b5dfaSDave Chinner 	if (error)
12614906e215SChristoph Hellwig 		goto error_return;
1262c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1263c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1264c24b5dfaSDave Chinner 
126591083269SEric Sandeen 	xfs_bumplink(tp, sip);
1266c24b5dfaSDave Chinner 
1267c24b5dfaSDave Chinner 	/*
1268c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1269c24b5dfaSDave Chinner 	 * link transaction goes to disk before returning to
1270c24b5dfaSDave Chinner 	 * the user.
1271c24b5dfaSDave Chinner 	 */
12720560f31aSDave Chinner 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1273c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1274c24b5dfaSDave Chinner 
127570393313SChristoph Hellwig 	return xfs_trans_commit(tp);
1276c24b5dfaSDave Chinner 
1277c24b5dfaSDave Chinner  error_return:
12784906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1279c24b5dfaSDave Chinner  std_return:
1280871b9316SDarrick J. Wong 	if (error == -ENOSPC && nospace_error)
1281871b9316SDarrick J. Wong 		error = nospace_error;
1282c24b5dfaSDave Chinner 	return error;
1283c24b5dfaSDave Chinner }
1284c24b5dfaSDave Chinner 
1285363e59baSDarrick J. Wong /* Clear the reflink flag and the cowblocks tag if possible. */
1286363e59baSDarrick J. Wong static void
1287363e59baSDarrick J. Wong xfs_itruncate_clear_reflink_flags(
1288363e59baSDarrick J. Wong 	struct xfs_inode	*ip)
1289363e59baSDarrick J. Wong {
1290363e59baSDarrick J. Wong 	struct xfs_ifork	*dfork;
1291363e59baSDarrick J. Wong 	struct xfs_ifork	*cfork;
1292363e59baSDarrick J. Wong 
1293363e59baSDarrick J. Wong 	if (!xfs_is_reflink_inode(ip))
1294363e59baSDarrick J. Wong 		return;
1295732436efSDarrick J. Wong 	dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1296732436efSDarrick J. Wong 	cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
1297363e59baSDarrick J. Wong 	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
12983e09ab8fSChristoph Hellwig 		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1299363e59baSDarrick J. Wong 	if (cfork->if_bytes == 0)
1300363e59baSDarrick J. Wong 		xfs_inode_clear_cowblocks_tag(ip);
1301363e59baSDarrick J. Wong }
1302363e59baSDarrick J. Wong 
13031da177e4SLinus Torvalds /*
13048f04c47aSChristoph Hellwig  * Free up the underlying blocks past new_size.  The new size must be smaller
13058f04c47aSChristoph Hellwig  * than the current size.  This routine can be used both for the attribute and
13068f04c47aSChristoph Hellwig  * data fork, and does not modify the inode size, which is left to the caller.
13071da177e4SLinus Torvalds  *
1308f6485057SDavid Chinner  * The transaction passed to this routine must have made a permanent log
1309f6485057SDavid Chinner  * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1310f6485057SDavid Chinner  * given transaction and start new ones, so make sure everything involved in
1311f6485057SDavid Chinner  * the transaction is tidy before calling here.  Some transaction will be
1312f6485057SDavid Chinner  * returned to the caller to be committed.  The incoming transaction must
1313f6485057SDavid Chinner  * already include the inode, and both inode locks must be held exclusively.
1314f6485057SDavid Chinner  * The inode must also be "held" within the transaction.  On return the inode
1315f6485057SDavid Chinner  * will be "held" within the returned transaction.  This routine does NOT
1316f6485057SDavid Chinner  * require any disk space to be reserved for it within the transaction.
13171da177e4SLinus Torvalds  *
1318f6485057SDavid Chinner  * If we get an error, we must return with the inode locked and linked into the
1319f6485057SDavid Chinner  * current transaction. This keeps things simple for the higher level code,
1320f6485057SDavid Chinner  * because it always knows that the inode is locked and held in the transaction
1321f6485057SDavid Chinner  * that returns to it whether errors occur or not.  We don't mark the inode
1322f6485057SDavid Chinner  * dirty on error so that transactions can be easily aborted if possible.
13231da177e4SLinus Torvalds  */
13241da177e4SLinus Torvalds int
13254e529339SBrian Foster xfs_itruncate_extents_flags(
13268f04c47aSChristoph Hellwig 	struct xfs_trans	**tpp,
13278f04c47aSChristoph Hellwig 	struct xfs_inode	*ip,
13288f04c47aSChristoph Hellwig 	int			whichfork,
132913b86fc3SBrian Foster 	xfs_fsize_t		new_size,
13304e529339SBrian Foster 	int			flags)
13311da177e4SLinus Torvalds {
13328f04c47aSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
13338f04c47aSChristoph Hellwig 	struct xfs_trans	*tp = *tpp;
13341da177e4SLinus Torvalds 	xfs_fileoff_t		first_unmap_block;
13358f04c47aSChristoph Hellwig 	xfs_filblks_t		unmap_len;
13368f04c47aSChristoph Hellwig 	int			error = 0;
13371da177e4SLinus Torvalds 
13380b56185bSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
13390b56185bSChristoph Hellwig 	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
13400b56185bSChristoph Hellwig 	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1341ce7ae151SChristoph Hellwig 	ASSERT(new_size <= XFS_ISIZE(ip));
13428f04c47aSChristoph Hellwig 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
13431da177e4SLinus Torvalds 	ASSERT(ip->i_itemp != NULL);
1344898621d5SChristoph Hellwig 	ASSERT(ip->i_itemp->ili_lock_flags == 0);
13451da177e4SLinus Torvalds 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
13461da177e4SLinus Torvalds 
1347673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_start(ip, new_size);
1348673e8e59SChristoph Hellwig 
13494e529339SBrian Foster 	flags |= xfs_bmapi_aflag(whichfork);
135013b86fc3SBrian Foster 
13511da177e4SLinus Torvalds 	/*
13521da177e4SLinus Torvalds 	 * Since it is possible for space to become allocated beyond
13531da177e4SLinus Torvalds 	 * the end of the file (in a crash where the space is allocated
13541da177e4SLinus Torvalds 	 * but the inode size is not yet updated), simply remove any
13551da177e4SLinus Torvalds 	 * blocks which show up between the new EOF and the maximum
13564bbb04abSDarrick J. Wong 	 * possible file size.
13574bbb04abSDarrick J. Wong 	 *
13584bbb04abSDarrick J. Wong 	 * We have to free all the blocks to the bmbt maximum offset, even if
13594bbb04abSDarrick J. Wong 	 * the page cache can't scale that far.
13601da177e4SLinus Torvalds 	 */
13618f04c47aSChristoph Hellwig 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
136233005fd0SDarrick J. Wong 	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
13634bbb04abSDarrick J. Wong 		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
13648f04c47aSChristoph Hellwig 		return 0;
13654bbb04abSDarrick J. Wong 	}
13668f04c47aSChristoph Hellwig 
13674bbb04abSDarrick J. Wong 	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
13684bbb04abSDarrick J. Wong 	while (unmap_len > 0) {
1369692b6cddSDave Chinner 		ASSERT(tp->t_highest_agno == NULLAGNUMBER);
13704bbb04abSDarrick J. Wong 		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
13714bbb04abSDarrick J. Wong 				flags, XFS_ITRUNC_MAX_EXTENTS);
13728f04c47aSChristoph Hellwig 		if (error)
1373d5a2e289SBrian Foster 			goto out;
13741da177e4SLinus Torvalds 
13756dd379c7SBrian Foster 		/* free the just unmapped extents */
13769e28a242SBrian Foster 		error = xfs_defer_finish(&tp);
13778f04c47aSChristoph Hellwig 		if (error)
13789b1f4e98SBrian Foster 			goto out;
13791da177e4SLinus Torvalds 	}
13808f04c47aSChristoph Hellwig 
13814919d42aSDarrick J. Wong 	if (whichfork == XFS_DATA_FORK) {
1382aa8968f2SDarrick J. Wong 		/* Remove all pending CoW reservations. */
13834919d42aSDarrick J. Wong 		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
13844bbb04abSDarrick J. Wong 				first_unmap_block, XFS_MAX_FILEOFF, true);
1385aa8968f2SDarrick J. Wong 		if (error)
1386aa8968f2SDarrick J. Wong 			goto out;
1387aa8968f2SDarrick J. Wong 
1388363e59baSDarrick J. Wong 		xfs_itruncate_clear_reflink_flags(ip);
13894919d42aSDarrick J. Wong 	}
1390aa8968f2SDarrick J. Wong 
1391673e8e59SChristoph Hellwig 	/*
1392673e8e59SChristoph Hellwig 	 * Always re-log the inode so that our permanent transaction can keep
1393673e8e59SChristoph Hellwig 	 * on rolling it forward in the log.
1394673e8e59SChristoph Hellwig 	 */
1395673e8e59SChristoph Hellwig 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1396673e8e59SChristoph Hellwig 
1397673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_end(ip, new_size);
1398673e8e59SChristoph Hellwig 
13998f04c47aSChristoph Hellwig out:
14008f04c47aSChristoph Hellwig 	*tpp = tp;
14018f04c47aSChristoph Hellwig 	return error;
14028f04c47aSChristoph Hellwig }
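
/*
 * A canonical caller of the contract described above is
 * xfs_inactive_truncate() below: it allocates an itruncate transaction,
 * locks and joins the inode, logs the new on-disk size, calls
 * xfs_itruncate_extents(), and then commits whatever transaction is
 * handed back.
 */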
14038f04c47aSChristoph Hellwig 
1404c24b5dfaSDave Chinner int
1405c24b5dfaSDave Chinner xfs_release(
1406c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1407c24b5dfaSDave Chinner {
1408c24b5dfaSDave Chinner 	xfs_mount_t	*mp = ip->i_mount;
14097d88329eSDarrick J. Wong 	int		error = 0;
1410c24b5dfaSDave Chinner 
1411c19b3b05SDave Chinner 	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1412c24b5dfaSDave Chinner 		return 0;
1413c24b5dfaSDave Chinner 
1414c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
14152e973b2cSDave Chinner 	if (xfs_is_readonly(mp))
1416c24b5dfaSDave Chinner 		return 0;
1417c24b5dfaSDave Chinner 
141875c8c50fSDave Chinner 	if (!xfs_is_shutdown(mp)) {
1419c24b5dfaSDave Chinner 		int truncated;
1420c24b5dfaSDave Chinner 
1421c24b5dfaSDave Chinner 		/*
1422c24b5dfaSDave Chinner 		 * If we previously truncated this file and removed old data
1423c24b5dfaSDave Chinner 		 * in the process, we want to initiate "early" writeout on
1424c24b5dfaSDave Chinner 		 * the last close.  This is an attempt to combat the notorious
1425c24b5dfaSDave Chinner 		 * NULL files problem which is particularly noticeable from a
1426c24b5dfaSDave Chinner 		 * truncate down, buffered (re-)write (delalloc), followed by
1427c24b5dfaSDave Chinner 		 * a crash.  What we are effectively doing here is
1428c24b5dfaSDave Chinner 		 * significantly reducing the time window where we'd otherwise
1429c24b5dfaSDave Chinner 		 * be exposed to that problem.
1430c24b5dfaSDave Chinner 		 */
1431c24b5dfaSDave Chinner 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1432c24b5dfaSDave Chinner 		if (truncated) {
1433c24b5dfaSDave Chinner 			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1434eac152b4SDave Chinner 			if (ip->i_delayed_blks > 0) {
14352451337dSDave Chinner 				error = filemap_flush(VFS_I(ip)->i_mapping);
1436c24b5dfaSDave Chinner 				if (error)
1437c24b5dfaSDave Chinner 					return error;
1438c24b5dfaSDave Chinner 			}
1439c24b5dfaSDave Chinner 		}
1440c24b5dfaSDave Chinner 	}
1441c24b5dfaSDave Chinner 
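	/*
	 * Unlinked files are torn down by xfs_inactive() once the last
	 * reference goes away, so there is nothing more to do here.
	 */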
144254d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink == 0)
1443c24b5dfaSDave Chinner 		return 0;
1444c24b5dfaSDave Chinner 
14457d88329eSDarrick J. Wong 	/*
14467d88329eSDarrick J. Wong 	 * If we can't get the iolock just skip truncating the blocks past EOF
14477d88329eSDarrick J. Wong 	 * because we could deadlock with the mmap_lock otherwise. We'll get
14487d88329eSDarrick J. Wong 	 * another chance to drop them once the last reference to the inode is
14497d88329eSDarrick J. Wong 	 * dropped, so we'll never leak blocks permanently.
14507d88329eSDarrick J. Wong 	 */
14517d88329eSDarrick J. Wong 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
14527d88329eSDarrick J. Wong 		return 0;
1453c24b5dfaSDave Chinner 
14547d88329eSDarrick J. Wong 	if (xfs_can_free_eofblocks(ip, false)) {
1455c24b5dfaSDave Chinner 		/*
1456a36b9261SBrian Foster 		 * If the inode is being opened, written and closed
1457a36b9261SBrian Foster 		 * frequently and we have delayed allocation blocks outstanding
1458a36b9261SBrian Foster 		 * (e.g. streaming writes from the NFS server), truncating the
1459a36b9261SBrian Foster 		 * blocks past EOF will cause fragmentation to occur.
1460a36b9261SBrian Foster 		 *
1461a36b9261SBrian Foster 		 * In this case don't do the truncation, but we have to be
1462a36b9261SBrian Foster 		 * careful how we detect this case. Blocks beyond EOF show up as
1463a36b9261SBrian Foster 		 * i_delayed_blks even when the inode is clean, so we need to
1464a36b9261SBrian Foster 		 * truncate them away first before checking for a dirty release.
1465a36b9261SBrian Foster 		 * Hence on the first dirty close we will still remove the
1466a36b9261SBrian Foster 		 * speculative allocation, but after that we will leave it in
1467a36b9261SBrian Foster 		 * place.
1468a36b9261SBrian Foster 		 */
1469a36b9261SBrian Foster 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
14707d88329eSDarrick J. Wong 			goto out_unlock;
14717d88329eSDarrick J. Wong 
1472a36b9261SBrian Foster 		error = xfs_free_eofblocks(ip);
1473a36b9261SBrian Foster 		if (error)
14747d88329eSDarrick J. Wong 			goto out_unlock;
1475c24b5dfaSDave Chinner 
1476c24b5dfaSDave Chinner 		/* delalloc blocks after truncation means it really is dirty */
1477c24b5dfaSDave Chinner 		if (ip->i_delayed_blks)
1478c24b5dfaSDave Chinner 			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1479c24b5dfaSDave Chinner 	}
14807d88329eSDarrick J. Wong 
14817d88329eSDarrick J. Wong out_unlock:
14827d88329eSDarrick J. Wong 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
14837d88329eSDarrick J. Wong 	return error;
1484c24b5dfaSDave Chinner }
1485c24b5dfaSDave Chinner 
1486c24b5dfaSDave Chinner /*
1487f7be2d7fSBrian Foster  * xfs_inactive_truncate
1488f7be2d7fSBrian Foster  *
1489f7be2d7fSBrian Foster  * Called to perform a truncate when an inode becomes unlinked.
1490f7be2d7fSBrian Foster  */
1491f7be2d7fSBrian Foster STATIC int
1492f7be2d7fSBrian Foster xfs_inactive_truncate(
1493f7be2d7fSBrian Foster 	struct xfs_inode *ip)
1494f7be2d7fSBrian Foster {
1495f7be2d7fSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
1496f7be2d7fSBrian Foster 	struct xfs_trans	*tp;
1497f7be2d7fSBrian Foster 	int			error;
1498f7be2d7fSBrian Foster 
1499253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1500f7be2d7fSBrian Foster 	if (error) {
150175c8c50fSDave Chinner 		ASSERT(xfs_is_shutdown(mp));
1502f7be2d7fSBrian Foster 		return error;
1503f7be2d7fSBrian Foster 	}
1504f7be2d7fSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1505f7be2d7fSBrian Foster 	xfs_trans_ijoin(tp, ip, 0);
1506f7be2d7fSBrian Foster 
1507f7be2d7fSBrian Foster 	/*
1508f7be2d7fSBrian Foster 	 * Log the inode size first to prevent stale data exposure in the event
1509f7be2d7fSBrian Foster 	 * of a system crash before the truncate completes. See the related
151069bca807SJan Kara 	 * comment in xfs_vn_setattr_size() for details.
1511f7be2d7fSBrian Foster 	 */
151213d2c10bSChristoph Hellwig 	ip->i_disk_size = 0;
1513f7be2d7fSBrian Foster 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1514f7be2d7fSBrian Foster 
1515f7be2d7fSBrian Foster 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1516f7be2d7fSBrian Foster 	if (error)
1517f7be2d7fSBrian Foster 		goto error_trans_cancel;
1518f7be2d7fSBrian Foster 
1519daf83964SChristoph Hellwig 	ASSERT(ip->i_df.if_nextents == 0);
1520f7be2d7fSBrian Foster 
152170393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1522f7be2d7fSBrian Foster 	if (error)
1523f7be2d7fSBrian Foster 		goto error_unlock;
1524f7be2d7fSBrian Foster 
1525f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1526f7be2d7fSBrian Foster 	return 0;
1527f7be2d7fSBrian Foster 
1528f7be2d7fSBrian Foster error_trans_cancel:
15294906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1530f7be2d7fSBrian Foster error_unlock:
1531f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1532f7be2d7fSBrian Foster 	return error;
1533f7be2d7fSBrian Foster }
1534f7be2d7fSBrian Foster 
1535f7be2d7fSBrian Foster /*
153688877d2bSBrian Foster  * xfs_inactive_ifree()
153788877d2bSBrian Foster  *
153888877d2bSBrian Foster  * Perform the inode free when an inode is unlinked.
153988877d2bSBrian Foster  */
154088877d2bSBrian Foster STATIC int
154188877d2bSBrian Foster xfs_inactive_ifree(
154288877d2bSBrian Foster 	struct xfs_inode *ip)
154388877d2bSBrian Foster {
154488877d2bSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
154588877d2bSBrian Foster 	struct xfs_trans	*tp;
154688877d2bSBrian Foster 	int			error;
154788877d2bSBrian Foster 
15489d43b180SBrian Foster 	/*
154976d771b4SChristoph Hellwig 	 * We try to use a per-AG reservation for any block needed by the finobt
155076d771b4SChristoph Hellwig 	 * tree, but as the finobt feature predates the per-AG reservation
155176d771b4SChristoph Hellwig 	 * support, a degraded file system might not have enough space for the
155276d771b4SChristoph Hellwig 	 * reservation at mount time.  In that case try to dip into the reserved
155376d771b4SChristoph Hellwig 	 * pool and pray.
15549d43b180SBrian Foster 	 *
15559d43b180SBrian Foster 	 * Send a warning if the reservation does happen to fail, as the inode
15569d43b180SBrian Foster 	 * now remains allocated and sits on the unlinked list until the fs is
15579d43b180SBrian Foster 	 * repaired.
15589d43b180SBrian Foster 	 */
1559e1f6ca11SDarrick J. Wong 	if (unlikely(mp->m_finobt_nores)) {
1560253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
156176d771b4SChristoph Hellwig 				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
156276d771b4SChristoph Hellwig 				&tp);
156376d771b4SChristoph Hellwig 	} else {
156476d771b4SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
156576d771b4SChristoph Hellwig 	}
156688877d2bSBrian Foster 	if (error) {
15672451337dSDave Chinner 		if (error == -ENOSPC) {
15689d43b180SBrian Foster 			xfs_warn_ratelimited(mp,
15699d43b180SBrian Foster 			"Failed to remove inode(s) from unlinked list. "
15709d43b180SBrian Foster 			"Please free space, unmount and run xfs_repair.");
15719d43b180SBrian Foster 		} else {
157275c8c50fSDave Chinner 			ASSERT(xfs_is_shutdown(mp));
15739d43b180SBrian Foster 		}
157488877d2bSBrian Foster 		return error;
157588877d2bSBrian Foster 	}
157688877d2bSBrian Foster 
157796355d5aSDave Chinner 	/*
157896355d5aSDave Chinner 	 * We do not hold the inode locked across the entire rolling transaction
157996355d5aSDave Chinner 	 * here. We only need to hold it for the first transaction that
158096355d5aSDave Chinner 	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
158196355d5aSDave Chinner 	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
158296355d5aSDave Chinner 	 * here breaks the relationship between cluster buffer invalidation and
158396355d5aSDave Chinner 	 * stale inode invalidation on cluster buffer item journal commit
158496355d5aSDave Chinner 	 * completion, and can result in leaving dirty stale inodes hanging
158596355d5aSDave Chinner 	 * around in memory.
158696355d5aSDave Chinner 	 *
158796355d5aSDave Chinner 	 * We have no need for serialising this inode operation against other
158896355d5aSDave Chinner 	 * operations - we freed the inode and hence reallocation is required
158996355d5aSDave Chinner 	 * and that will serialise on reallocating the space the deferops need
159096355d5aSDave Chinner 	 * to free. Hence we can unlock the inode on the first commit of
159196355d5aSDave Chinner 	 * the transaction rather than roll it right through the deferops. This
159296355d5aSDave Chinner 	 * avoids relogging the XFS_ISTALE inode.
159396355d5aSDave Chinner 	 *
159496355d5aSDave Chinner 	 * We check that xfs_ifree() hasn't grown an internal transaction roll
159596355d5aSDave Chinner 	 * by asserting that the inode is still locked when it returns.
159696355d5aSDave Chinner 	 */
159788877d2bSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
159896355d5aSDave Chinner 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
159988877d2bSBrian Foster 
16000e0417f3SBrian Foster 	error = xfs_ifree(tp, ip);
160196355d5aSDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
160288877d2bSBrian Foster 	if (error) {
160388877d2bSBrian Foster 		/*
160488877d2bSBrian Foster 		 * If we fail to free the inode, shut down.  The cancel
160588877d2bSBrian Foster 		 * might do that, but we need to make sure.  Otherwise the
160688877d2bSBrian Foster 		 * inode might be lost for a long time or forever.
160788877d2bSBrian Foster 		 */
160875c8c50fSDave Chinner 		if (!xfs_is_shutdown(mp)) {
160988877d2bSBrian Foster 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
161088877d2bSBrian Foster 				__func__, error);
161188877d2bSBrian Foster 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
161288877d2bSBrian Foster 		}
16134906e215SChristoph Hellwig 		xfs_trans_cancel(tp);
161488877d2bSBrian Foster 		return error;
161588877d2bSBrian Foster 	}
161688877d2bSBrian Foster 
161788877d2bSBrian Foster 	/*
161888877d2bSBrian Foster 	 * Credit the quota account(s). The inode is gone.
161988877d2bSBrian Foster 	 */
162088877d2bSBrian Foster 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
162188877d2bSBrian Foster 
1622d4d12c02SDave Chinner 	return xfs_trans_commit(tp);
162388877d2bSBrian Foster }
162488877d2bSBrian Foster 
162588877d2bSBrian Foster /*
162662af7d54SDarrick J. Wong  * Returns true if we need to update the on-disk metadata before we can free
162762af7d54SDarrick J. Wong  * the memory used by this inode.  Updates include freeing post-eof
162862af7d54SDarrick J. Wong  * preallocations; freeing COW staging extents; and marking the inode free in
162962af7d54SDarrick J. Wong  * the inobt if it is on the unlinked list.
163062af7d54SDarrick J. Wong  */
163162af7d54SDarrick J. Wong bool
163262af7d54SDarrick J. Wong xfs_inode_needs_inactive(
163362af7d54SDarrick J. Wong 	struct xfs_inode	*ip)
163462af7d54SDarrick J. Wong {
163562af7d54SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
1636732436efSDarrick J. Wong 	struct xfs_ifork	*cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
163762af7d54SDarrick J. Wong 
163862af7d54SDarrick J. Wong 	/*
163962af7d54SDarrick J. Wong 	 * If the inode is already free, then there can be nothing
164062af7d54SDarrick J. Wong 	 * to clean up here.
164162af7d54SDarrick J. Wong 	 */
164262af7d54SDarrick J. Wong 	if (VFS_I(ip)->i_mode == 0)
164362af7d54SDarrick J. Wong 		return false;
164462af7d54SDarrick J. Wong 
164576e58901SDarrick J. Wong 	/*
164676e58901SDarrick J. Wong 	 * If this is a read-only mount, don't do this (would generate I/O)
164776e58901SDarrick J. Wong 	 * unless we're in log recovery and cleaning the iunlinked list.
164876e58901SDarrick J. Wong 	 */
164976e58901SDarrick J. Wong 	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
165062af7d54SDarrick J. Wong 		return false;
165162af7d54SDarrick J. Wong 
165262af7d54SDarrick J. Wong 	/* If the log isn't running, push inodes straight to reclaim. */
165375c8c50fSDave Chinner 	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
165462af7d54SDarrick J. Wong 		return false;
165562af7d54SDarrick J. Wong 
165662af7d54SDarrick J. Wong 	/* Metadata inodes require explicit resource cleanup. */
165762af7d54SDarrick J. Wong 	if (xfs_is_metadata_inode(ip))
165862af7d54SDarrick J. Wong 		return false;
165962af7d54SDarrick J. Wong 
166062af7d54SDarrick J. Wong 	/* Want to clean out the cow blocks if there are any. */
166162af7d54SDarrick J. Wong 	if (cow_ifp && cow_ifp->if_bytes > 0)
166262af7d54SDarrick J. Wong 		return true;
166362af7d54SDarrick J. Wong 
166462af7d54SDarrick J. Wong 	/* Unlinked files must be freed. */
166562af7d54SDarrick J. Wong 	if (VFS_I(ip)->i_nlink == 0)
166662af7d54SDarrick J. Wong 		return true;
166762af7d54SDarrick J. Wong 
166862af7d54SDarrick J. Wong 	/*
166962af7d54SDarrick J. Wong 	 * This file isn't being freed, so check if there are post-eof blocks
167062af7d54SDarrick J. Wong 	 * to free.  @force is true because we are evicting an inode from the
167162af7d54SDarrick J. Wong 	 * cache.  Post-eof blocks must be freed, lest we end up with broken
167262af7d54SDarrick J. Wong 	 * free space accounting.
167362af7d54SDarrick J. Wong 	 *
167462af7d54SDarrick J. Wong 	 * Note: don't bother with iolock here since lockdep complains about
167562af7d54SDarrick J. Wong 	 * acquiring it in reclaim context. We have the only reference to the
167662af7d54SDarrick J. Wong 	 * inode at this point anyways.
167762af7d54SDarrick J. Wong 	 */
167862af7d54SDarrick J. Wong 	return xfs_can_free_eofblocks(ip, true);
167962af7d54SDarrick J. Wong }
168062af7d54SDarrick J. Wong 
168162af7d54SDarrick J. Wong /*
1682c24b5dfaSDave Chinner  * xfs_inactive
1683c24b5dfaSDave Chinner  *
1684c24b5dfaSDave Chinner  * This is called when the reference count for the vnode
1685c24b5dfaSDave Chinner  * goes to zero.  If the file has been unlinked, then it must
1686c24b5dfaSDave Chinner  * now be truncated.  Also, we clear all of the read-ahead state
1687c24b5dfaSDave Chinner  * kept for the inode here since the file is now closed.
1688c24b5dfaSDave Chinner  */
1689d4d12c02SDave Chinner int
1690c24b5dfaSDave Chinner xfs_inactive(
1691c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1692c24b5dfaSDave Chinner {
16933d3c8b52SJie Liu 	struct xfs_mount	*mp;
1694d4d12c02SDave Chinner 	int			error = 0;
1695c24b5dfaSDave Chinner 	int			truncate = 0;
1696c24b5dfaSDave Chinner 
1697c24b5dfaSDave Chinner 	/*
1698c24b5dfaSDave Chinner 	 * If the inode is already free, then there can be nothing
1699c24b5dfaSDave Chinner 	 * to clean up here.
1700c24b5dfaSDave Chinner 	 */
1701c19b3b05SDave Chinner 	if (VFS_I(ip)->i_mode == 0) {
1702c24b5dfaSDave Chinner 		ASSERT(ip->i_df.if_broot_bytes == 0);
17033ea06d73SDarrick J. Wong 		goto out;
1704c24b5dfaSDave Chinner 	}
1705c24b5dfaSDave Chinner 
1706c24b5dfaSDave Chinner 	mp = ip->i_mount;
170717c12bcdSDarrick J. Wong 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1708c24b5dfaSDave Chinner 
170976e58901SDarrick J. Wong 	/*
171076e58901SDarrick J. Wong 	 * If this is a read-only mount, don't do this (would generate I/O)
171176e58901SDarrick J. Wong 	 * unless we're in log recovery and cleaning the iunlinked list.
171276e58901SDarrick J. Wong 	 */
171376e58901SDarrick J. Wong 	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
17143ea06d73SDarrick J. Wong 		goto out;
1715c24b5dfaSDave Chinner 
1716383e32b0SDarrick J. Wong 	/* Metadata inodes require explicit resource cleanup. */
1717383e32b0SDarrick J. Wong 	if (xfs_is_metadata_inode(ip))
17183ea06d73SDarrick J. Wong 		goto out;
1719383e32b0SDarrick J. Wong 
17206231848cSDarrick J. Wong 	/* Try to clean out the cow blocks if there are any. */
172151d62690SChristoph Hellwig 	if (xfs_inode_has_cow_data(ip))
17226231848cSDarrick J. Wong 		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
17236231848cSDarrick J. Wong 
172454d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink != 0) {
1725c24b5dfaSDave Chinner 		/*
1726c24b5dfaSDave Chinner 		 * force is true because we are evicting an inode from the
1727c24b5dfaSDave Chinner 		 * cache. Post-eof blocks must be freed, lest we end up with
1728c24b5dfaSDave Chinner 		 * broken free space accounting.
17293b4683c2SBrian Foster 		 *
17303b4683c2SBrian Foster 		 * Note: don't bother with iolock here since lockdep complains
17313b4683c2SBrian Foster 		 * about acquiring it in reclaim context. We have the only
17323b4683c2SBrian Foster 		 * reference to the inode at this point anyways.
1733c24b5dfaSDave Chinner 		 */
17343b4683c2SBrian Foster 		if (xfs_can_free_eofblocks(ip, true))
1735d4d12c02SDave Chinner 			error = xfs_free_eofblocks(ip);
173674564fb4SBrian Foster 
17373ea06d73SDarrick J. Wong 		goto out;
1738c24b5dfaSDave Chinner 	}
1739c24b5dfaSDave Chinner 
1740c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode) &&
174113d2c10bSChristoph Hellwig 	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1742daf83964SChristoph Hellwig 	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1743c24b5dfaSDave Chinner 		truncate = 1;
1744c24b5dfaSDave Chinner 
1745c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
1746c24b5dfaSDave Chinner 	if (error)
17473ea06d73SDarrick J. Wong 		goto out;
1748c24b5dfaSDave Chinner 
1749c19b3b05SDave Chinner 	if (S_ISLNK(VFS_I(ip)->i_mode))
175036b21ddeSBrian Foster 		error = xfs_inactive_symlink(ip);
1751f7be2d7fSBrian Foster 	else if (truncate)
1752f7be2d7fSBrian Foster 		error = xfs_inactive_truncate(ip);
175336b21ddeSBrian Foster 	if (error)
17543ea06d73SDarrick J. Wong 		goto out;
1755c24b5dfaSDave Chinner 
1756c24b5dfaSDave Chinner 	/*
1757c24b5dfaSDave Chinner 	 * If there are attributes associated with the file then blow them away
1758c24b5dfaSDave Chinner 	 * now.  The code calls a routine that recursively deconstructs the
17596dfe5a04SDave Chinner 	 * attribute fork. It also blows away the in-core attribute fork.
1760c24b5dfaSDave Chinner 	 */
1761932b42c6SDarrick J. Wong 	if (xfs_inode_has_attr_fork(ip)) {
1762c24b5dfaSDave Chinner 		error = xfs_attr_inactive(ip);
1763c24b5dfaSDave Chinner 		if (error)
17643ea06d73SDarrick J. Wong 			goto out;
1765c24b5dfaSDave Chinner 	}
1766c24b5dfaSDave Chinner 
17677821ea30SChristoph Hellwig 	ASSERT(ip->i_forkoff == 0);
1768c24b5dfaSDave Chinner 
1769c24b5dfaSDave Chinner 	/*
1770c24b5dfaSDave Chinner 	 * Free the inode.
1771c24b5dfaSDave Chinner 	 */
1772d4d12c02SDave Chinner 	error = xfs_inactive_ifree(ip);
1773c24b5dfaSDave Chinner 
17743ea06d73SDarrick J. Wong out:
1775c24b5dfaSDave Chinner 	/*
17763ea06d73SDarrick J. Wong 	 * We're done making metadata updates for this inode, so we can release
17773ea06d73SDarrick J. Wong 	 * the attached dquots.
1778c24b5dfaSDave Chinner 	 */
1779c24b5dfaSDave Chinner 	xfs_qm_dqdetach(ip);
1780d4d12c02SDave Chinner 	return error;
1781c24b5dfaSDave Chinner }
1782c24b5dfaSDave Chinner 
17831da177e4SLinus Torvalds /*
17849b247179SDarrick J. Wong  * In-Core Unlinked List Lookups
17859b247179SDarrick J. Wong  * =============================
17869b247179SDarrick J. Wong  *
17879b247179SDarrick J. Wong  * Every inode is supposed to be reachable from some other piece of metadata
17889b247179SDarrick J. Wong  * with the exception of the root directory.  Inodes with a connection to a
17899b247179SDarrick J. Wong  * file descriptor but not linked from anywhere in the on-disk directory tree
17909b247179SDarrick J. Wong  * are collectively known as unlinked inodes, though the filesystem itself
17919b247179SDarrick J. Wong  * maintains links to these inodes so that on-disk metadata are consistent.
17929b247179SDarrick J. Wong  *
17939b247179SDarrick J. Wong  * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
17949b247179SDarrick J. Wong  * header contains a number of buckets that point to an inode, and each inode
17959b247179SDarrick J. Wong  * record has a pointer to the next inode in the hash chain.  This
17969b247179SDarrick J. Wong  * singly-linked list causes scaling problems in the iunlink remove function
17979b247179SDarrick J. Wong  * because we must walk that list to find the inode that points to the inode
17989b247179SDarrick J. Wong  * being removed from the unlinked hash bucket list.
17999b247179SDarrick J. Wong  *
18002fd26cc0SDave Chinner  * Hence we keep an in-memory double linked list to link each inode on an
18012fd26cc0SDave Chinner  * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer
18022fd26cc0SDave Chinner  * based lists would require having 64 list heads in the perag, one for each
18032fd26cc0SDave Chinner  * list. This is expensive in terms of memory (think millions of AGs) and cache
18042fd26cc0SDave Chinner  * misses on lookups. Instead, use the fact that inodes on the unlinked list
18052fd26cc0SDave Chinner  * must be referenced at the VFS level to keep them on the list and hence we
18062fd26cc0SDave Chinner  * have an existence guarantee for inodes on the unlinked list.
18079b247179SDarrick J. Wong  *
18082fd26cc0SDave Chinner  * Given we have an existence guarantee, we can use lockless inode cache lookups
18092fd26cc0SDave Chinner  * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
18102fd26cc0SDave Chinner  * for the double linked unlinked list, and we don't need any extra locking to
18112fd26cc0SDave Chinner  * keep the list safe as all manipulations are done under the AGI buffer lock.
18122fd26cc0SDave Chinner  * Keeping the list up to date does not require memory allocation, just finding
18132fd26cc0SDave Chinner  * the XFS inode and updating the next/prev unlinked list aginos.
18149b247179SDarrick J. Wong  */
18159b247179SDarrick J. Wong 
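/*
 * Concretely, each cached inode carries two AG inode numbers for this
 * purpose: i_next_unlinked mirrors the on-disk forward pointer in the
 * inode, while i_prev_unlinked exists only in memory and points back at
 * the previous inode in the same bucket (NULLAGINO when the inode is at
 * the head of the bucket, 0 when it is not on any unlinked list).  The
 * bucket an inode hashes into is simply agino % XFS_AGI_UNLINKED_BUCKETS.
 */
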
18169b247179SDarrick J. Wong /*
1817a83d5a8bSDave Chinner  * Find an inode on the unlinked list. This does not take references to the
1818a83d5a8bSDave Chinner  * inode because we have existence guarantees: the AGI buffer lock is held and
1819a83d5a8bSDave Chinner  * only unlinked, referenced inodes can be on the unlinked inode list.  If we
1820a83d5a8bSDave Chinner  * don't find the inode in cache, then let the caller handle the situation.
18219b247179SDarrick J. Wong  */
1822a83d5a8bSDave Chinner static struct xfs_inode *
1823a83d5a8bSDave Chinner xfs_iunlink_lookup(
18249b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18259b247179SDarrick J. Wong 	xfs_agino_t		agino)
18269b247179SDarrick J. Wong {
1827a83d5a8bSDave Chinner 	struct xfs_inode	*ip;
18289b247179SDarrick J. Wong 
1829a83d5a8bSDave Chinner 	rcu_read_lock();
1830a83d5a8bSDave Chinner 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
1831*68b957f6SDarrick J. Wong 	if (!ip) {
1832*68b957f6SDarrick J. Wong 		/* Caller can handle inode not being in memory. */
1833*68b957f6SDarrick J. Wong 		rcu_read_unlock();
1834*68b957f6SDarrick J. Wong 		return NULL;
1835*68b957f6SDarrick J. Wong 	}
18369b247179SDarrick J. Wong 
18379b247179SDarrick J. Wong 	/*
1838*68b957f6SDarrick J. Wong 	 * Inode in RCU freeing limbo should not happen.  Warn about this and
1839*68b957f6SDarrick J. Wong 	 * let the caller handle the failure.
18409b247179SDarrick J. Wong 	 */
1841*68b957f6SDarrick J. Wong 	if (WARN_ON_ONCE(!ip->i_ino)) {
1842a83d5a8bSDave Chinner 		rcu_read_unlock();
1843a83d5a8bSDave Chinner 		return NULL;
1844a83d5a8bSDave Chinner 	}
1845a83d5a8bSDave Chinner 	ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1846a83d5a8bSDave Chinner 	rcu_read_unlock();
1847a83d5a8bSDave Chinner 	return ip;
1848a83d5a8bSDave Chinner }
1849a83d5a8bSDave Chinner 
1850*68b957f6SDarrick J. Wong /*
1851*68b957f6SDarrick J. Wong  * Update the prev pointer of the next agino.  Returns -ENOLINK if the inode
1852*68b957f6SDarrick J. Wong  * is not in cache.
1853*68b957f6SDarrick J. Wong  */
18549b247179SDarrick J. Wong static int
18552fd26cc0SDave Chinner xfs_iunlink_update_backref(
18569b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18579b247179SDarrick J. Wong 	xfs_agino_t		prev_agino,
18582fd26cc0SDave Chinner 	xfs_agino_t		next_agino)
18599b247179SDarrick J. Wong {
18602fd26cc0SDave Chinner 	struct xfs_inode	*ip;
18619b247179SDarrick J. Wong 
18622fd26cc0SDave Chinner 	/* No update necessary if we are at the end of the list. */
18632fd26cc0SDave Chinner 	if (next_agino == NULLAGINO)
18649b247179SDarrick J. Wong 		return 0;
18659b247179SDarrick J. Wong 
18662fd26cc0SDave Chinner 	ip = xfs_iunlink_lookup(pag, next_agino);
18672fd26cc0SDave Chinner 	if (!ip)
1868*68b957f6SDarrick J. Wong 		return -ENOLINK;
1869*68b957f6SDarrick J. Wong 
18702fd26cc0SDave Chinner 	ip->i_prev_unlinked = prev_agino;
18719b247179SDarrick J. Wong 	return 0;
18729b247179SDarrick J. Wong }
18739b247179SDarrick J. Wong 
18749b247179SDarrick J. Wong /*
18759a4a5118SDarrick J. Wong  * Point the AGI unlinked bucket at an inode and log the results.  The caller
18769a4a5118SDarrick J. Wong  * is responsible for validating the old value.
18779a4a5118SDarrick J. Wong  */
18789a4a5118SDarrick J. Wong STATIC int
18799a4a5118SDarrick J. Wong xfs_iunlink_update_bucket(
18809a4a5118SDarrick J. Wong 	struct xfs_trans	*tp,
1881f40aadb2SDave Chinner 	struct xfs_perag	*pag,
18829a4a5118SDarrick J. Wong 	struct xfs_buf		*agibp,
18839a4a5118SDarrick J. Wong 	unsigned int		bucket_index,
18849a4a5118SDarrick J. Wong 	xfs_agino_t		new_agino)
18859a4a5118SDarrick J. Wong {
1886370c782bSChristoph Hellwig 	struct xfs_agi		*agi = agibp->b_addr;
18879a4a5118SDarrick J. Wong 	xfs_agino_t		old_value;
18889a4a5118SDarrick J. Wong 	int			offset;
18899a4a5118SDarrick J. Wong 
18902d6ca832SDave Chinner 	ASSERT(xfs_verify_agino_or_null(pag, new_agino));
18919a4a5118SDarrick J. Wong 
18929a4a5118SDarrick J. Wong 	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1893f40aadb2SDave Chinner 	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
18949a4a5118SDarrick J. Wong 			old_value, new_agino);
18959a4a5118SDarrick J. Wong 
18969a4a5118SDarrick J. Wong 	/*
18979a4a5118SDarrick J. Wong 	 * We should never find the head of the list already set to the value
18989a4a5118SDarrick J. Wong 	 * passed in because either we're adding or removing ourselves from the
18999a4a5118SDarrick J. Wong 	 * head of the list.
19009a4a5118SDarrick J. Wong 	 */
1901a5155b87SDarrick J. Wong 	if (old_value == new_agino) {
19028d57c216SDarrick J. Wong 		xfs_buf_mark_corrupt(agibp);
19039a4a5118SDarrick J. Wong 		return -EFSCORRUPTED;
1904a5155b87SDarrick J. Wong 	}
19059a4a5118SDarrick J. Wong 
19069a4a5118SDarrick J. Wong 	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
19079a4a5118SDarrick J. Wong 	offset = offsetof(struct xfs_agi, agi_unlinked) +
19089a4a5118SDarrick J. Wong 			(sizeof(xfs_agino_t) * bucket_index);
19099a4a5118SDarrick J. Wong 	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
19109a4a5118SDarrick J. Wong 	return 0;
19119a4a5118SDarrick J. Wong }
19129a4a5118SDarrick J. Wong 
1913*68b957f6SDarrick J. Wong /*
1914*68b957f6SDarrick J. Wong  * Load the inode @next_agino into the cache and set its prev_unlinked pointer
1915*68b957f6SDarrick J. Wong  * to @prev_agino.  Caller must hold the AGI to synchronize with other changes
1916*68b957f6SDarrick J. Wong  * to the unlinked list.
1917*68b957f6SDarrick J. Wong  */
1918*68b957f6SDarrick J. Wong STATIC int
1919*68b957f6SDarrick J. Wong xfs_iunlink_reload_next(
1920*68b957f6SDarrick J. Wong 	struct xfs_trans	*tp,
1921*68b957f6SDarrick J. Wong 	struct xfs_buf		*agibp,
1922*68b957f6SDarrick J. Wong 	xfs_agino_t		prev_agino,
1923*68b957f6SDarrick J. Wong 	xfs_agino_t		next_agino)
1924*68b957f6SDarrick J. Wong {
1925*68b957f6SDarrick J. Wong 	struct xfs_perag	*pag = agibp->b_pag;
1926*68b957f6SDarrick J. Wong 	struct xfs_mount	*mp = pag->pag_mount;
1927*68b957f6SDarrick J. Wong 	struct xfs_inode	*next_ip = NULL;
1928*68b957f6SDarrick J. Wong 	xfs_ino_t		ino;
1929*68b957f6SDarrick J. Wong 	int			error;
1930*68b957f6SDarrick J. Wong 
1931*68b957f6SDarrick J. Wong 	ASSERT(next_agino != NULLAGINO);
1932*68b957f6SDarrick J. Wong 
1933*68b957f6SDarrick J. Wong #ifdef DEBUG
1934*68b957f6SDarrick J. Wong 	rcu_read_lock();
1935*68b957f6SDarrick J. Wong 	next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
1936*68b957f6SDarrick J. Wong 	ASSERT(next_ip == NULL);
1937*68b957f6SDarrick J. Wong 	rcu_read_unlock();
1938*68b957f6SDarrick J. Wong #endif
1939*68b957f6SDarrick J. Wong 
1940*68b957f6SDarrick J. Wong 	xfs_info_ratelimited(mp,
1941*68b957f6SDarrick J. Wong  "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating recovery.",
1942*68b957f6SDarrick J. Wong 			next_agino, pag->pag_agno);
1943*68b957f6SDarrick J. Wong 
1944*68b957f6SDarrick J. Wong 	/*
1945*68b957f6SDarrick J. Wong 	 * Use an untrusted lookup just to be cautious in case the AGI has been
1946*68b957f6SDarrick J. Wong 	 * corrupted and now points at a free inode.  That shouldn't happen,
1947*68b957f6SDarrick J. Wong 	 * but we'd rather shut down now since we're already running in a weird
1948*68b957f6SDarrick J. Wong 	 * situation.
1949*68b957f6SDarrick J. Wong 	 */
1950*68b957f6SDarrick J. Wong 	ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino);
1951*68b957f6SDarrick J. Wong 	error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip);
1952*68b957f6SDarrick J. Wong 	if (error)
1953*68b957f6SDarrick J. Wong 		return error;
1954*68b957f6SDarrick J. Wong 
1955*68b957f6SDarrick J. Wong 	/* If this is not an unlinked inode, something is very wrong. */
1956*68b957f6SDarrick J. Wong 	if (VFS_I(next_ip)->i_nlink != 0) {
1957*68b957f6SDarrick J. Wong 		error = -EFSCORRUPTED;
1958*68b957f6SDarrick J. Wong 		goto rele;
1959*68b957f6SDarrick J. Wong 	}
1960*68b957f6SDarrick J. Wong 
1961*68b957f6SDarrick J. Wong 	next_ip->i_prev_unlinked = prev_agino;
1962*68b957f6SDarrick J. Wong 	trace_xfs_iunlink_reload_next(next_ip);
1963*68b957f6SDarrick J. Wong rele:
1964*68b957f6SDarrick J. Wong 	ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE));
1965*68b957f6SDarrick J. Wong 	xfs_irele(next_ip);
1966*68b957f6SDarrick J. Wong 	return error;
1967*68b957f6SDarrick J. Wong }
1968*68b957f6SDarrick J. Wong 
1969a4454cd6SDave Chinner static int
1970a4454cd6SDave Chinner xfs_iunlink_insert_inode(
1971f2fc16a3SDarrick J. Wong 	struct xfs_trans	*tp,
1972f40aadb2SDave Chinner 	struct xfs_perag	*pag,
1973a4454cd6SDave Chinner 	struct xfs_buf		*agibp,
1974a4454cd6SDave Chinner 	struct xfs_inode	*ip)
1975f2fc16a3SDarrick J. Wong {
1976f2fc16a3SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
1977a4454cd6SDave Chinner 	struct xfs_agi		*agi = agibp->b_addr;
1978a4454cd6SDave Chinner 	xfs_agino_t		next_agino;
1979a4454cd6SDave Chinner 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1980a4454cd6SDave Chinner 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1981f2fc16a3SDarrick J. Wong 	int			error;
1982f2fc16a3SDarrick J. Wong 
1983a4454cd6SDave Chinner 	/*
1984a4454cd6SDave Chinner 	 * Get the index into the agi hash table for the list this inode will
1985a4454cd6SDave Chinner 	 * go on.  Make sure the pointer isn't garbage and that this inode
1986a4454cd6SDave Chinner 	 * isn't already on the list.
1987a4454cd6SDave Chinner 	 */
1988a4454cd6SDave Chinner 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1989a4454cd6SDave Chinner 	if (next_agino == agino ||
1990a4454cd6SDave Chinner 	    !xfs_verify_agino_or_null(pag, next_agino)) {
1991a4454cd6SDave Chinner 		xfs_buf_mark_corrupt(agibp);
1992a4454cd6SDave Chinner 		return -EFSCORRUPTED;
1993f2fc16a3SDarrick J. Wong 	}
1994f2fc16a3SDarrick J. Wong 
1995f2fc16a3SDarrick J. Wong 	/*
19962fd26cc0SDave Chinner 	 * Update the prev pointer in the next inode to point back to this
19972fd26cc0SDave Chinner 	 * inode.
1998f2fc16a3SDarrick J. Wong 	 */
19992fd26cc0SDave Chinner 	error = xfs_iunlink_update_backref(pag, agino, next_agino);
2000*68b957f6SDarrick J. Wong 	if (error == -ENOLINK)
2001*68b957f6SDarrick J. Wong 		error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
20022fd26cc0SDave Chinner 	if (error)
20032fd26cc0SDave Chinner 		return error;
20042fd26cc0SDave Chinner 
2005a5155b87SDarrick J. Wong 	if (next_agino != NULLAGINO) {
2006a4454cd6SDave Chinner 		/*
2007a4454cd6SDave Chinner 		 * There is already another inode in the bucket, so point this
2008a4454cd6SDave Chinner 		 * inode to the current head of the list.
2009a4454cd6SDave Chinner 		 */
2010062efdb0SDave Chinner 		error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
2011a4454cd6SDave Chinner 		if (error)
2012a4454cd6SDave Chinner 			return error;
20134fcc94d6SDave Chinner 		ip->i_next_unlinked = next_agino;
2014f2fc16a3SDarrick J. Wong 	}
2015f2fc16a3SDarrick J. Wong 
2016a4454cd6SDave Chinner 	/* Point the head of the list to point to this inode. */
2017f12b9668SDarrick J. Wong 	ip->i_prev_unlinked = NULLAGINO;
2018a4454cd6SDave Chinner 	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
2019f2fc16a3SDarrick J. Wong }
2020f2fc16a3SDarrick J. Wong 
20219a4a5118SDarrick J. Wong /*
2022c4a6bf7fSDarrick J. Wong  * This is called when the inode's link count has gone to 0 or we are creating
2023c4a6bf7fSDarrick J. Wong  * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
202454d7b5c1SDave Chinner  *
202554d7b5c1SDave Chinner  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
202654d7b5c1SDave Chinner  * list when the inode is freed.
20271da177e4SLinus Torvalds  */
202854d7b5c1SDave Chinner STATIC int
20291da177e4SLinus Torvalds xfs_iunlink(
203054d7b5c1SDave Chinner 	struct xfs_trans	*tp,
203154d7b5c1SDave Chinner 	struct xfs_inode	*ip)
20321da177e4SLinus Torvalds {
20335837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2034f40aadb2SDave Chinner 	struct xfs_perag	*pag;
20355837f625SDarrick J. Wong 	struct xfs_buf		*agibp;
20361da177e4SLinus Torvalds 	int			error;
20371da177e4SLinus Torvalds 
2038c4a6bf7fSDarrick J. Wong 	ASSERT(VFS_I(ip)->i_nlink == 0);
2039c19b3b05SDave Chinner 	ASSERT(VFS_I(ip)->i_mode != 0);
20404664c66cSDarrick J. Wong 	trace_xfs_iunlink(ip);
20411da177e4SLinus Torvalds 
2042f40aadb2SDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2043f40aadb2SDave Chinner 
20445837f625SDarrick J. Wong 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
204561021debSDave Chinner 	error = xfs_read_agi(pag, tp, &agibp);
2046859d7182SVlad Apostolov 	if (error)
2047f40aadb2SDave Chinner 		goto out;
20485e1be0fbSChristoph Hellwig 
2049a4454cd6SDave Chinner 	error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
2050f40aadb2SDave Chinner out:
2051f40aadb2SDave Chinner 	xfs_perag_put(pag);
2052f40aadb2SDave Chinner 	return error;
20531da177e4SLinus Torvalds }
20541da177e4SLinus Torvalds 
2055a4454cd6SDave Chinner static int
2056a4454cd6SDave Chinner xfs_iunlink_remove_inode(
205723ffa52cSDarrick J. Wong 	struct xfs_trans	*tp,
2058f40aadb2SDave Chinner 	struct xfs_perag	*pag,
2059a4454cd6SDave Chinner 	struct xfs_buf		*agibp,
20605837f625SDarrick J. Wong 	struct xfs_inode	*ip)
20611da177e4SLinus Torvalds {
20625837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2063a4454cd6SDave Chinner 	struct xfs_agi		*agi = agibp->b_addr;
20645837f625SDarrick J. Wong 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2065b1d2a068SDarrick J. Wong 	xfs_agino_t		head_agino;
20665837f625SDarrick J. Wong 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
20671da177e4SLinus Torvalds 	int			error;
20681da177e4SLinus Torvalds 
20694664c66cSDarrick J. Wong 	trace_xfs_iunlink_remove(ip);
20704664c66cSDarrick J. Wong 
20711da177e4SLinus Torvalds 	/*
207286bfd375SDarrick J. Wong 	 * Get the index into the agi hash table for the list this inode will
207386bfd375SDarrick J. Wong 	 * go on.  Make sure the head pointer isn't garbage.
20741da177e4SLinus Torvalds 	 */
2075b1d2a068SDarrick J. Wong 	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
20762d6ca832SDave Chinner 	if (!xfs_verify_agino(pag, head_agino)) {
2077d2e73665SDarrick J. Wong 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2078d2e73665SDarrick J. Wong 				agi, sizeof(*agi));
2079d2e73665SDarrick J. Wong 		return -EFSCORRUPTED;
2080d2e73665SDarrick J. Wong 	}
20811da177e4SLinus Torvalds 
20821da177e4SLinus Torvalds 	/*
2083b1d2a068SDarrick J. Wong 	 * Set our inode's on-disk next_unlinked pointer to NULLAGINO.  The
2084b1d2a068SDarrick J. Wong 	 * in-memory i_next_unlinked still records what was next, so we can
2085b1d2a068SDarrick J. Wong 	 * update whatever was previous to us in the list to point to it.
20861da177e4SLinus Torvalds 	 */
2087062efdb0SDave Chinner 	error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
2088f2fc16a3SDarrick J. Wong 	if (error)
20891da177e4SLinus Torvalds 		return error;
20909a4a5118SDarrick J. Wong 
20919b247179SDarrick J. Wong 	/*
20922fd26cc0SDave Chinner 	 * Update the prev pointer in the next inode to point back to the
20932fd26cc0SDave Chinner 	 * previous inode in the chain.
20949b247179SDarrick J. Wong 	 */
20952fd26cc0SDave Chinner 	error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
20962fd26cc0SDave Chinner 			ip->i_next_unlinked);
2097*68b957f6SDarrick J. Wong 	if (error == -ENOLINK)
2098*68b957f6SDarrick J. Wong 		error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
2099*68b957f6SDarrick J. Wong 				ip->i_next_unlinked);
21009b247179SDarrick J. Wong 	if (error)
210192a00544SGao Xiang 		return error;
21029b247179SDarrick J. Wong 
210392a00544SGao Xiang 	if (head_agino != agino) {
2104a83d5a8bSDave Chinner 		struct xfs_inode	*prev_ip;
2105f2fc16a3SDarrick J. Wong 
21062fd26cc0SDave Chinner 		prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
21072fd26cc0SDave Chinner 		if (!prev_ip)
21082fd26cc0SDave Chinner 			return -EFSCORRUPTED;
2109475ee413SChristoph Hellwig 
2110062efdb0SDave Chinner 		error = xfs_iunlink_log_inode(tp, prev_ip, pag,
21115301f870SDave Chinner 				ip->i_next_unlinked);
2112a83d5a8bSDave Chinner 		prev_ip->i_next_unlinked = ip->i_next_unlinked;
21132fd26cc0SDave Chinner 	} else {
21142fd26cc0SDave Chinner 		/* Point the head of the list to the next unlinked inode. */
21152fd26cc0SDave Chinner 		error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
21162fd26cc0SDave Chinner 				ip->i_next_unlinked);
21171da177e4SLinus Torvalds 	}
21189b247179SDarrick J. Wong 
2119a83d5a8bSDave Chinner 	ip->i_next_unlinked = NULLAGINO;
2120f12b9668SDarrick J. Wong 	ip->i_prev_unlinked = 0;
21212fd26cc0SDave Chinner 	return error;
21221da177e4SLinus Torvalds }
21231da177e4SLinus Torvalds 
21245b3eed75SDave Chinner /*
2125a4454cd6SDave Chinner  * Pull the on-disk inode from the AGI unlinked list.
2126a4454cd6SDave Chinner  */
2127a4454cd6SDave Chinner STATIC int
2128a4454cd6SDave Chinner xfs_iunlink_remove(
2129a4454cd6SDave Chinner 	struct xfs_trans	*tp,
2130a4454cd6SDave Chinner 	struct xfs_perag	*pag,
2131a4454cd6SDave Chinner 	struct xfs_inode	*ip)
2132a4454cd6SDave Chinner {
2133a4454cd6SDave Chinner 	struct xfs_buf		*agibp;
2134a4454cd6SDave Chinner 	int			error;
2135a4454cd6SDave Chinner 
2136a4454cd6SDave Chinner 	trace_xfs_iunlink_remove(ip);
2137a4454cd6SDave Chinner 
2138a4454cd6SDave Chinner 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2139a4454cd6SDave Chinner 	error = xfs_read_agi(pag, tp, &agibp);
21401da177e4SLinus Torvalds 	if (error)
21411baaed8fSDave Chinner 		return error;
21421da177e4SLinus Torvalds 
2143a4454cd6SDave Chinner 	return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
21441da177e4SLinus Torvalds }
21451da177e4SLinus Torvalds 
21461da177e4SLinus Torvalds /*
214771e3e356SDave Chinner  * Look up the specified inode number and, if it is not already marked
214871e3e356SDave Chinner  * XFS_ISTALE, mark it stale. We should only find clean inodes in this
214971e3e356SDave Chinner  * lookup that aren't already stale.
21505806165aSDave Chinner  */
215171e3e356SDave Chinner static void
215271e3e356SDave Chinner xfs_ifree_mark_inode_stale(
2153f40aadb2SDave Chinner 	struct xfs_perag	*pag,
21545806165aSDave Chinner 	struct xfs_inode	*free_ip,
2155d9fdd0adSBrian Foster 	xfs_ino_t		inum)
21565806165aSDave Chinner {
2157f40aadb2SDave Chinner 	struct xfs_mount	*mp = pag->pag_mount;
215871e3e356SDave Chinner 	struct xfs_inode_log_item *iip;
21595806165aSDave Chinner 	struct xfs_inode	*ip;
21605806165aSDave Chinner 
21615806165aSDave Chinner retry:
21625806165aSDave Chinner 	rcu_read_lock();
21635806165aSDave Chinner 	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
21645806165aSDave Chinner 
21655806165aSDave Chinner 	/* Inode not in memory, nothing to do */
216671e3e356SDave Chinner 	if (!ip) {
216771e3e356SDave Chinner 		rcu_read_unlock();
216871e3e356SDave Chinner 		return;
216971e3e356SDave Chinner 	}
21705806165aSDave Chinner 
21715806165aSDave Chinner 	/*
21725806165aSDave Chinner 	 * Because this is an RCU-protected lookup, we could find a recently
21735806165aSDave Chinner 	 * freed or even reallocated inode during the lookup. We need to check
21745806165aSDave Chinner 	 * under the i_flags_lock for a valid inode here. Skip it if it is not
21755806165aSDave Chinner 	 * valid, the wrong inode, or stale.
21765806165aSDave Chinner 	 */
21775806165aSDave Chinner 	spin_lock(&ip->i_flags_lock);
2178718ecc50SDave Chinner 	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2179718ecc50SDave Chinner 		goto out_iflags_unlock;
21805806165aSDave Chinner 
21815806165aSDave Chinner 	/*
21825806165aSDave Chinner 	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
21835806165aSDave Chinner 	 * other inodes that we did not find in the list attached to the buffer
21845806165aSDave Chinner 	 * and are not already marked stale. If we can't lock it, back off and
21855806165aSDave Chinner 	 * retry.
21865806165aSDave Chinner 	 */
21875806165aSDave Chinner 	if (ip != free_ip) {
21885806165aSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
218971e3e356SDave Chinner 			spin_unlock(&ip->i_flags_lock);
21905806165aSDave Chinner 			rcu_read_unlock();
21915806165aSDave Chinner 			delay(1);
21925806165aSDave Chinner 			goto retry;
21935806165aSDave Chinner 		}
21945806165aSDave Chinner 	}
219571e3e356SDave Chinner 	ip->i_flags |= XFS_ISTALE;
21965806165aSDave Chinner 
219771e3e356SDave Chinner 	/*
2198718ecc50SDave Chinner 	 * If the inode is flushing, it is already attached to the buffer.  All
219971e3e356SDave Chinner 	 * we need to do here is mark the inode stale so buffer IO completion
220071e3e356SDave Chinner 	 * will remove it from the AIL.
220171e3e356SDave Chinner 	 */
220271e3e356SDave Chinner 	iip = ip->i_itemp;
2203718ecc50SDave Chinner 	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
220471e3e356SDave Chinner 		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
220571e3e356SDave Chinner 		ASSERT(iip->ili_last_fields);
220671e3e356SDave Chinner 		goto out_iunlock;
220771e3e356SDave Chinner 	}
22085806165aSDave Chinner 
22095806165aSDave Chinner 	/*
221048d55e2aSDave Chinner 	 * Inodes not attached to the buffer can be released immediately.
221148d55e2aSDave Chinner 	 * Everything else has to go through xfs_iflush_abort() on journal
221248d55e2aSDave Chinner 	 * commit as the flock synchronises removal of the inode from the
221348d55e2aSDave Chinner 	 * cluster buffer against inode reclaim.
22145806165aSDave Chinner 	 */
2215718ecc50SDave Chinner 	if (!iip || list_empty(&iip->ili_item.li_bio_list))
221671e3e356SDave Chinner 		goto out_iunlock;
2217718ecc50SDave Chinner 
2218718ecc50SDave Chinner 	__xfs_iflags_set(ip, XFS_IFLUSHING);
2219718ecc50SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2220718ecc50SDave Chinner 	rcu_read_unlock();
22215806165aSDave Chinner 
222271e3e356SDave Chinner 	/* we have a dirty inode in memory that has not yet been flushed. */
222371e3e356SDave Chinner 	spin_lock(&iip->ili_lock);
222471e3e356SDave Chinner 	iip->ili_last_fields = iip->ili_fields;
222571e3e356SDave Chinner 	iip->ili_fields = 0;
222671e3e356SDave Chinner 	iip->ili_fsync_fields = 0;
222771e3e356SDave Chinner 	spin_unlock(&iip->ili_lock);
222871e3e356SDave Chinner 	ASSERT(iip->ili_last_fields);
222971e3e356SDave Chinner 
2230718ecc50SDave Chinner 	if (ip != free_ip)
2231718ecc50SDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2232718ecc50SDave Chinner 	return;
2233718ecc50SDave Chinner 
223471e3e356SDave Chinner out_iunlock:
223571e3e356SDave Chinner 	if (ip != free_ip)
223671e3e356SDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2237718ecc50SDave Chinner out_iflags_unlock:
2238718ecc50SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2239718ecc50SDave Chinner 	rcu_read_unlock();
22405806165aSDave Chinner }
22415806165aSDave Chinner 
22425806165aSDave Chinner /*
22431da177e4SLinus Torvalds  * A big issue when freeing the inode cluster is that we _cannot_ skip any
22441da177e4SLinus Torvalds  * inodes that are in memory - they all must be marked stale and attached to
22451da177e4SLinus Torvalds  * the cluster buffer.
22461da177e4SLinus Torvalds  */
2247f40aadb2SDave Chinner static int
22481da177e4SLinus Torvalds xfs_ifree_cluster(
224971e3e356SDave Chinner 	struct xfs_trans	*tp,
2250f40aadb2SDave Chinner 	struct xfs_perag	*pag,
2251f40aadb2SDave Chinner 	struct xfs_inode	*free_ip,
22521da177e4SLinus Torvalds 	struct xfs_icluster	*xic)
22531da177e4SLinus Torvalds {
225471e3e356SDave Chinner 	struct xfs_mount	*mp = free_ip->i_mount;
225571e3e356SDave Chinner 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
225671e3e356SDave Chinner 	struct xfs_buf		*bp;
225771e3e356SDave Chinner 	xfs_daddr_t		blkno;
225871e3e356SDave Chinner 	xfs_ino_t		inum = xic->first_ino;
22591da177e4SLinus Torvalds 	int			nbufs;
22601da177e4SLinus Torvalds 	int			i, j;
22611da177e4SLinus Torvalds 	int			ioffset;
2262ce92464cSDarrick J. Wong 	int			error;
22631da177e4SLinus Torvalds 
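	/*
	 * The inode chunk spans ialloc_blks filesystem blocks; walk it one
	 * inode cluster buffer (blocks_per_cluster blocks) at a time.
	 */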
2264ef325959SDarrick J. Wong 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
22651da177e4SLinus Torvalds 
2266ef325959SDarrick J. Wong 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
22671da177e4SLinus Torvalds 		/*
22681da177e4SLinus Torvalds 		 * The allocation bitmap tells us which inodes of the chunk were
22691da177e4SLinus Torvalds 		 * physically allocated. Skip the cluster if an inode falls into
22701da177e4SLinus Torvalds 		 * a sparse region.
22711da177e4SLinus Torvalds 		 */
22721da177e4SLinus Torvalds 		ioffset = inum - xic->first_ino;
22731da177e4SLinus Torvalds 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2274ef325959SDarrick J. Wong 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
22751da177e4SLinus Torvalds 			continue;
22761da177e4SLinus Torvalds 		}
22771da177e4SLinus Torvalds 
22781da177e4SLinus Torvalds 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
22791da177e4SLinus Torvalds 					 XFS_INO_TO_AGBNO(mp, inum));
22801da177e4SLinus Torvalds 
22811da177e4SLinus Torvalds 		/*
22821da177e4SLinus Torvalds 		 * We obtain and lock the backing buffer first in the process
2283718ecc50SDave Chinner 		 * here to ensure dirty inodes attached to the buffer remain in
2284718ecc50SDave Chinner 		 * the flushing state while we mark them stale.
2285718ecc50SDave Chinner 		 *
22861da177e4SLinus Torvalds 		 * If we scan the in-memory inodes first, then buffer IO can
22871da177e4SLinus Torvalds 		 * complete before we get a lock on it, and hence we may fail
22881da177e4SLinus Torvalds 		 * to mark all the active inodes on the buffer stale.
22891da177e4SLinus Torvalds 		 */
2290ce92464cSDarrick J. Wong 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2291ef325959SDarrick J. Wong 				mp->m_bsize * igeo->blocks_per_cluster,
2292ce92464cSDarrick J. Wong 				XBF_UNMAPPED, &bp);
229371e3e356SDave Chinner 		if (error)
2294ce92464cSDarrick J. Wong 			return error;
22951da177e4SLinus Torvalds 
22961da177e4SLinus Torvalds 		/*
22971da177e4SLinus Torvalds 		 * This buffer may not have been correctly initialised as we
22981da177e4SLinus Torvalds 		 * didn't read it from disk. That's not important because we are
22991da177e4SLinus Torvalds 		 * only using it to mark the buffer as stale in the log, and to
23001da177e4SLinus Torvalds 		 * attach stale cached inodes on it. That means it will never be
23011da177e4SLinus Torvalds 		 * dispatched for IO. If it is, we want to know about it, and we
23021da177e4SLinus Torvalds 		 * want it to fail. We can achieve this by adding a write
23031da177e4SLinus Torvalds 		 * verifier to the buffer.
23041da177e4SLinus Torvalds 		 */
23051da177e4SLinus Torvalds 		bp->b_ops = &xfs_inode_buf_ops;
23061da177e4SLinus Torvalds 
23071da177e4SLinus Torvalds 		/*
230871e3e356SDave Chinner 		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
230971e3e356SDave Chinner 		 * too. This requires lookups, and will skip inodes that we've
231071e3e356SDave Chinner 		 * already marked XFS_ISTALE.
23111da177e4SLinus Torvalds 		 */
231271e3e356SDave Chinner 		for (i = 0; i < igeo->inodes_per_cluster; i++)
2313f40aadb2SDave Chinner 			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
23141da177e4SLinus Torvalds 
23151da177e4SLinus Torvalds 		xfs_trans_stale_inode_buf(tp, bp);
23161da177e4SLinus Torvalds 		xfs_trans_binval(tp, bp);
23171da177e4SLinus Torvalds 	}
23181da177e4SLinus Torvalds 	return 0;
23191da177e4SLinus Torvalds }
23201da177e4SLinus Torvalds 
23211da177e4SLinus Torvalds /*
23229a5280b3SDave Chinner  * This is called to return an inode to the inode free list.  The inode should
23239a5280b3SDave Chinner  * already be truncated to 0 length and have no pages associated with it.  This
23249a5280b3SDave Chinner  * routine also assumes that the inode is already a part of the transaction.
23251da177e4SLinus Torvalds  *
23269a5280b3SDave Chinner  * The on-disk copy of the inode will have been added to the list of unlinked
23279a5280b3SDave Chinner  * inodes in the AGI. We need to remove the inode from that list atomically with
23289a5280b3SDave Chinner  * respect to freeing it here.
23291da177e4SLinus Torvalds  */
23301da177e4SLinus Torvalds int
23311da177e4SLinus Torvalds xfs_ifree(
23321da177e4SLinus Torvalds 	struct xfs_trans	*tp,
23331da177e4SLinus Torvalds 	struct xfs_inode	*ip)
23341da177e4SLinus Torvalds {
2335f40aadb2SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
2336f40aadb2SDave Chinner 	struct xfs_perag	*pag;
23371da177e4SLinus Torvalds 	struct xfs_icluster	xic = { 0 };
23381319ebefSDave Chinner 	struct xfs_inode_log_item *iip = ip->i_itemp;
2339f40aadb2SDave Chinner 	int			error;
23401da177e4SLinus Torvalds 
23411da177e4SLinus Torvalds 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
23421da177e4SLinus Torvalds 	ASSERT(VFS_I(ip)->i_nlink == 0);
2343daf83964SChristoph Hellwig 	ASSERT(ip->i_df.if_nextents == 0);
234413d2c10bSChristoph Hellwig 	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
23456e73a545SChristoph Hellwig 	ASSERT(ip->i_nblocks == 0);
23461da177e4SLinus Torvalds 
2347f40aadb2SDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2348f40aadb2SDave Chinner 
23491da177e4SLinus Torvalds 	/*
23509a5280b3SDave Chinner 	 * Free the inode first so that we guarantee that the AGI lock is going
23519a5280b3SDave Chinner 	 * to be taken before we remove the inode from the unlinked list. This
23529a5280b3SDave Chinner 	 * makes the AGI lock -> unlinked list modification order the same as
23539a5280b3SDave Chinner 	 * used in O_TMPFILE creation.
23541da177e4SLinus Torvalds 	 */
2355f40aadb2SDave Chinner 	error = xfs_difree(tp, pag, ip->i_ino, &xic);
23561baaed8fSDave Chinner 	if (error)
23576f5097e3SBrian Foster 		goto out;
23589a5280b3SDave Chinner 
23599a5280b3SDave Chinner 	error = xfs_iunlink_remove(tp, pag, ip);
23609a5280b3SDave Chinner 	if (error)
2361f40aadb2SDave Chinner 		goto out;
23621baaed8fSDave Chinner 
2363b2c20045SChristoph Hellwig 	/*
2364b2c20045SChristoph Hellwig 	 * Free any local-format data sitting around before we reset the
2365b2c20045SChristoph Hellwig 	 * data fork to extents format.  Note that the attr fork data has
2366b2c20045SChristoph Hellwig 	 * already been freed by xfs_attr_inactive.
2367b2c20045SChristoph Hellwig 	 */
2368f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2369b2c20045SChristoph Hellwig 		kmem_free(ip->i_df.if_u1.if_data);
2370b2c20045SChristoph Hellwig 		ip->i_df.if_u1.if_data = NULL;
2371b2c20045SChristoph Hellwig 		ip->i_df.if_bytes = 0;
2372b2c20045SChristoph Hellwig 	}
237398c4f78dSDarrick J. Wong 
2374c19b3b05SDave Chinner 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2375db07349dSChristoph Hellwig 	ip->i_diflags = 0;
2376f40aadb2SDave Chinner 	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
23777821ea30SChristoph Hellwig 	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2378f7e67b20SChristoph Hellwig 	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
23799b3beb02SChristoph Hellwig 	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
23809b3beb02SChristoph Hellwig 		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2381dc1baa71SEric Sandeen 
2382dc1baa71SEric Sandeen 	/* Don't attempt to replay owner changes for a deleted inode */
23831319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
23841319ebefSDave Chinner 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
23851319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
2386dc1baa71SEric Sandeen 
23871da177e4SLinus Torvalds 	/*
23881da177e4SLinus Torvalds 	 * Bump the generation count so no one will be confused
23891da177e4SLinus Torvalds 	 * by reincarnations of this inode.
23901da177e4SLinus Torvalds 	 */
23919e9a2674SDave Chinner 	VFS_I(ip)->i_generation++;
23921da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
23931da177e4SLinus Torvalds 
239409b56604SBrian Foster 	if (xic.deleted)
2395f40aadb2SDave Chinner 		error = xfs_ifree_cluster(tp, pag, ip, &xic);
2396f40aadb2SDave Chinner out:
2397f40aadb2SDave Chinner 	xfs_perag_put(pag);
23982a30f36dSChandra Seetharaman 	return error;
23991da177e4SLinus Torvalds }
24001da177e4SLinus Torvalds 
24011da177e4SLinus Torvalds /*
240260ec6783SChristoph Hellwig  * This is called to unpin an inode.  The caller must have the inode locked
240360ec6783SChristoph Hellwig  * in at least shared mode so that the buffer cannot be subsequently pinned
240460ec6783SChristoph Hellwig  * once someone is waiting for it to be unpinned.
24051da177e4SLinus Torvalds  */
240660ec6783SChristoph Hellwig static void
2407f392e631SChristoph Hellwig xfs_iunpin(
240860ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
2409a3f74ffbSDavid Chinner {
2410579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2411a3f74ffbSDavid Chinner 
24124aaf15d1SDave Chinner 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
24134aaf15d1SDave Chinner 
2414a3f74ffbSDavid Chinner 	/* Give the log a push to start the unpinning I/O */
24155f9b4b0dSDave Chinner 	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2417a3f74ffbSDavid Chinner }
2418a3f74ffbSDavid Chinner 
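/*
 * Wait for the pin count on this inode to reach zero: kick the log to start
 * the unpinning I/O, then sleep on the XFS_IPINNED bit waitqueue until the
 * pin count finally drops to zero and we are woken.
 */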
2419f392e631SChristoph Hellwig static void
2420f392e631SChristoph Hellwig __xfs_iunpin_wait(
2421f392e631SChristoph Hellwig 	struct xfs_inode	*ip)
2422f392e631SChristoph Hellwig {
2423f392e631SChristoph Hellwig 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2424f392e631SChristoph Hellwig 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2425f392e631SChristoph Hellwig 
2426f392e631SChristoph Hellwig 	xfs_iunpin(ip);
2427f392e631SChristoph Hellwig 
2428f392e631SChristoph Hellwig 	do {
242921417136SIngo Molnar 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2430f392e631SChristoph Hellwig 		if (xfs_ipincount(ip))
2431f392e631SChristoph Hellwig 			io_schedule();
2432f392e631SChristoph Hellwig 	} while (xfs_ipincount(ip));
243321417136SIngo Molnar 	finish_wait(wq, &wait.wq_entry);
2434f392e631SChristoph Hellwig }
2435f392e631SChristoph Hellwig 
2436777df5afSDave Chinner void
24371da177e4SLinus Torvalds xfs_iunpin_wait(
243860ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
24391da177e4SLinus Torvalds {
2440f392e631SChristoph Hellwig 	if (xfs_ipincount(ip))
2441f392e631SChristoph Hellwig 		__xfs_iunpin_wait(ip);
24421da177e4SLinus Torvalds }
24431da177e4SLinus Torvalds 
244427320369SDave Chinner /*
244527320369SDave Chinner  * Removing an inode from the namespace involves removing the directory entry
244627320369SDave Chinner  * and dropping the link count on the inode. Removing the directory entry can
244727320369SDave Chinner  * result in locking an AGF (directory blocks were freed) and removing a link
244827320369SDave Chinner  * count can result in placing the inode on an unlinked list which results in
244927320369SDave Chinner  * locking an AGI.
245027320369SDave Chinner  *
245127320369SDave Chinner  * The big problem here is that we have an ordering constraint on AGF and AGI
245227320369SDave Chinner  * locking - inode allocation locks the AGI, then can allocate a new extent for
245327320369SDave Chinner  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
245427320369SDave Chinner  * removes the inode from the unlinked list, requiring that we lock the AGI
245527320369SDave Chinner  * first, and then freeing the inode can result in an inode chunk being freed
245627320369SDave Chinner  * and hence freeing disk space requiring that we lock an AGF.
245727320369SDave Chinner  *
245827320369SDave Chinner  * Hence the ordering that is imposed by other parts of the code is AGI before
245927320369SDave Chinner  * AGF. This means we cannot remove the directory entry before we drop the inode
246027320369SDave Chinner  * reference count and put it on the unlinked list as this results in a lock
246127320369SDave Chinner  * order of AGF then AGI, and this can deadlock against inode allocation and
246227320369SDave Chinner  * freeing. Therefore we must drop the link counts before we remove the
246327320369SDave Chinner  * directory entry.
246427320369SDave Chinner  *
246527320369SDave Chinner  * This is still safe from a transactional point of view - it is not until we
2466310a75a3SDarrick J. Wong  * get to xfs_defer_finish() that we have the possibility of multiple
246727320369SDave Chinner  * transactions in this operation. Hence as long as we remove the directory
246827320369SDave Chinner  * entry and drop the link count in the first transaction of the remove
246927320369SDave Chinner  * operation, there are no transactional constraints on the ordering here.
247027320369SDave Chinner  */
2471c24b5dfaSDave Chinner int
2472c24b5dfaSDave Chinner xfs_remove(
2473c24b5dfaSDave Chinner 	xfs_inode_t             *dp,
2474c24b5dfaSDave Chinner 	struct xfs_name		*name,
2475c24b5dfaSDave Chinner 	xfs_inode_t		*ip)
2476c24b5dfaSDave Chinner {
2477c24b5dfaSDave Chinner 	xfs_mount_t		*mp = dp->i_mount;
2478c24b5dfaSDave Chinner 	xfs_trans_t             *tp = NULL;
2479c19b3b05SDave Chinner 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2480871b9316SDarrick J. Wong 	int			dontcare;
2481c24b5dfaSDave Chinner 	int                     error = 0;
2482c24b5dfaSDave Chinner 	uint			resblks;
2483c24b5dfaSDave Chinner 
2484c24b5dfaSDave Chinner 	trace_xfs_remove(dp, name);
2485c24b5dfaSDave Chinner 
248675c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
24872451337dSDave Chinner 		return -EIO;
2488c24b5dfaSDave Chinner 
2489c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(dp);
2490c24b5dfaSDave Chinner 	if (error)
2491c24b5dfaSDave Chinner 		goto std_return;
2492c24b5dfaSDave Chinner 
2493c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
2494c24b5dfaSDave Chinner 	if (error)
2495c24b5dfaSDave Chinner 		goto std_return;
2496c24b5dfaSDave Chinner 
2497c24b5dfaSDave Chinner 	/*
2498871b9316SDarrick J. Wong 	 * We try to get the real space reservation first, allowing for
2499871b9316SDarrick J. Wong 	 * directory btree deletion(s) implying possible bmap insert(s).  If we
2500871b9316SDarrick J. Wong 	 * can't get the space reservation then we use 0 instead, and avoid the
2501871b9316SDarrick J. Wong 	 * bmap btree insert(s) in the directory code: if a bmap insert would
2502871b9316SDarrick J. Wong 	 * otherwise be needed, we instead trim the LAST block from the directory.
2503871b9316SDarrick J. Wong 	 *
2504871b9316SDarrick J. Wong 	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2505871b9316SDarrick J. Wong 	 * the directory code can handle a reservationless update and we don't
2506871b9316SDarrick J. Wong 	 * want to prevent a user from trying to free space by deleting things.
2507c24b5dfaSDave Chinner 	 */
2508c24b5dfaSDave Chinner 	resblks = XFS_REMOVE_SPACE_RES(mp);
2509871b9316SDarrick J. Wong 	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2510871b9316SDarrick J. Wong 			&tp, &dontcare);
2511c24b5dfaSDave Chinner 	if (error) {
25122451337dSDave Chinner 		ASSERT(error != -ENOSPC);
2513253f4911SChristoph Hellwig 		goto std_return;
2514c24b5dfaSDave Chinner 	}
2515c24b5dfaSDave Chinner 
2516c24b5dfaSDave Chinner 	/*
2517c24b5dfaSDave Chinner 	 * If we're removing a directory perform some additional validation.
2518c24b5dfaSDave Chinner 	 */
2519c24b5dfaSDave Chinner 	if (is_dir) {
252054d7b5c1SDave Chinner 		ASSERT(VFS_I(ip)->i_nlink >= 2);
252154d7b5c1SDave Chinner 		if (VFS_I(ip)->i_nlink != 2) {
25222451337dSDave Chinner 			error = -ENOTEMPTY;
2523c24b5dfaSDave Chinner 			goto out_trans_cancel;
2524c24b5dfaSDave Chinner 		}
2525c24b5dfaSDave Chinner 		if (!xfs_dir_isempty(ip)) {
25262451337dSDave Chinner 			error = -ENOTEMPTY;
2527c24b5dfaSDave Chinner 			goto out_trans_cancel;
2528c24b5dfaSDave Chinner 		}
2529c24b5dfaSDave Chinner 
253027320369SDave Chinner 		/* Drop the link from ip's "..".  */
2531c24b5dfaSDave Chinner 		error = xfs_droplink(tp, dp);
2532c24b5dfaSDave Chinner 		if (error)
253327320369SDave Chinner 			goto out_trans_cancel;
2534c24b5dfaSDave Chinner 
253527320369SDave Chinner 		/* Drop the "." link from ip to self.  */
2536c24b5dfaSDave Chinner 		error = xfs_droplink(tp, ip);
2537c24b5dfaSDave Chinner 		if (error)
253827320369SDave Chinner 			goto out_trans_cancel;
25395838d035SDarrick J. Wong 
25405838d035SDarrick J. Wong 		/*
25415838d035SDarrick J. Wong 		 * Point the unlinked child directory's ".." entry to the root
25425838d035SDarrick J. Wong 		 * directory to eliminate back-references to inodes that may
25435838d035SDarrick J. Wong 		 * get freed before the child directory is closed.  If the fs
25445838d035SDarrick J. Wong 		 * gets shrunk, this can lead to dirent inode validation errors.
25455838d035SDarrick J. Wong 		 */
25465838d035SDarrick J. Wong 		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
25475838d035SDarrick J. Wong 			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
25485838d035SDarrick J. Wong 					tp->t_mountp->m_sb.sb_rootino, 0);
25495838d035SDarrick J. Wong 			if (error)
25502653d533SDarrick J. Wong 				goto out_trans_cancel;
25515838d035SDarrick J. Wong 		}
2552c24b5dfaSDave Chinner 	} else {
2553c24b5dfaSDave Chinner 		/*
2554c24b5dfaSDave Chinner 		 * When removing a non-directory we need to log the parent
2555c24b5dfaSDave Chinner 		 * inode here.  For a directory this is done implicitly
2556c24b5dfaSDave Chinner 		 * by the xfs_droplink call for the ".." entry.
2557c24b5dfaSDave Chinner 		 */
2558c24b5dfaSDave Chinner 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2559c24b5dfaSDave Chinner 	}
256027320369SDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2561c24b5dfaSDave Chinner 
256227320369SDave Chinner 	/* Drop the link from dp to ip. */
2563c24b5dfaSDave Chinner 	error = xfs_droplink(tp, ip);
2564c24b5dfaSDave Chinner 	if (error)
256527320369SDave Chinner 		goto out_trans_cancel;
2566c24b5dfaSDave Chinner 
2567381eee69SBrian Foster 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
256827320369SDave Chinner 	if (error) {
25692451337dSDave Chinner 		ASSERT(error != -ENOENT);
2570c8eac49eSBrian Foster 		goto out_trans_cancel;
257127320369SDave Chinner 	}
257227320369SDave Chinner 
2573c24b5dfaSDave Chinner 	/*
2574c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
2575c24b5dfaSDave Chinner 	 * remove transaction goes to disk before returning to
2576c24b5dfaSDave Chinner 	 * the user.
2577c24b5dfaSDave Chinner 	 */
25780560f31aSDave Chinner 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2579c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
2580c24b5dfaSDave Chinner 
258170393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
2582c24b5dfaSDave Chinner 	if (error)
2583c24b5dfaSDave Chinner 		goto std_return;
2584c24b5dfaSDave Chinner 
25852cd2ef6aSChristoph Hellwig 	if (is_dir && xfs_inode_is_filestream(ip))
2586c24b5dfaSDave Chinner 		xfs_filestream_deassociate(ip);
2587c24b5dfaSDave Chinner 
2588c24b5dfaSDave Chinner 	return 0;
2589c24b5dfaSDave Chinner 
2590c24b5dfaSDave Chinner  out_trans_cancel:
25914906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
2592c24b5dfaSDave Chinner  std_return:
2593c24b5dfaSDave Chinner 	return error;
2594c24b5dfaSDave Chinner }
2595c24b5dfaSDave Chinner 
2596f6bba201SDave Chinner /*
2597f6bba201SDave Chinner  * Enter all inodes for a rename transaction into a sorted array.
2598f6bba201SDave Chinner  */
259995afcf5cSDave Chinner #define __XFS_SORT_INODES	5
2600f6bba201SDave Chinner STATIC void
2601f6bba201SDave Chinner xfs_sort_for_rename(
260295afcf5cSDave Chinner 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
260395afcf5cSDave Chinner 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
260495afcf5cSDave Chinner 	struct xfs_inode	*ip1,	/* in: inode of old entry */
260595afcf5cSDave Chinner 	struct xfs_inode	*ip2,	/* in: inode of new entry */
260695afcf5cSDave Chinner 	struct xfs_inode	*wip,	/* in: whiteout inode */
260795afcf5cSDave Chinner 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
260895afcf5cSDave Chinner 	int			*num_inodes)  /* in/out: inodes in array */
2609f6bba201SDave Chinner {
2610f6bba201SDave Chinner 	int			i, j;
2611f6bba201SDave Chinner 
261295afcf5cSDave Chinner 	ASSERT(*num_inodes == __XFS_SORT_INODES);
261395afcf5cSDave Chinner 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
261495afcf5cSDave Chinner 
2615f6bba201SDave Chinner 	/*
2616f6bba201SDave Chinner 	 * i_tab contains a list of pointers to inodes.  We initialize
2617f6bba201SDave Chinner 	 * the table here & we'll sort it.  We will then use it to
2618f6bba201SDave Chinner 	 * order the acquisition of the inode locks.
2619f6bba201SDave Chinner 	 *
2620f6bba201SDave Chinner 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2621f6bba201SDave Chinner 	 */
262295afcf5cSDave Chinner 	i = 0;
262395afcf5cSDave Chinner 	i_tab[i++] = dp1;
262495afcf5cSDave Chinner 	i_tab[i++] = dp2;
262595afcf5cSDave Chinner 	i_tab[i++] = ip1;
262695afcf5cSDave Chinner 	if (ip2)
262795afcf5cSDave Chinner 		i_tab[i++] = ip2;
262895afcf5cSDave Chinner 	if (wip)
262995afcf5cSDave Chinner 		i_tab[i++] = wip;
263095afcf5cSDave Chinner 	*num_inodes = i;
2631f6bba201SDave Chinner 
2632f6bba201SDave Chinner 	/*
2633f6bba201SDave Chinner 	 * Sort the elements via bubble sort.  (Remember, there are at
263495afcf5cSDave Chinner 	 * most 5 elements to sort, so this is adequate.)
2635f6bba201SDave Chinner 	 */
2636f6bba201SDave Chinner 	for (i = 0; i < *num_inodes; i++) {
2637f6bba201SDave Chinner 		for (j = 1; j < *num_inodes; j++) {
2638f6bba201SDave Chinner 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
263995afcf5cSDave Chinner 				struct xfs_inode *temp = i_tab[j];
2640f6bba201SDave Chinner 				i_tab[j] = i_tab[j-1];
2641f6bba201SDave Chinner 				i_tab[j-1] = temp;
2642f6bba201SDave Chinner 			}
2643f6bba201SDave Chinner 		}
2644f6bba201SDave Chinner 	}
2645f6bba201SDave Chinner }
2646f6bba201SDave Chinner 
2647310606b0SDave Chinner static int
2648310606b0SDave Chinner xfs_finish_rename(
2649c9cfdb38SBrian Foster 	struct xfs_trans	*tp)
2650310606b0SDave Chinner {
2651310606b0SDave Chinner 	/*
2652310606b0SDave Chinner 	 * If this is a synchronous mount, make sure that the rename transaction
2653310606b0SDave Chinner 	 * goes to disk before returning to the user.
2654310606b0SDave Chinner 	 */
26550560f31aSDave Chinner 	if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2656310606b0SDave Chinner 		xfs_trans_set_sync(tp);
2657310606b0SDave Chinner 
265870393313SChristoph Hellwig 	return xfs_trans_commit(tp);
2659310606b0SDave Chinner }
2660310606b0SDave Chinner 
2661f6bba201SDave Chinner /*
2662d31a1825SCarlos Maiolino  * xfs_cross_rename()
2663d31a1825SCarlos Maiolino  *
26640145225eSBhaskar Chowdhury  * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
2665d31a1825SCarlos Maiolino  */
2666d31a1825SCarlos Maiolino STATIC int
2667d31a1825SCarlos Maiolino xfs_cross_rename(
2668d31a1825SCarlos Maiolino 	struct xfs_trans	*tp,
2669d31a1825SCarlos Maiolino 	struct xfs_inode	*dp1,
2670d31a1825SCarlos Maiolino 	struct xfs_name		*name1,
2671d31a1825SCarlos Maiolino 	struct xfs_inode	*ip1,
2672d31a1825SCarlos Maiolino 	struct xfs_inode	*dp2,
2673d31a1825SCarlos Maiolino 	struct xfs_name		*name2,
2674d31a1825SCarlos Maiolino 	struct xfs_inode	*ip2,
2675d31a1825SCarlos Maiolino 	int			spaceres)
2676d31a1825SCarlos Maiolino {
2677d31a1825SCarlos Maiolino 	int		error = 0;
2678d31a1825SCarlos Maiolino 	int		ip1_flags = 0;
2679d31a1825SCarlos Maiolino 	int		ip2_flags = 0;
2680d31a1825SCarlos Maiolino 	int		dp2_flags = 0;
2681d31a1825SCarlos Maiolino 
2682d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in first parent */
2683381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2684d31a1825SCarlos Maiolino 	if (error)
2685eeacd321SDave Chinner 		goto out_trans_abort;
2686d31a1825SCarlos Maiolino 
2687d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in second parent */
2688381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2689d31a1825SCarlos Maiolino 	if (error)
2690eeacd321SDave Chinner 		goto out_trans_abort;
2691d31a1825SCarlos Maiolino 
2692d31a1825SCarlos Maiolino 	/*
2693d31a1825SCarlos Maiolino 	 * If we're renaming one or more directories across different parents,
2694d31a1825SCarlos Maiolino 	 * update the respective ".." entries (and link counts) to match the new
2695d31a1825SCarlos Maiolino 	 * parents.
2696d31a1825SCarlos Maiolino 	 */
2697d31a1825SCarlos Maiolino 	if (dp1 != dp2) {
2698d31a1825SCarlos Maiolino 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2699d31a1825SCarlos Maiolino 
2700c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2701d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2702381eee69SBrian Foster 						dp1->i_ino, spaceres);
2703d31a1825SCarlos Maiolino 			if (error)
2704eeacd321SDave Chinner 				goto out_trans_abort;
2705d31a1825SCarlos Maiolino 
2706d31a1825SCarlos Maiolino 			/* transfer ip2 ".." reference to dp1 */
2707c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2708d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp2);
2709d31a1825SCarlos Maiolino 				if (error)
2710eeacd321SDave Chinner 					goto out_trans_abort;
271191083269SEric Sandeen 				xfs_bumplink(tp, dp1);
2712d31a1825SCarlos Maiolino 			}
2713d31a1825SCarlos Maiolino 
2714d31a1825SCarlos Maiolino 			/*
2715d31a1825SCarlos Maiolino 			 * Although ip1 isn't changed here, userspace needs
2716d31a1825SCarlos Maiolino 			 * to be warned about the change, so that applications
2717d31a1825SCarlos Maiolino 			 * relying on it (like backup ones) will be properly
2718d31a1825SCarlos Maiolino 			 * notified of the change
2719d31a1825SCarlos Maiolino 			 */
2720d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_CHG;
2721d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2722d31a1825SCarlos Maiolino 		}
2723d31a1825SCarlos Maiolino 
2724c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2725d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2726381eee69SBrian Foster 						dp2->i_ino, spaceres);
2727d31a1825SCarlos Maiolino 			if (error)
2728eeacd321SDave Chinner 				goto out_trans_abort;
2729d31a1825SCarlos Maiolino 
2730d31a1825SCarlos Maiolino 			/* transfer ip1 ".." reference to dp2 */
2731c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2732d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp1);
2733d31a1825SCarlos Maiolino 				if (error)
2734eeacd321SDave Chinner 					goto out_trans_abort;
273591083269SEric Sandeen 				xfs_bumplink(tp, dp2);
2736d31a1825SCarlos Maiolino 			}
2737d31a1825SCarlos Maiolino 
2738d31a1825SCarlos Maiolino 			/*
2739d31a1825SCarlos Maiolino 			 * Although ip2 isn't changed here, userspace needs
2740d31a1825SCarlos Maiolino 			 * to be warned about the change, so that applications
2741d31a1825SCarlos Maiolino 			 * relying on it (like backup ones) will be properly
2742d31a1825SCarlos Maiolino 			 * notified of the change
2743d31a1825SCarlos Maiolino 			 */
2744d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2745d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_CHG;
2746d31a1825SCarlos Maiolino 		}
2747d31a1825SCarlos Maiolino 	}
2748d31a1825SCarlos Maiolino 
2749d31a1825SCarlos Maiolino 	if (ip1_flags) {
2750d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
2751d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2752d31a1825SCarlos Maiolino 	}
2753d31a1825SCarlos Maiolino 	if (ip2_flags) {
2754d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
2755d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2756d31a1825SCarlos Maiolino 	}
2757d31a1825SCarlos Maiolino 	if (dp2_flags) {
2758d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
2759d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2760d31a1825SCarlos Maiolino 	}
2761d31a1825SCarlos Maiolino 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2762d31a1825SCarlos Maiolino 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2763c9cfdb38SBrian Foster 	return xfs_finish_rename(tp);
2764eeacd321SDave Chinner 
2765eeacd321SDave Chinner out_trans_abort:
27664906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
2767d31a1825SCarlos Maiolino 	return error;
2768d31a1825SCarlos Maiolino }
2769d31a1825SCarlos Maiolino 
2770d31a1825SCarlos Maiolino /*
27717dcf5c3eSDave Chinner  * xfs_rename_alloc_whiteout()
27727dcf5c3eSDave Chinner  *
2773b63da6c8SRandy Dunlap  * Return a referenced, unlinked, unlocked inode that can be used as a
27747dcf5c3eSDave Chinner  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
27757dcf5c3eSDave Chinner  * crash between allocating the inode and linking it into the rename
27767dcf5c3eSDave Chinner  * transaction, recovery will free the inode and we won't leak it.
27777dcf5c3eSDave Chinner  */
27787dcf5c3eSDave Chinner static int
27797dcf5c3eSDave Chinner xfs_rename_alloc_whiteout(
2780f2d40141SChristian Brauner 	struct mnt_idmap	*idmap,
278170b589a3SEric Sandeen 	struct xfs_name		*src_name,
27827dcf5c3eSDave Chinner 	struct xfs_inode	*dp,
27837dcf5c3eSDave Chinner 	struct xfs_inode	**wip)
27847dcf5c3eSDave Chinner {
27857dcf5c3eSDave Chinner 	struct xfs_inode	*tmpfile;
278670b589a3SEric Sandeen 	struct qstr		name;
27877dcf5c3eSDave Chinner 	int			error;
27887dcf5c3eSDave Chinner 
2789f2d40141SChristian Brauner 	error = xfs_create_tmpfile(idmap, dp, S_IFCHR | WHITEOUT_MODE,
2790f736d93dSChristoph Hellwig 				   &tmpfile);
27917dcf5c3eSDave Chinner 	if (error)
27927dcf5c3eSDave Chinner 		return error;
27937dcf5c3eSDave Chinner 
279470b589a3SEric Sandeen 	name.name = src_name->name;
279570b589a3SEric Sandeen 	name.len = src_name->len;
279670b589a3SEric Sandeen 	error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
279770b589a3SEric Sandeen 	if (error) {
279870b589a3SEric Sandeen 		xfs_finish_inode_setup(tmpfile);
279970b589a3SEric Sandeen 		xfs_irele(tmpfile);
280070b589a3SEric Sandeen 		return error;
280170b589a3SEric Sandeen 	}
280270b589a3SEric Sandeen 
280322419ac9SBrian Foster 	/*
280422419ac9SBrian Foster 	 * Prepare the tmpfile inode as if it were created through the VFS.
2805c4a6bf7fSDarrick J. Wong 	 * Complete the inode setup and flag it as linkable.  nlink is already
2806c4a6bf7fSDarrick J. Wong 	 * zero, so we can skip the drop_nlink.
280722419ac9SBrian Foster 	 */
28082b3d1d41SChristoph Hellwig 	xfs_setup_iops(tmpfile);
28097dcf5c3eSDave Chinner 	xfs_finish_inode_setup(tmpfile);
28107dcf5c3eSDave Chinner 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
28117dcf5c3eSDave Chinner 
28127dcf5c3eSDave Chinner 	*wip = tmpfile;
28137dcf5c3eSDave Chinner 	return 0;
28147dcf5c3eSDave Chinner }
28157dcf5c3eSDave Chinner 
28167dcf5c3eSDave Chinner /*
2817f6bba201SDave Chinner  * xfs_rename
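 *
 * Rename src_name in src_dp to target_name in target_dp, handling the
 * RENAME_EXCHANGE and RENAME_WHITEOUT flags from renameat2().  For a plain
 * rename, any existing entry at the target has its link count dropped in
 * the same transaction.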
2818f6bba201SDave Chinner  */
2819f6bba201SDave Chinner int
2820f6bba201SDave Chinner xfs_rename(
2821f2d40141SChristian Brauner 	struct mnt_idmap	*idmap,
28227dcf5c3eSDave Chinner 	struct xfs_inode	*src_dp,
2823f6bba201SDave Chinner 	struct xfs_name		*src_name,
28247dcf5c3eSDave Chinner 	struct xfs_inode	*src_ip,
28257dcf5c3eSDave Chinner 	struct xfs_inode	*target_dp,
2826f6bba201SDave Chinner 	struct xfs_name		*target_name,
28277dcf5c3eSDave Chinner 	struct xfs_inode	*target_ip,
2828d31a1825SCarlos Maiolino 	unsigned int		flags)
2829f6bba201SDave Chinner {
28307dcf5c3eSDave Chinner 	struct xfs_mount	*mp = src_dp->i_mount;
28317dcf5c3eSDave Chinner 	struct xfs_trans	*tp;
28327dcf5c3eSDave Chinner 	struct xfs_inode	*wip = NULL;		/* whiteout inode */
28337dcf5c3eSDave Chinner 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
28346da1b4b1SDarrick J. Wong 	int			i;
283595afcf5cSDave Chinner 	int			num_inodes = __XFS_SORT_INODES;
28362b93681fSDave Chinner 	bool			new_parent = (src_dp != target_dp);
2837c19b3b05SDave Chinner 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2838f6bba201SDave Chinner 	int			spaceres;
283941667260SDarrick J. Wong 	bool			retried = false;
284041667260SDarrick J. Wong 	int			error, nospace_error = 0;
2841f6bba201SDave Chinner 
2842f6bba201SDave Chinner 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2843f6bba201SDave Chinner 
2844eeacd321SDave Chinner 	if ((flags & RENAME_EXCHANGE) && !target_ip)
2845eeacd321SDave Chinner 		return -EINVAL;
2846f6bba201SDave Chinner 
28477dcf5c3eSDave Chinner 	/*
28487dcf5c3eSDave Chinner 	 * If we are doing a whiteout operation, allocate the whiteout inode
28497dcf5c3eSDave Chinner 	 * we will be placing at the target and ensure the type is set
28507dcf5c3eSDave Chinner 	 * appropriately.
28517dcf5c3eSDave Chinner 	 */
28527dcf5c3eSDave Chinner 	if (flags & RENAME_WHITEOUT) {
2853f2d40141SChristian Brauner 		error = xfs_rename_alloc_whiteout(idmap, src_name,
285470b589a3SEric Sandeen 						  target_dp, &wip);
28557dcf5c3eSDave Chinner 		if (error)
28567dcf5c3eSDave Chinner 			return error;
2857f6bba201SDave Chinner 
28587dcf5c3eSDave Chinner 		/* setup target dirent info as whiteout */
28597dcf5c3eSDave Chinner 		src_name->type = XFS_DIR3_FT_CHRDEV;
28607dcf5c3eSDave Chinner 	}
28617dcf5c3eSDave Chinner 
28627dcf5c3eSDave Chinner 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2863f6bba201SDave Chinner 				inodes, &num_inodes);
2864f6bba201SDave Chinner 
286541667260SDarrick J. Wong retry:
286641667260SDarrick J. Wong 	nospace_error = 0;
2867f6bba201SDave Chinner 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2868253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
28692451337dSDave Chinner 	if (error == -ENOSPC) {
287041667260SDarrick J. Wong 		nospace_error = error;
2871f6bba201SDave Chinner 		spaceres = 0;
2872253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2873253f4911SChristoph Hellwig 				&tp);
2874f6bba201SDave Chinner 	}
2875445883e8SDave Chinner 	if (error)
2876253f4911SChristoph Hellwig 		goto out_release_wip;
2877f6bba201SDave Chinner 
2878f6bba201SDave Chinner 	/*
2879f6bba201SDave Chinner 	 * Attach the dquots to the inodes
2880f6bba201SDave Chinner 	 */
2881f6bba201SDave Chinner 	error = xfs_qm_vop_rename_dqattach(inodes);
2882445883e8SDave Chinner 	if (error)
2883445883e8SDave Chinner 		goto out_trans_cancel;
2884f6bba201SDave Chinner 
2885f6bba201SDave Chinner 	/*
2886f6bba201SDave Chinner 	 * Lock all the participating inodes. Depending upon whether
2887f6bba201SDave Chinner 	 * the target_name exists in the target directory, and
2888f6bba201SDave Chinner 	 * whether the target directory is the same as the source
2889e07ee6feSAllison Henderson 	 * directory, we can lock from 2 to 5 inodes.
2890f6bba201SDave Chinner 	 */
2891f6bba201SDave Chinner 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2892f6bba201SDave Chinner 
2893f6bba201SDave Chinner 	/*
2894f6bba201SDave Chinner 	 * Join all the inodes to the transaction. From this point on,
2895f6bba201SDave Chinner 	 * we can rely on either trans_commit or trans_cancel to unlock
2896f6bba201SDave Chinner 	 * them.
2897f6bba201SDave Chinner 	 */
289865523218SChristoph Hellwig 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2899f6bba201SDave Chinner 	if (new_parent)
290065523218SChristoph Hellwig 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2901f6bba201SDave Chinner 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2902f6bba201SDave Chinner 	if (target_ip)
2903f6bba201SDave Chinner 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
29047dcf5c3eSDave Chinner 	if (wip)
29057dcf5c3eSDave Chinner 		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
2906f6bba201SDave Chinner 
2907f6bba201SDave Chinner 	/*
2908f6bba201SDave Chinner 	 * If we are using project inheritance, we only allow renames
2909f6bba201SDave Chinner 	 * into our tree when the project IDs are the same; else the
2910f6bba201SDave Chinner 	 * tree quota mechanism would be circumvented.
2911f6bba201SDave Chinner 	 */
2912db07349dSChristoph Hellwig 	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
2913ceaf603cSChristoph Hellwig 		     target_dp->i_projid != src_ip->i_projid)) {
29142451337dSDave Chinner 		error = -EXDEV;
2915445883e8SDave Chinner 		goto out_trans_cancel;
2916f6bba201SDave Chinner 	}
2917f6bba201SDave Chinner 
2918eeacd321SDave Chinner 	/* RENAME_EXCHANGE is unique from here on. */
2919eeacd321SDave Chinner 	if (flags & RENAME_EXCHANGE)
2920eeacd321SDave Chinner 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2921d31a1825SCarlos Maiolino 					target_dp, target_name, target_ip,
2922f16dea54SBrian Foster 					spaceres);
2923d31a1825SCarlos Maiolino 
2924d31a1825SCarlos Maiolino 	/*
292541667260SDarrick J. Wong 	 * Try to reserve quota to handle an expansion of the target directory.
292641667260SDarrick J. Wong 	 * We'll allow the rename to continue in reservationless mode if we hit
292741667260SDarrick J. Wong 	 * a space usage constraint.  If we trigger reservationless mode, save
292841667260SDarrick J. Wong 	 * the errno if there isn't any free space in the target directory.
292941667260SDarrick J. Wong 	 */
293041667260SDarrick J. Wong 	if (spaceres != 0) {
293141667260SDarrick J. Wong 		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
293241667260SDarrick J. Wong 				0, false);
293341667260SDarrick J. Wong 		if (error == -EDQUOT || error == -ENOSPC) {
293441667260SDarrick J. Wong 			if (!retried) {
293541667260SDarrick J. Wong 				xfs_trans_cancel(tp);
293641667260SDarrick J. Wong 				xfs_blockgc_free_quota(target_dp, 0);
293741667260SDarrick J. Wong 				retried = true;
293841667260SDarrick J. Wong 				goto retry;
293941667260SDarrick J. Wong 			}
294041667260SDarrick J. Wong 
294141667260SDarrick J. Wong 			nospace_error = error;
294241667260SDarrick J. Wong 			spaceres = 0;
294341667260SDarrick J. Wong 			error = 0;
294441667260SDarrick J. Wong 		}
294541667260SDarrick J. Wong 		if (error)
294641667260SDarrick J. Wong 			goto out_trans_cancel;
294741667260SDarrick J. Wong 	}
294841667260SDarrick J. Wong 
294941667260SDarrick J. Wong 	/*
2950bc56ad8cSkaixuxia 	 * Check for expected errors before we dirty the transaction
2951bc56ad8cSkaixuxia 	 * so we can return an error without a transaction abort.
2952f6bba201SDave Chinner 	 */
2953f6bba201SDave Chinner 	if (target_ip == NULL) {
2954f6bba201SDave Chinner 		/*
2955f6bba201SDave Chinner 		 * If there's no space reservation, check that the entry will
2956f6bba201SDave Chinner 		 * fit before actually inserting it.
2957f6bba201SDave Chinner 		 */
295894f3cad5SEric Sandeen 		if (!spaceres) {
295994f3cad5SEric Sandeen 			error = xfs_dir_canenter(tp, target_dp, target_name);
2960f6bba201SDave Chinner 			if (error)
2961445883e8SDave Chinner 				goto out_trans_cancel;
296294f3cad5SEric Sandeen 		}
2963bc56ad8cSkaixuxia 	} else {
2964bc56ad8cSkaixuxia 		/*
2965bc56ad8cSkaixuxia 		 * If the target exists and it's a directory, check whether
2966bc56ad8cSkaixuxia 		 * it can be destroyed.
2967bc56ad8cSkaixuxia 		 */
2968bc56ad8cSkaixuxia 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
2969bc56ad8cSkaixuxia 		    (!xfs_dir_isempty(target_ip) ||
2970bc56ad8cSkaixuxia 		     (VFS_I(target_ip)->i_nlink > 2))) {
2971bc56ad8cSkaixuxia 			error = -EEXIST;
2972bc56ad8cSkaixuxia 			goto out_trans_cancel;
2973bc56ad8cSkaixuxia 		}
2974bc56ad8cSkaixuxia 	}
2975bc56ad8cSkaixuxia 
2976bc56ad8cSkaixuxia 	/*
29776da1b4b1SDarrick J. Wong 	 * Lock the AGI buffers we need to handle bumping the nlink of the
29786da1b4b1SDarrick J. Wong 	 * whiteout inode off the unlinked list and to handle dropping the
29796da1b4b1SDarrick J. Wong 	 * nlink of the target inode.  Per locking order rules, do this in
29806da1b4b1SDarrick J. Wong 	 * increasing AG order and before directory block allocation tries to
29816da1b4b1SDarrick J. Wong 	 * grab AGFs because we grab AGIs before AGFs.
29826da1b4b1SDarrick J. Wong 	 *
29836da1b4b1SDarrick J. Wong 	 * The (vfs) caller must ensure that if src is a directory then
29846da1b4b1SDarrick J. Wong 	 * target_ip is either null or an empty directory.
29856da1b4b1SDarrick J. Wong 	 */
29866da1b4b1SDarrick J. Wong 	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
29876da1b4b1SDarrick J. Wong 		if (inodes[i] == wip ||
29886da1b4b1SDarrick J. Wong 		    (inodes[i] == target_ip &&
29896da1b4b1SDarrick J. Wong 		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
299061021debSDave Chinner 			struct xfs_perag	*pag;
29916da1b4b1SDarrick J. Wong 			struct xfs_buf		*bp;
29926da1b4b1SDarrick J. Wong 
299361021debSDave Chinner 			pag = xfs_perag_get(mp,
299461021debSDave Chinner 					XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
299561021debSDave Chinner 			error = xfs_read_agi(pag, tp, &bp);
299661021debSDave Chinner 			xfs_perag_put(pag);
29976da1b4b1SDarrick J. Wong 			if (error)
29986da1b4b1SDarrick J. Wong 				goto out_trans_cancel;
29996da1b4b1SDarrick J. Wong 		}
30006da1b4b1SDarrick J. Wong 	}
30016da1b4b1SDarrick J. Wong 
30026da1b4b1SDarrick J. Wong 	/*
3003bc56ad8cSkaixuxia 	 * Directory entry creation below may acquire the AGF. Remove
3004bc56ad8cSkaixuxia 	 * the whiteout from the unlinked list first to preserve correct
3005bc56ad8cSkaixuxia 	 * AGI/AGF locking order. This dirties the transaction so failures
3006bc56ad8cSkaixuxia 	 * after this point will abort and log recovery will clean up the
3007bc56ad8cSkaixuxia 	 * mess.
3008bc56ad8cSkaixuxia 	 *
3009bc56ad8cSkaixuxia 	 * For whiteouts, we need to bump the link count on the whiteout
3010bc56ad8cSkaixuxia 	 * inode. After this point we have a real link, so clear the tmpfile
3011bc56ad8cSkaixuxia 	 * state flag from the inode so it doesn't accidentally get misused
3012bc56ad8cSkaixuxia 	 * in future.
3013bc56ad8cSkaixuxia 	 */
3014bc56ad8cSkaixuxia 	if (wip) {
3015f40aadb2SDave Chinner 		struct xfs_perag	*pag;
3016f40aadb2SDave Chinner 
3017bc56ad8cSkaixuxia 		ASSERT(VFS_I(wip)->i_nlink == 0);
3018f40aadb2SDave Chinner 
3019f40aadb2SDave Chinner 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3020f40aadb2SDave Chinner 		error = xfs_iunlink_remove(tp, pag, wip);
3021f40aadb2SDave Chinner 		xfs_perag_put(pag);
3022bc56ad8cSkaixuxia 		if (error)
3023bc56ad8cSkaixuxia 			goto out_trans_cancel;
3024bc56ad8cSkaixuxia 
3025bc56ad8cSkaixuxia 		xfs_bumplink(tp, wip);
3026bc56ad8cSkaixuxia 		VFS_I(wip)->i_state &= ~I_LINKABLE;
3027bc56ad8cSkaixuxia 	}
3028bc56ad8cSkaixuxia 
3029bc56ad8cSkaixuxia 	/*
3030bc56ad8cSkaixuxia 	 * Set up the target.
3031bc56ad8cSkaixuxia 	 */
3032bc56ad8cSkaixuxia 	if (target_ip == NULL) {
3033f6bba201SDave Chinner 		/*
3034f6bba201SDave Chinner 		 * If target does not exist and the rename crosses
3035f6bba201SDave Chinner 		 * directories, adjust the target directory link count
3036f6bba201SDave Chinner 		 * to account for the ".." reference from the new entry.
3037f6bba201SDave Chinner 		 */
3038f6bba201SDave Chinner 		error = xfs_dir_createname(tp, target_dp, target_name,
3039381eee69SBrian Foster 					   src_ip->i_ino, spaceres);
3040f6bba201SDave Chinner 		if (error)
3041c8eac49eSBrian Foster 			goto out_trans_cancel;
3042f6bba201SDave Chinner 
3043f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
3044f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3045f6bba201SDave Chinner 
3046f6bba201SDave Chinner 		if (new_parent && src_is_directory) {
304791083269SEric Sandeen 			xfs_bumplink(tp, target_dp);
3048f6bba201SDave Chinner 		}
3049f6bba201SDave Chinner 	} else { /* target_ip != NULL */
3050f6bba201SDave Chinner 		/*
3051f6bba201SDave Chinner 		 * Link the source inode under the target name.
3052f6bba201SDave Chinner 		 * If the source inode is a directory and we are moving
3053f6bba201SDave Chinner 		 * it across directories, its ".." entry will be
3054f6bba201SDave Chinner 		 * inconsistent until we replace that down below.
3055f6bba201SDave Chinner 		 *
3056f6bba201SDave Chinner 		 * In case there is already an entry with the same
3057f6bba201SDave Chinner 		 * name at the destination directory, remove it first.
3058f6bba201SDave Chinner 		 */
3059f6bba201SDave Chinner 		error = xfs_dir_replace(tp, target_dp, target_name,
3060381eee69SBrian Foster 					src_ip->i_ino, spaceres);
3061f6bba201SDave Chinner 		if (error)
3062c8eac49eSBrian Foster 			goto out_trans_cancel;
3063f6bba201SDave Chinner 
3064f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
3065f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3066f6bba201SDave Chinner 
3067f6bba201SDave Chinner 		/*
3068f6bba201SDave Chinner 		 * Decrement the link count on the target since the target
3069f6bba201SDave Chinner 		 * dir no longer points to it.
3070f6bba201SDave Chinner 		 */
3071f6bba201SDave Chinner 		error = xfs_droplink(tp, target_ip);
3072f6bba201SDave Chinner 		if (error)
3073c8eac49eSBrian Foster 			goto out_trans_cancel;
3074f6bba201SDave Chinner 
3075f6bba201SDave Chinner 		if (src_is_directory) {
3076f6bba201SDave Chinner 			/*
3077f6bba201SDave Chinner 			 * Drop the link from the old "." entry.
3078f6bba201SDave Chinner 			 */
3079f6bba201SDave Chinner 			error = xfs_droplink(tp, target_ip);
3080f6bba201SDave Chinner 			if (error)
3081c8eac49eSBrian Foster 				goto out_trans_cancel;
3082f6bba201SDave Chinner 		}
3083f6bba201SDave Chinner 	} /* target_ip != NULL */
3084f6bba201SDave Chinner 
3085f6bba201SDave Chinner 	/*
3086f6bba201SDave Chinner 	 * Remove the source.
3087f6bba201SDave Chinner 	 */
3088f6bba201SDave Chinner 	if (new_parent && src_is_directory) {
3089f6bba201SDave Chinner 		/*
3090f6bba201SDave Chinner 		 * Rewrite the ".." entry to point to the new
3091f6bba201SDave Chinner 		 * directory.
3092f6bba201SDave Chinner 		 */
3093f6bba201SDave Chinner 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3094381eee69SBrian Foster 					target_dp->i_ino, spaceres);
30952451337dSDave Chinner 		ASSERT(error != -EEXIST);
3096f6bba201SDave Chinner 		if (error)
3097c8eac49eSBrian Foster 			goto out_trans_cancel;
3098f6bba201SDave Chinner 	}
3099f6bba201SDave Chinner 
3100f6bba201SDave Chinner 	/*
3101f6bba201SDave Chinner 	 * We always want to hit the ctime on the source inode.
3102f6bba201SDave Chinner 	 *
3103f6bba201SDave Chinner 	 * This isn't strictly required by the standards since the source
3104f6bba201SDave Chinner 	 * inode isn't really being changed, but old unix file systems did
3105f6bba201SDave Chinner 	 * it and some incremental backup programs won't work without it.
3106f6bba201SDave Chinner 	 */
3107f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3108f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3109f6bba201SDave Chinner 
3110f6bba201SDave Chinner 	/*
3111f6bba201SDave Chinner 	 * Adjust the link count on src_dp.  This is necessary when
3112f6bba201SDave Chinner 	 * renaming a directory, either within one parent when
3113f6bba201SDave Chinner 	 * the target existed, or across two parent directories.
3114f6bba201SDave Chinner 	 */
3115f6bba201SDave Chinner 	if (src_is_directory && (new_parent || target_ip != NULL)) {
3116f6bba201SDave Chinner 
3117f6bba201SDave Chinner 		/*
3118f6bba201SDave Chinner 		 * Decrement link count on src_directory since the
3119f6bba201SDave Chinner 		 * entry that's moved no longer points to it.
3120f6bba201SDave Chinner 		 */
3121f6bba201SDave Chinner 		error = xfs_droplink(tp, src_dp);
3122f6bba201SDave Chinner 		if (error)
3123c8eac49eSBrian Foster 			goto out_trans_cancel;
3124f6bba201SDave Chinner 	}
3125f6bba201SDave Chinner 
31267dcf5c3eSDave Chinner 	/*
31277dcf5c3eSDave Chinner 	 * For whiteouts, we only need to update the source dirent with the
31287dcf5c3eSDave Chinner 	 * inode number of the whiteout inode rather than removing it
31297dcf5c3eSDave Chinner 	 * altogether.
31307dcf5c3eSDave Chinner 	 */
313183a21c18SChandan Babu R 	if (wip)
31327dcf5c3eSDave Chinner 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3133381eee69SBrian Foster 					spaceres);
313483a21c18SChandan Babu R 	else
3135f6bba201SDave Chinner 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3136381eee69SBrian Foster 					   spaceres);
313702092a2fSChandan Babu R 
3138f6bba201SDave Chinner 	if (error)
3139c8eac49eSBrian Foster 		goto out_trans_cancel;
3140f6bba201SDave Chinner 
3141f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3142f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3143f6bba201SDave Chinner 	if (new_parent)
3144f6bba201SDave Chinner 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3145f6bba201SDave Chinner 
3146c9cfdb38SBrian Foster 	error = xfs_finish_rename(tp);
31477dcf5c3eSDave Chinner 	if (wip)
314844a8736bSDarrick J. Wong 		xfs_irele(wip);
31497dcf5c3eSDave Chinner 	return error;
3150f6bba201SDave Chinner 
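	/*
	 * Error paths: cancel the dirty transaction and drop the reference we
	 * hold on any whiteout inode.  If we are failing with a bare -ENOSPC
	 * and a more specific reservation error was recorded earlier, report
	 * that instead.
	 */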
3151445883e8SDave Chinner out_trans_cancel:
31524906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
3153253f4911SChristoph Hellwig out_release_wip:
31547dcf5c3eSDave Chinner 	if (wip)
315544a8736bSDarrick J. Wong 		xfs_irele(wip);
315641667260SDarrick J. Wong 	if (error == -ENOSPC && nospace_error)
315741667260SDarrick J. Wong 		error = nospace_error;
3158f6bba201SDave Chinner 	return error;
3159f6bba201SDave Chinner }
3160f6bba201SDave Chinner 
3161e6187b34SDave Chinner static int
3162e6187b34SDave Chinner xfs_iflush(
316393848a99SChristoph Hellwig 	struct xfs_inode	*ip,
316493848a99SChristoph Hellwig 	struct xfs_buf		*bp)
31651da177e4SLinus Torvalds {
316693848a99SChristoph Hellwig 	struct xfs_inode_log_item *iip = ip->i_itemp;
316793848a99SChristoph Hellwig 	struct xfs_dinode	*dip;
316893848a99SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
3169f2019299SBrian Foster 	int			error;
31701da177e4SLinus Torvalds 
3171579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3172718ecc50SDave Chinner 	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3173f7e67b20SChristoph Hellwig 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3174daf83964SChristoph Hellwig 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
317590c60e16SDave Chinner 	ASSERT(iip->ili_item.li_buf == bp);
31761da177e4SLinus Torvalds 
317788ee2df7SChristoph Hellwig 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
31781da177e4SLinus Torvalds 
3179f2019299SBrian Foster 	/*
3180f2019299SBrian Foster 	 * We don't flush the inode if any of the following checks fail, but we
3181f2019299SBrian Foster 	 * do still update the log item and attach to the backing buffer as if
3182f2019299SBrian Foster 	 * the flush happened. This is a formality to facilitate predictable
3183f2019299SBrian Foster 	 * error handling as the caller will shut down and fail the buffer.
3184f2019299SBrian Foster 	 */
3185f2019299SBrian Foster 	error = -EFSCORRUPTED;
318669ef921bSChristoph Hellwig 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
31879e24cfd0SDarrick J. Wong 			       mp, XFS_ERRTAG_IFLUSH_1)) {
31886a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
318978b0f58bSZeng Heng 			"%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
31906a19d939SDave Chinner 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3191f2019299SBrian Foster 		goto flush_out;
31921da177e4SLinus Torvalds 	}
3193c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode)) {
31941da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
3195f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3196f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
31979e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_3)) {
31986a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
319978b0f58bSZeng Heng 				"%s: Bad regular inode %llu, ptr "PTR_FMT,
32006a19d939SDave Chinner 				__func__, ip->i_ino, ip);
3201f2019299SBrian Foster 			goto flush_out;
32021da177e4SLinus Torvalds 		}
3203c19b3b05SDave Chinner 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
32041da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
3205f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3206f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3207f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
32089e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_4)) {
32096a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
321078b0f58bSZeng Heng 				"%s: Bad directory inode %llu, ptr "PTR_FMT,
32116a19d939SDave Chinner 				__func__, ip->i_ino, ip);
3212f2019299SBrian Foster 			goto flush_out;
32131da177e4SLinus Torvalds 		}
32141da177e4SLinus Torvalds 	}
32152ed5b09bSDarrick J. Wong 	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
32166e73a545SChristoph Hellwig 				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
32176a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3218755c38ffSChandan Babu R 			"%s: detected corrupt incore inode %llu, "
3219755c38ffSChandan Babu R 			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
32206a19d939SDave Chinner 			__func__, ip->i_ino,
32212ed5b09bSDarrick J. Wong 			ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
32226e73a545SChristoph Hellwig 			ip->i_nblocks, ip);
3223f2019299SBrian Foster 		goto flush_out;
32241da177e4SLinus Torvalds 	}
32257821ea30SChristoph Hellwig 	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
32269e24cfd0SDarrick J. Wong 				mp, XFS_ERRTAG_IFLUSH_6)) {
32276a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
322878b0f58bSZeng Heng 			"%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
32297821ea30SChristoph Hellwig 			__func__, ip->i_ino, ip->i_forkoff, ip);
3230f2019299SBrian Foster 		goto flush_out;
32311da177e4SLinus Torvalds 	}
3232e60896d8SDave Chinner 
32331da177e4SLinus Torvalds 	/*
3234965e0a1aSChristoph Hellwig 	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3235965e0a1aSChristoph Hellwig 	 * count for correct sequencing.  We bump the flush iteration count so
3236965e0a1aSChristoph Hellwig 	 * we can detect flushes which postdate a log record during recovery.
3237965e0a1aSChristoph Hellwig 	 * This is redundant as we now log every change and hence this can't
3238965e0a1aSChristoph Hellwig 	 * happen, but we still need to do it to ensure backwards compatibility
3239965e0a1aSChristoph Hellwig 	 * with old kernels that predate logging all inode changes.
32401da177e4SLinus Torvalds 	 */
324138c26bfdSDave Chinner 	if (!xfs_has_v3inodes(mp))
3242965e0a1aSChristoph Hellwig 		ip->i_flushiter++;
32431da177e4SLinus Torvalds 
32440f45a1b2SChristoph Hellwig 	/*
32450f45a1b2SChristoph Hellwig 	 * If there are inline format data / attr forks attached to this inode,
32460f45a1b2SChristoph Hellwig 	 * make sure they are not corrupt.
32470f45a1b2SChristoph Hellwig 	 */
3248f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
32490f45a1b2SChristoph Hellwig 	    xfs_ifork_verify_local_data(ip))
32500f45a1b2SChristoph Hellwig 		goto flush_out;
3251932b42c6SDarrick J. Wong 	if (xfs_inode_has_attr_fork(ip) &&
32522ed5b09bSDarrick J. Wong 	    ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
32530f45a1b2SChristoph Hellwig 	    xfs_ifork_verify_local_attr(ip))
3254f2019299SBrian Foster 		goto flush_out;
3255005c5db8SDarrick J. Wong 
32561da177e4SLinus Torvalds 	/*
32573987848cSDave Chinner 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
32583987848cSDave Chinner 	 * copy out the core of the inode, because if the inode is dirty at all
32593987848cSDave Chinner 	 * the core must be.
32601da177e4SLinus Torvalds 	 */
326193f958f9SDave Chinner 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
32621da177e4SLinus Torvalds 
32631da177e4SLinus Torvalds 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
326438c26bfdSDave Chinner 	if (!xfs_has_v3inodes(mp)) {
3265965e0a1aSChristoph Hellwig 		if (ip->i_flushiter == DI_MAX_FLUSH)
3266965e0a1aSChristoph Hellwig 			ip->i_flushiter = 0;
3267ee7b83fdSChristoph Hellwig 	}
32681da177e4SLinus Torvalds 
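	/*
	 * Flush the in-core data fork, and the attr fork if this inode has
	 * one, into their on-disk format in the backing cluster buffer.
	 */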
3269005c5db8SDarrick J. Wong 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3270932b42c6SDarrick J. Wong 	if (xfs_inode_has_attr_fork(ip))
3271005c5db8SDarrick J. Wong 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
32721da177e4SLinus Torvalds 
32731da177e4SLinus Torvalds 	/*
3274f5d8d5c4SChristoph Hellwig 	 * We've recorded everything logged in the inode, so we'd like to clear
3275f5d8d5c4SChristoph Hellwig 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3276f5d8d5c4SChristoph Hellwig 	 * However, we can't stop logging all this information until the data
3277f5d8d5c4SChristoph Hellwig 	 * we've copied into the disk buffer is written to disk.  If we did we
3278f5d8d5c4SChristoph Hellwig 	 * might overwrite the copy of the inode in the log with all the data
3279f5d8d5c4SChristoph Hellwig 	 * after re-logging only part of it, and in the face of a crash we
3280f5d8d5c4SChristoph Hellwig 	 * wouldn't have all the data we need to recover.
32811da177e4SLinus Torvalds 	 *
3282f5d8d5c4SChristoph Hellwig 	 * What we do is move the bits to the ili_last_fields field.  When
3283f5d8d5c4SChristoph Hellwig 	 * logging the inode, these bits are moved back to the ili_fields field.
3284664ffb8aSChristoph Hellwig 	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3285664ffb8aSChristoph Hellwig 	 * we know that the information those bits represent is permanently on
3286f5d8d5c4SChristoph Hellwig 	 * disk.  As long as the flush completes before the inode is logged
3287f5d8d5c4SChristoph Hellwig 	 * again, then both ili_fields and ili_last_fields will be cleared.
32881da177e4SLinus Torvalds 	 */
3289f2019299SBrian Foster 	error = 0;
3290f2019299SBrian Foster flush_out:
32911319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
3292f5d8d5c4SChristoph Hellwig 	iip->ili_last_fields = iip->ili_fields;
3293f5d8d5c4SChristoph Hellwig 	iip->ili_fields = 0;
3294fc0561ceSDave Chinner 	iip->ili_fsync_fields = 0;
32951319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
32961da177e4SLinus Torvalds 
32971319ebefSDave Chinner 	/*
32981319ebefSDave Chinner 	 * Store the current LSN of the inode so that we can tell whether the
3299664ffb8aSChristoph Hellwig 	 * item has moved in the AIL from xfs_buf_inode_iodone().
33001319ebefSDave Chinner 	 */
33017b2e2a31SDavid Chinner 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
33027b2e2a31SDavid Chinner 				&iip->ili_item.li_lsn);
33031da177e4SLinus Torvalds 
330493848a99SChristoph Hellwig 	/* generate the checksum. */
330593848a99SChristoph Hellwig 	xfs_dinode_calc_crc(mp, dip);
3306f2019299SBrian Foster 	return error;
33071da177e4SLinus Torvalds }
330844a8736bSDarrick J. Wong 
3309e6187b34SDave Chinner /*
3310e6187b34SDave Chinner  * Non-blocking flush of dirty inode metadata into the backing buffer.
3311e6187b34SDave Chinner  *
3312e6187b34SDave Chinner  * The caller must have a reference to the inode and hold the cluster buffer
3313e6187b34SDave Chinner  * locked. The function walks all the inodes attached to the cluster buffer that
3314e6187b34SDave Chinner  * it can lock without blocking, and flushes them to the cluster buffer.
3315e6187b34SDave Chinner  *
33165717ea4dSDave Chinner  * On successful flushing of at least one inode, the caller must write out the
33175717ea4dSDave Chinner  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
33185717ea4dSDave Chinner  * the caller needs to release the buffer. On failure, the filesystem will be
33195717ea4dSDave Chinner  * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
33205717ea4dSDave Chinner  * will be returned.
3321e6187b34SDave Chinner  */
3322e6187b34SDave Chinner int
3323e6187b34SDave Chinner xfs_iflush_cluster(
3324e6187b34SDave Chinner 	struct xfs_buf		*bp)
3325e6187b34SDave Chinner {
33265717ea4dSDave Chinner 	struct xfs_mount	*mp = bp->b_mount;
33275717ea4dSDave Chinner 	struct xfs_log_item	*lip, *n;
33285717ea4dSDave Chinner 	struct xfs_inode	*ip;
33295717ea4dSDave Chinner 	struct xfs_inode_log_item *iip;
3330e6187b34SDave Chinner 	int			clcount = 0;
33315717ea4dSDave Chinner 	int			error = 0;
3332e6187b34SDave Chinner 
3333e6187b34SDave Chinner 	/*
33345717ea4dSDave Chinner 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3335d2d7c047SDave Chinner 	 * will remove itself from the list.
3336e6187b34SDave Chinner 	 */
33375717ea4dSDave Chinner 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
33385717ea4dSDave Chinner 		iip = (struct xfs_inode_log_item *)lip;
33395717ea4dSDave Chinner 		ip = iip->ili_inode;
33405717ea4dSDave Chinner 
33415717ea4dSDave Chinner 		/*
33425717ea4dSDave Chinner 		 * Quick and dirty check to avoid locks if possible.
33435717ea4dSDave Chinner 		 */
3344718ecc50SDave Chinner 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
33455717ea4dSDave Chinner 			continue;
33465717ea4dSDave Chinner 		if (xfs_ipincount(ip))
33475717ea4dSDave Chinner 			continue;
33485717ea4dSDave Chinner 
33495717ea4dSDave Chinner 		/*
33505717ea4dSDave Chinner 		 * The inode is still attached to the buffer, which means it is
33515717ea4dSDave Chinner 		 * dirty but reclaim might try to grab it. Check carefully for
33525717ea4dSDave Chinner 		 * that, and grab the ilock while still holding the i_flags_lock
33535717ea4dSDave Chinner 		 * to guarantee reclaim will not be able to reclaim this inode
33545717ea4dSDave Chinner 		 * once we drop the i_flags_lock.
33555717ea4dSDave Chinner 		 */
33565717ea4dSDave Chinner 		spin_lock(&ip->i_flags_lock);
33575717ea4dSDave Chinner 		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3358718ecc50SDave Chinner 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
33595717ea4dSDave Chinner 			spin_unlock(&ip->i_flags_lock);
3360e6187b34SDave Chinner 			continue;
3361e6187b34SDave Chinner 		}
3362e6187b34SDave Chinner 
3363e6187b34SDave Chinner 		/*
33645717ea4dSDave Chinner 		 * ILOCK will pin the inode against reclaim and prevent
33655717ea4dSDave Chinner 		 * concurrent transactions modifying the inode while we are
3366718ecc50SDave Chinner 		 * flushing the inode. If we get the lock, set the flushing
3367718ecc50SDave Chinner 		 * state before we drop the i_flags_lock.
3368e6187b34SDave Chinner 		 */
33695717ea4dSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
33705717ea4dSDave Chinner 			spin_unlock(&ip->i_flags_lock);
33715717ea4dSDave Chinner 			continue;
33725717ea4dSDave Chinner 		}
3373718ecc50SDave Chinner 		__xfs_iflags_set(ip, XFS_IFLUSHING);
33745717ea4dSDave Chinner 		spin_unlock(&ip->i_flags_lock);
33755717ea4dSDave Chinner 
33765717ea4dSDave Chinner 		/*
33775717ea4dSDave Chinner 		 * Abort flushing this inode if we are shut down because the
33785717ea4dSDave Chinner 		 * inode may not currently be in the AIL. This can occur when
33795717ea4dSDave Chinner 		 * log I/O failure unpins the inode without inserting into the
33805717ea4dSDave Chinner 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
33815717ea4dSDave Chinner 		 * that otherwise looks like it should be flushed.
33825717ea4dSDave Chinner 		 */
338301728b44SDave Chinner 		if (xlog_is_shutdown(mp->m_log)) {
33845717ea4dSDave Chinner 			xfs_iunpin_wait(ip);
33855717ea4dSDave Chinner 			xfs_iflush_abort(ip);
33865717ea4dSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
33875717ea4dSDave Chinner 			error = -EIO;
33885717ea4dSDave Chinner 			continue;
33895717ea4dSDave Chinner 		}
33905717ea4dSDave Chinner 
33915717ea4dSDave Chinner 		/* don't block waiting on a log force to unpin dirty inodes */
33925717ea4dSDave Chinner 		if (xfs_ipincount(ip)) {
3393718ecc50SDave Chinner 			xfs_iflags_clear(ip, XFS_IFLUSHING);
33945717ea4dSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
33955717ea4dSDave Chinner 			continue;
33965717ea4dSDave Chinner 		}
33975717ea4dSDave Chinner 
33985717ea4dSDave Chinner 		if (!xfs_inode_clean(ip))
33995717ea4dSDave Chinner 			error = xfs_iflush(ip, bp);
34005717ea4dSDave Chinner 		else
3401718ecc50SDave Chinner 			xfs_iflags_clear(ip, XFS_IFLUSHING);
34025717ea4dSDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
34035717ea4dSDave Chinner 		if (error)
3404e6187b34SDave Chinner 			break;
3405e6187b34SDave Chinner 		clcount++;
3406e6187b34SDave Chinner 	}
3407e6187b34SDave Chinner 
3408e6187b34SDave Chinner 	if (error) {
340901728b44SDave Chinner 		/*
341001728b44SDave Chinner 		 * Shutdown first so we kill the log before we release this
341101728b44SDave Chinner 		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
341201728b44SDave Chinner 		 * of the log, failing it before the _log_ is shut down can
341301728b44SDave Chinner 		 * result in the log tail being moved forward in the journal
341401728b44SDave Chinner 		 * on disk because log writes can still be taking place. Hence
341501728b44SDave Chinner 		 * unpinning the tail will allow the ICREATE intent to be
341601728b44SDave Chinner 		 * removed from the log an recovery will fail with uninitialised
341701728b44SDave Chinner 		 * removed from the log and recovery will fail with uninitialised
341801728b44SDave Chinner 		 */
341901728b44SDave Chinner 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3420e6187b34SDave Chinner 		bp->b_flags |= XBF_ASYNC;
3421e6187b34SDave Chinner 		xfs_buf_ioend_fail(bp);
3422e6187b34SDave Chinner 		return error;
3423e6187b34SDave Chinner 	}
3424e6187b34SDave Chinner 
34255717ea4dSDave Chinner 	if (!clcount)
34265717ea4dSDave Chinner 		return -EAGAIN;
34275717ea4dSDave Chinner 
34285717ea4dSDave Chinner 	XFS_STATS_INC(mp, xs_icluster_flushcnt);
34295717ea4dSDave Chinner 	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
34305717ea4dSDave Chinner 	return 0;
34315717ea4dSDave Chinner 
34335717ea4dSDave Chinner 
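
/*
 * Hedged usage sketch (not part of this file): given the contract described
 * above, a caller holding the locked cluster buffer would typically queue the
 * buffer for delayed write and release it on success, release it untouched on
 * -EAGAIN, and do nothing further on any other error because the buffer has
 * already been unlocked and released during shutdown.  "buffer_list" is an
 * illustrative name for the caller's delwri list.
 *
 *	error = xfs_iflush_cluster(bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *		xfs_buf_relse(bp);
 *	} else if (error == -EAGAIN) {
 *		xfs_buf_relse(bp);
 *	}
 */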
343444a8736bSDarrick J. Wong /* Release an inode. */
343544a8736bSDarrick J. Wong void
343644a8736bSDarrick J. Wong xfs_irele(
343744a8736bSDarrick J. Wong 	struct xfs_inode	*ip)
343844a8736bSDarrick J. Wong {
343944a8736bSDarrick J. Wong 	trace_xfs_irele(ip, _RET_IP_);
344044a8736bSDarrick J. Wong 	iput(VFS_I(ip));
344144a8736bSDarrick J. Wong }
344254fbdd10SChristoph Hellwig 
344354fbdd10SChristoph Hellwig /*
344454fbdd10SChristoph Hellwig  * Ensure all committed transactions touching the inode are written to the log.
344554fbdd10SChristoph Hellwig  */
344654fbdd10SChristoph Hellwig int
344754fbdd10SChristoph Hellwig xfs_log_force_inode(
344854fbdd10SChristoph Hellwig 	struct xfs_inode	*ip)
344954fbdd10SChristoph Hellwig {
34505f9b4b0dSDave Chinner 	xfs_csn_t		seq = 0;
345154fbdd10SChristoph Hellwig 
345254fbdd10SChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_SHARED);
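	/*
	 * A non-zero pin count means the inode has been modified by
	 * transactions that are still only in the in-core log, so sample the
	 * commit sequence of the most recent modification under the ILOCK.
	 */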
345354fbdd10SChristoph Hellwig 	if (xfs_ipincount(ip))
34545f9b4b0dSDave Chinner 		seq = ip->i_itemp->ili_commit_seq;
345554fbdd10SChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
345654fbdd10SChristoph Hellwig 
34575f9b4b0dSDave Chinner 	if (!seq)
345854fbdd10SChristoph Hellwig 		return 0;
34595f9b4b0dSDave Chinner 	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
346054fbdd10SChristoph Hellwig }
3461e2aaee9cSDarrick J. Wong 
3462e2aaee9cSDarrick J. Wong /*
3463e2aaee9cSDarrick J. Wong  * Grab the exclusive iolock for a data copy from src to dest, making sure to
3464e2aaee9cSDarrick J. Wong  * abide by the vfs locking order (lowest pointer value goes first) and breaking the
3465e2aaee9cSDarrick J. Wong  * layout leases before proceeding.  The loop is needed because we cannot call
3466e2aaee9cSDarrick J. Wong  * the blocking break_layout() with the iolocks held, and therefore have to
3467e2aaee9cSDarrick J. Wong  * back out both locks.
3468e2aaee9cSDarrick J. Wong  */
3469e2aaee9cSDarrick J. Wong static int
3470e2aaee9cSDarrick J. Wong xfs_iolock_two_inodes_and_break_layout(
3471e2aaee9cSDarrick J. Wong 	struct inode		*src,
3472e2aaee9cSDarrick J. Wong 	struct inode		*dest)
3473e2aaee9cSDarrick J. Wong {
3474e2aaee9cSDarrick J. Wong 	int			error;
3475e2aaee9cSDarrick J. Wong 
3476e2aaee9cSDarrick J. Wong 	if (src > dest)
3477e2aaee9cSDarrick J. Wong 		swap(src, dest);
3478e2aaee9cSDarrick J. Wong 
3479e2aaee9cSDarrick J. Wong retry:
3480e2aaee9cSDarrick J. Wong 	/* Wait to break both inodes' layouts before we start locking. */
3481e2aaee9cSDarrick J. Wong 	error = break_layout(src, true);
3482e2aaee9cSDarrick J. Wong 	if (error)
3483e2aaee9cSDarrick J. Wong 		return error;
3484e2aaee9cSDarrick J. Wong 	if (src != dest) {
3485e2aaee9cSDarrick J. Wong 		error = break_layout(dest, true);
3486e2aaee9cSDarrick J. Wong 		if (error)
3487e2aaee9cSDarrick J. Wong 			return error;
3488e2aaee9cSDarrick J. Wong 	}
3489e2aaee9cSDarrick J. Wong 
3490e2aaee9cSDarrick J. Wong 	/* Lock one inode and make sure nobody got in and leased it. */
3491e2aaee9cSDarrick J. Wong 	inode_lock(src);
3492e2aaee9cSDarrick J. Wong 	error = break_layout(src, false);
3493e2aaee9cSDarrick J. Wong 	if (error) {
3494e2aaee9cSDarrick J. Wong 		inode_unlock(src);
3495e2aaee9cSDarrick J. Wong 		if (error == -EWOULDBLOCK)
3496e2aaee9cSDarrick J. Wong 			goto retry;
3497e2aaee9cSDarrick J. Wong 		return error;
3498e2aaee9cSDarrick J. Wong 	}
3499e2aaee9cSDarrick J. Wong 
3500e2aaee9cSDarrick J. Wong 	if (src == dest)
3501e2aaee9cSDarrick J. Wong 		return 0;
3502e2aaee9cSDarrick J. Wong 
3503e2aaee9cSDarrick J. Wong 	/* Lock the other inode and make sure nobody got in and leased it. */
3504e2aaee9cSDarrick J. Wong 	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3505e2aaee9cSDarrick J. Wong 	error = break_layout(dest, false);
3506e2aaee9cSDarrick J. Wong 	if (error) {
3507e2aaee9cSDarrick J. Wong 		inode_unlock(src);
3508e2aaee9cSDarrick J. Wong 		inode_unlock(dest);
3509e2aaee9cSDarrick J. Wong 		if (error == -EWOULDBLOCK)
3510e2aaee9cSDarrick J. Wong 			goto retry;
3511e2aaee9cSDarrick J. Wong 		return error;
3512e2aaee9cSDarrick J. Wong 	}
3513e2aaee9cSDarrick J. Wong 
3514e2aaee9cSDarrick J. Wong 	return 0;
3515e2aaee9cSDarrick J. Wong }
3516e2aaee9cSDarrick J. Wong 
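/*
 * Lock the MMAPLOCK of both inodes in ascending inode number order and wait
 * for any busy DAX pages on them to drain.  If the second inode still has
 * DAX pages in use, back out both locks and start again rather than cycling
 * the nested lock while the first MMAPLOCK is held.
 */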
351713f9e267SShiyang Ruan static int
351813f9e267SShiyang Ruan xfs_mmaplock_two_inodes_and_break_dax_layout(
351913f9e267SShiyang Ruan 	struct xfs_inode	*ip1,
352013f9e267SShiyang Ruan 	struct xfs_inode	*ip2)
352113f9e267SShiyang Ruan {
352213f9e267SShiyang Ruan 	int			error;
352313f9e267SShiyang Ruan 	bool			retry;
352413f9e267SShiyang Ruan 	struct page		*page;
352513f9e267SShiyang Ruan 
352613f9e267SShiyang Ruan 	if (ip1->i_ino > ip2->i_ino)
352713f9e267SShiyang Ruan 		swap(ip1, ip2);
352813f9e267SShiyang Ruan 
352913f9e267SShiyang Ruan again:
353013f9e267SShiyang Ruan 	retry = false;
353113f9e267SShiyang Ruan 	/* Lock the first inode */
353213f9e267SShiyang Ruan 	xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
353313f9e267SShiyang Ruan 	error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
353413f9e267SShiyang Ruan 	if (error || retry) {
353513f9e267SShiyang Ruan 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
353613f9e267SShiyang Ruan 		if (error == 0 && retry)
353713f9e267SShiyang Ruan 			goto again;
353813f9e267SShiyang Ruan 		return error;
353913f9e267SShiyang Ruan 	}
354013f9e267SShiyang Ruan 
354113f9e267SShiyang Ruan 	if (ip1 == ip2)
354213f9e267SShiyang Ruan 		return 0;
354313f9e267SShiyang Ruan 
354413f9e267SShiyang Ruan 	/* Nested lock the second inode */
354513f9e267SShiyang Ruan 	xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
354613f9e267SShiyang Ruan 	/*
354713f9e267SShiyang Ruan 	 * We cannot use xfs_break_dax_layouts() directly here because it may
354813f9e267SShiyang Ruan 	 * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
354913f9e267SShiyang Ruan 	 * need to unlock and relock XFS_MMAPLOCK_EXCL, which is not suitable
355013f9e267SShiyang Ruan 	 */
355113f9e267SShiyang Ruan 	page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
355213f9e267SShiyang Ruan 	if (page && page_ref_count(page) != 1) {
355313f9e267SShiyang Ruan 		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
355413f9e267SShiyang Ruan 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
355513f9e267SShiyang Ruan 		goto again;
355613f9e267SShiyang Ruan 	}
355713f9e267SShiyang Ruan 
355813f9e267SShiyang Ruan 	return 0;
355913f9e267SShiyang Ruan }
356013f9e267SShiyang Ruan 
3561e2aaee9cSDarrick J. Wong /*
3562e2aaee9cSDarrick J. Wong  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3563e2aaee9cSDarrick J. Wong  * mmap activity.
3564e2aaee9cSDarrick J. Wong  */
3565e2aaee9cSDarrick J. Wong int
3566e2aaee9cSDarrick J. Wong xfs_ilock2_io_mmap(
3567e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip1,
3568e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip2)
3569e2aaee9cSDarrick J. Wong {
3570e2aaee9cSDarrick J. Wong 	int			ret;
3571e2aaee9cSDarrick J. Wong 
3572e2aaee9cSDarrick J. Wong 	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3573e2aaee9cSDarrick J. Wong 	if (ret)
3574e2aaee9cSDarrick J. Wong 		return ret;
357513f9e267SShiyang Ruan 
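	/*
	 * If both inodes are DAX, take the MMAPLOCKs and drain any busy DAX
	 * pages; otherwise taking both mappings' invalidate locks is enough
	 * to block page faults.
	 */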
357613f9e267SShiyang Ruan 	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
357713f9e267SShiyang Ruan 		ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
357813f9e267SShiyang Ruan 		if (ret) {
357913f9e267SShiyang Ruan 			inode_unlock(VFS_I(ip2));
358013f9e267SShiyang Ruan 			if (ip1 != ip2)
358113f9e267SShiyang Ruan 				inode_unlock(VFS_I(ip1));
358213f9e267SShiyang Ruan 			return ret;
358313f9e267SShiyang Ruan 		}
358413f9e267SShiyang Ruan 	} else
3585d2c292d8SJan Kara 		filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
3586d2c292d8SJan Kara 					    VFS_I(ip2)->i_mapping);
358713f9e267SShiyang Ruan 
3588e2aaee9cSDarrick J. Wong 	return 0;
3589e2aaee9cSDarrick J. Wong }
3590e2aaee9cSDarrick J. Wong 
3591e2aaee9cSDarrick J. Wong /* Unlock both inodes to allow IO and mmap activity. */
3592e2aaee9cSDarrick J. Wong void
3593e2aaee9cSDarrick J. Wong xfs_iunlock2_io_mmap(
3594e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip1,
3595e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip2)
3596e2aaee9cSDarrick J. Wong {
359713f9e267SShiyang Ruan 	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
359813f9e267SShiyang Ruan 		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
359913f9e267SShiyang Ruan 		if (ip1 != ip2)
360013f9e267SShiyang Ruan 			xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
360113f9e267SShiyang Ruan 	} else
3602d2c292d8SJan Kara 		filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
3603d2c292d8SJan Kara 					      VFS_I(ip2)->i_mapping);
360413f9e267SShiyang Ruan 
3605e2aaee9cSDarrick J. Wong 	inode_unlock(VFS_I(ip2));
3606d2c292d8SJan Kara 	if (ip1 != ip2)
3607e2aaee9cSDarrick J. Wong 		inode_unlock(VFS_I(ip1));
3608e2aaee9cSDarrick J. Wong }
3609
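
/*
 * Hedged usage sketch (not part of this file): a data copy or remap path
 * would be expected to bracket its work with the two helpers above.  The
 * variable names are illustrative only.
 *
 *	error = xfs_ilock2_io_mmap(src_ip, dest_ip);
 *	if (error)
 *		return error;
 *	... do the copy or remap work under the IOLOCK and MMAPLOCK ...
 *	xfs_iunlock2_io_mmap(src_ip, dest_ip);
 */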