xref: /openbmc/linux/fs/xfs/xfs_inode.c (revision a83d5a8b1d946264e24299d6697bb03fe5198668)
10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
33e57ecf6SOlaf Weber  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
47b718769SNathan Scott  * All Rights Reserved.
51da177e4SLinus Torvalds  */
6f0e28280SJeff Layton #include <linux/iversion.h>
740ebd81dSRobert P. J. Day 
81da177e4SLinus Torvalds #include "xfs.h"
9a844f451SNathan Scott #include "xfs_fs.h"
1070a9883cSDave Chinner #include "xfs_shared.h"
11239880efSDave Chinner #include "xfs_format.h"
12239880efSDave Chinner #include "xfs_log_format.h"
13239880efSDave Chinner #include "xfs_trans_resv.h"
141da177e4SLinus Torvalds #include "xfs_mount.h"
153ab78df2SDarrick J. Wong #include "xfs_defer.h"
16a4fbe6abSDave Chinner #include "xfs_inode.h"
17c24b5dfaSDave Chinner #include "xfs_dir2.h"
18c24b5dfaSDave Chinner #include "xfs_attr.h"
19239880efSDave Chinner #include "xfs_trans_space.h"
20239880efSDave Chinner #include "xfs_trans.h"
211da177e4SLinus Torvalds #include "xfs_buf_item.h"
22a844f451SNathan Scott #include "xfs_inode_item.h"
23a844f451SNathan Scott #include "xfs_ialloc.h"
24a844f451SNathan Scott #include "xfs_bmap.h"
2568988114SDave Chinner #include "xfs_bmap_util.h"
26e9e899a2SDarrick J. Wong #include "xfs_errortag.h"
271da177e4SLinus Torvalds #include "xfs_error.h"
281da177e4SLinus Torvalds #include "xfs_quota.h"
292a82b8beSDavid Chinner #include "xfs_filestream.h"
300b1b213fSChristoph Hellwig #include "xfs_trace.h"
3133479e05SDave Chinner #include "xfs_icache.h"
32c24b5dfaSDave Chinner #include "xfs_symlink.h"
33239880efSDave Chinner #include "xfs_trans_priv.h"
34239880efSDave Chinner #include "xfs_log.h"
35a4fbe6abSDave Chinner #include "xfs_bmap_btree.h"
36aa8968f2SDarrick J. Wong #include "xfs_reflink.h"
379bbafc71SDave Chinner #include "xfs_ag.h"
3801728b44SDave Chinner #include "xfs_log_priv.h"
391da177e4SLinus Torvalds 
40182696fbSDarrick J. Wong struct kmem_cache *xfs_inode_cache;
411da177e4SLinus Torvalds 
421da177e4SLinus Torvalds /*
438f04c47aSChristoph Hellwig  * Used in xfs_itruncate_extents().  This is the maximum number of extents
441da177e4SLinus Torvalds  * freed from a file in a single transaction.
451da177e4SLinus Torvalds  */
461da177e4SLinus Torvalds #define	XFS_ITRUNC_MAX_EXTENTS	2
471da177e4SLinus Torvalds 
4854d7b5c1SDave Chinner STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
49f40aadb2SDave Chinner STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
50f40aadb2SDave Chinner 	struct xfs_inode *);
51ab297431SZhi Yong Wu 
522a0ec1d9SDave Chinner /*
532a0ec1d9SDave Chinner  * Helper function to extract the extent size hint from an inode.
542a0ec1d9SDave Chinner  */
552a0ec1d9SDave Chinner xfs_extlen_t
562a0ec1d9SDave Chinner xfs_get_extsz_hint(
572a0ec1d9SDave Chinner 	struct xfs_inode	*ip)
582a0ec1d9SDave Chinner {
59bdb2ed2dSChristoph Hellwig 	/*
60bdb2ed2dSChristoph Hellwig 	 * No point in aligning allocations if we need to COW to actually
61bdb2ed2dSChristoph Hellwig 	 * write to them.
62bdb2ed2dSChristoph Hellwig 	 */
63bdb2ed2dSChristoph Hellwig 	if (xfs_is_always_cow_inode(ip))
64bdb2ed2dSChristoph Hellwig 		return 0;
65db07349dSChristoph Hellwig 	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
66031474c2SChristoph Hellwig 		return ip->i_extsize;
672a0ec1d9SDave Chinner 	if (XFS_IS_REALTIME_INODE(ip))
682a0ec1d9SDave Chinner 		return ip->i_mount->m_sb.sb_rextsize;
692a0ec1d9SDave Chinner 	return 0;
702a0ec1d9SDave Chinner }
712a0ec1d9SDave Chinner 
72fa96acadSDave Chinner /*
73f7ca3522SDarrick J. Wong  * Helper function to extract CoW extent size hint from inode.
74f7ca3522SDarrick J. Wong  * Between the extent size hint and the CoW extent size hint, we
75e153aa79SDarrick J. Wong  * return the greater of the two.  If the value is zero (automatic),
76e153aa79SDarrick J. Wong  * use the default size.
77f7ca3522SDarrick J. Wong  */
78f7ca3522SDarrick J. Wong xfs_extlen_t
79f7ca3522SDarrick J. Wong xfs_get_cowextsz_hint(
80f7ca3522SDarrick J. Wong 	struct xfs_inode	*ip)
81f7ca3522SDarrick J. Wong {
82f7ca3522SDarrick J. Wong 	xfs_extlen_t		a, b;
83f7ca3522SDarrick J. Wong 
84f7ca3522SDarrick J. Wong 	a = 0;
853e09ab8fSChristoph Hellwig 	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
86b33ce57dSChristoph Hellwig 		a = ip->i_cowextsize;
87f7ca3522SDarrick J. Wong 	b = xfs_get_extsz_hint(ip);
88f7ca3522SDarrick J. Wong 
89e153aa79SDarrick J. Wong 	a = max(a, b);
90e153aa79SDarrick J. Wong 	if (a == 0)
91e153aa79SDarrick J. Wong 		return XFS_DEFAULT_COWEXTSZ_HINT;
92f7ca3522SDarrick J. Wong 	return a;
93f7ca3522SDarrick J. Wong }
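/*
 * A worked example (illustrative only): with a 16-block extent size hint on
 * the inode and no CoW extent size hint set, this returns 16; with neither
 * hint set (and no realtime extent size to fall back on) it returns
 * XFS_DEFAULT_COWEXTSZ_HINT.
 */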
94f7ca3522SDarrick J. Wong 
95f7ca3522SDarrick J. Wong /*
96efa70be1SChristoph Hellwig  * These two are wrapper routines around the xfs_ilock() routine used to
97efa70be1SChristoph Hellwig  * centralize some grungy code.  They are used in places that wish to lock the
98efa70be1SChristoph Hellwig  * inode solely for reading the extents.  The reason these places can't just
99efa70be1SChristoph Hellwig  * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
100efa70be1SChristoph Hellwig  * bringing in of the extents from disk for a file in b-tree format.  If the
101efa70be1SChristoph Hellwig  * inode is in b-tree format, then we need to lock the inode exclusively until
102efa70be1SChristoph Hellwig  * the extents are read in.  Locking it exclusively all the time would limit
103efa70be1SChristoph Hellwig  * our parallelism unnecessarily, though.  What we do instead is check to see
104efa70be1SChristoph Hellwig  * if the extents have been read in yet, and only lock the inode exclusively
105efa70be1SChristoph Hellwig  * if they have not.
106fa96acadSDave Chinner  *
107efa70be1SChristoph Hellwig  * The functions return a value which should be given to the corresponding
10801f4f327SChristoph Hellwig  * xfs_iunlock() call.
109fa96acadSDave Chinner  */
110fa96acadSDave Chinner uint
111309ecac8SChristoph Hellwig xfs_ilock_data_map_shared(
112309ecac8SChristoph Hellwig 	struct xfs_inode	*ip)
113fa96acadSDave Chinner {
114309ecac8SChristoph Hellwig 	uint			lock_mode = XFS_ILOCK_SHARED;
115fa96acadSDave Chinner 
116b2197a36SChristoph Hellwig 	if (xfs_need_iread_extents(&ip->i_df))
117fa96acadSDave Chinner 		lock_mode = XFS_ILOCK_EXCL;
118fa96acadSDave Chinner 	xfs_ilock(ip, lock_mode);
119fa96acadSDave Chinner 	return lock_mode;
120fa96acadSDave Chinner }
121fa96acadSDave Chinner 
122efa70be1SChristoph Hellwig uint
123efa70be1SChristoph Hellwig xfs_ilock_attr_map_shared(
124efa70be1SChristoph Hellwig 	struct xfs_inode	*ip)
125fa96acadSDave Chinner {
126efa70be1SChristoph Hellwig 	uint			lock_mode = XFS_ILOCK_SHARED;
127efa70be1SChristoph Hellwig 
128b2197a36SChristoph Hellwig 	if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
129efa70be1SChristoph Hellwig 		lock_mode = XFS_ILOCK_EXCL;
130efa70be1SChristoph Hellwig 	xfs_ilock(ip, lock_mode);
131efa70be1SChristoph Hellwig 	return lock_mode;
132fa96acadSDave Chinner }
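/*
 * A minimal usage sketch for these wrappers (illustrative, not taken from a
 * particular caller): the returned lock mode is handed back to xfs_iunlock().
 *
 *	uint lock_mode = xfs_ilock_data_map_shared(ip);
 *	...read the data fork extent list...
 *	xfs_iunlock(ip, lock_mode);
 */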
133fa96acadSDave Chinner 
134fa96acadSDave Chinner /*
135ca76a761SKaixu Xia  * You can't set both SHARED and EXCL for the same lock,
136ca76a761SKaixu Xia  * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
137ca76a761SKaixu Xia  * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
138ca76a761SKaixu Xia  * to set in lock_flags.
139ca76a761SKaixu Xia  */
140ca76a761SKaixu Xia static inline void
141ca76a761SKaixu Xia xfs_lock_flags_assert(
142ca76a761SKaixu Xia 	uint		lock_flags)
143ca76a761SKaixu Xia {
144ca76a761SKaixu Xia 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
145ca76a761SKaixu Xia 		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
146ca76a761SKaixu Xia 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
147ca76a761SKaixu Xia 		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
148ca76a761SKaixu Xia 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
149ca76a761SKaixu Xia 		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
150ca76a761SKaixu Xia 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
151ca76a761SKaixu Xia 	ASSERT(lock_flags != 0);
152ca76a761SKaixu Xia }
153ca76a761SKaixu Xia 
154ca76a761SKaixu Xia /*
15565523218SChristoph Hellwig  * In addition to i_rwsem in the VFS inode, locking an xfs inode involves 2
1562433480aSJan Kara  * more multi-reader locks: the mapping's invalidate_lock and the i_lock.
15765523218SChristoph Hellwig  * This routine allows various combinations of the locks to be obtained.
158fa96acadSDave Chinner  *
159653c60b6SDave Chinner  * The 3 locks should always be ordered so that the IO lock is obtained first,
160653c60b6SDave Chinner  * the mmap lock second and the ilock last in order to prevent deadlock.
161fa96acadSDave Chinner  *
162653c60b6SDave Chinner  * Basic locking order:
163653c60b6SDave Chinner  *
1642433480aSJan Kara  * i_rwsem -> invalidate_lock -> page_lock -> i_lock
165653c60b6SDave Chinner  *
166c1e8d7c6SMichel Lespinasse  * mmap_lock locking order:
167653c60b6SDave Chinner  *
168c1e8d7c6SMichel Lespinasse  * i_rwsem -> page lock -> mmap_lock
1692433480aSJan Kara  * mmap_lock -> invalidate_lock -> page_lock
170653c60b6SDave Chinner  *
171c1e8d7c6SMichel Lespinasse  * The difference in mmap_lock locking order means that we cannot hold the
1722433480aSJan Kara  * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
1732433480aSJan Kara  * can fault in pages during copy in/out (for buffered IO) or require the
1742433480aSJan Kara  * mmap_lock in get_user_pages() to map the user pages into the kernel address
1752433480aSJan Kara  * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
1762433480aSJan Kara  * fault because page faults already hold the mmap_lock.
177653c60b6SDave Chinner  *
178653c60b6SDave Chinner  * Hence to serialise fully against both syscall and mmap based IO, we need to
1792433480aSJan Kara  * take both the i_rwsem and the invalidate_lock. These locks should *only* be
1802433480aSJan Kara  * both taken in places where we need to invalidate the page cache in a race
181653c60b6SDave Chinner  * free manner (e.g. truncate, hole punch and other extent manipulation
182653c60b6SDave Chinner  * functions).
183fa96acadSDave Chinner  */
184fa96acadSDave Chinner void
185fa96acadSDave Chinner xfs_ilock(
186fa96acadSDave Chinner 	xfs_inode_t		*ip,
187fa96acadSDave Chinner 	uint			lock_flags)
188fa96acadSDave Chinner {
189fa96acadSDave Chinner 	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
190fa96acadSDave Chinner 
191ca76a761SKaixu Xia 	xfs_lock_flags_assert(lock_flags);
192fa96acadSDave Chinner 
19365523218SChristoph Hellwig 	if (lock_flags & XFS_IOLOCK_EXCL) {
19465523218SChristoph Hellwig 		down_write_nested(&VFS_I(ip)->i_rwsem,
19565523218SChristoph Hellwig 				  XFS_IOLOCK_DEP(lock_flags));
19665523218SChristoph Hellwig 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
19765523218SChristoph Hellwig 		down_read_nested(&VFS_I(ip)->i_rwsem,
19865523218SChristoph Hellwig 				 XFS_IOLOCK_DEP(lock_flags));
19965523218SChristoph Hellwig 	}
200fa96acadSDave Chinner 
2012433480aSJan Kara 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
2022433480aSJan Kara 		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
2032433480aSJan Kara 				  XFS_MMAPLOCK_DEP(lock_flags));
2042433480aSJan Kara 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
2052433480aSJan Kara 		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
2062433480aSJan Kara 				 XFS_MMAPLOCK_DEP(lock_flags));
2072433480aSJan Kara 	}
208653c60b6SDave Chinner 
209fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
210fa96acadSDave Chinner 		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
211fa96acadSDave Chinner 	else if (lock_flags & XFS_ILOCK_SHARED)
212fa96acadSDave Chinner 		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
213fa96acadSDave Chinner }
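/*
 * Illustrative sketch of a caller taking multiple locks in a single call, in
 * the order described above (not taken from a specific caller):
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL);
 *	...invalidate the page cache and manipulate extents...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL);
 */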
214fa96acadSDave Chinner 
215fa96acadSDave Chinner /*
216fa96acadSDave Chinner  * This is just like xfs_ilock(), except that the caller
217fa96acadSDave Chinner  * is guaranteed not to sleep.  It returns 1 if it gets
218fa96acadSDave Chinner  * the requested locks and 0 otherwise.  If the IO lock is
219fa96acadSDave Chinner  * obtained but the inode lock cannot be, then the IO lock
220fa96acadSDave Chinner  * is dropped before returning.
221fa96acadSDave Chinner  *
222fa96acadSDave Chinner  * ip -- the inode being locked
223fa96acadSDave Chinner  * lock_flags -- this parameter indicates the inode's locks to be
224fa96acadSDave Chinner  *       locked.  See the comment for xfs_ilock() for a list
225fa96acadSDave Chinner  *	 of valid values.
226fa96acadSDave Chinner  */
227fa96acadSDave Chinner int
228fa96acadSDave Chinner xfs_ilock_nowait(
229fa96acadSDave Chinner 	xfs_inode_t		*ip,
230fa96acadSDave Chinner 	uint			lock_flags)
231fa96acadSDave Chinner {
232fa96acadSDave Chinner 	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
233fa96acadSDave Chinner 
234ca76a761SKaixu Xia 	xfs_lock_flags_assert(lock_flags);
235fa96acadSDave Chinner 
236fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL) {
23765523218SChristoph Hellwig 		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
238fa96acadSDave Chinner 			goto out;
239fa96acadSDave Chinner 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
24065523218SChristoph Hellwig 		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
241fa96acadSDave Chinner 			goto out;
242fa96acadSDave Chinner 	}
243653c60b6SDave Chinner 
244653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
2452433480aSJan Kara 		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
246653c60b6SDave Chinner 			goto out_undo_iolock;
247653c60b6SDave Chinner 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
2482433480aSJan Kara 		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
249653c60b6SDave Chinner 			goto out_undo_iolock;
250653c60b6SDave Chinner 	}
251653c60b6SDave Chinner 
252fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL) {
253fa96acadSDave Chinner 		if (!mrtryupdate(&ip->i_lock))
254653c60b6SDave Chinner 			goto out_undo_mmaplock;
255fa96acadSDave Chinner 	} else if (lock_flags & XFS_ILOCK_SHARED) {
256fa96acadSDave Chinner 		if (!mrtryaccess(&ip->i_lock))
257653c60b6SDave Chinner 			goto out_undo_mmaplock;
258fa96acadSDave Chinner 	}
259fa96acadSDave Chinner 	return 1;
260fa96acadSDave Chinner 
261653c60b6SDave Chinner out_undo_mmaplock:
262653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
2632433480aSJan Kara 		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
264653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
2652433480aSJan Kara 		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
266fa96acadSDave Chinner out_undo_iolock:
267fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
26865523218SChristoph Hellwig 		up_write(&VFS_I(ip)->i_rwsem);
269fa96acadSDave Chinner 	else if (lock_flags & XFS_IOLOCK_SHARED)
27065523218SChristoph Hellwig 		up_read(&VFS_I(ip)->i_rwsem);
271fa96acadSDave Chinner out:
272fa96acadSDave Chinner 	return 0;
273fa96acadSDave Chinner }
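/*
 * Example of the non-blocking variant (illustrative only): try for the lock
 * and back off if it is contended rather than sleeping:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return -EAGAIN;
 *	...modify the inode...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */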
274fa96acadSDave Chinner 
275fa96acadSDave Chinner /*
276fa96acadSDave Chinner  * xfs_iunlock() is used to drop the inode locks acquired with
277fa96acadSDave Chinner  * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
278fa96acadSDave Chinner  * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
279fa96acadSDave Chinner  * that we know which locks to drop.
280fa96acadSDave Chinner  *
281fa96acadSDave Chinner  * ip -- the inode being unlocked
282fa96acadSDave Chinner  * lock_flags -- this parameter indicates the inode's locks to be
283fa96acadSDave Chinner  *       unlocked.  See the comment for xfs_ilock() for a list
284fa96acadSDave Chinner  *	 of valid values for this parameter.
285fa96acadSDave Chinner  *
286fa96acadSDave Chinner  */
287fa96acadSDave Chinner void
288fa96acadSDave Chinner xfs_iunlock(
289fa96acadSDave Chinner 	xfs_inode_t		*ip,
290fa96acadSDave Chinner 	uint			lock_flags)
291fa96acadSDave Chinner {
292ca76a761SKaixu Xia 	xfs_lock_flags_assert(lock_flags);
293fa96acadSDave Chinner 
294fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
29565523218SChristoph Hellwig 		up_write(&VFS_I(ip)->i_rwsem);
296fa96acadSDave Chinner 	else if (lock_flags & XFS_IOLOCK_SHARED)
29765523218SChristoph Hellwig 		up_read(&VFS_I(ip)->i_rwsem);
298fa96acadSDave Chinner 
299653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
3002433480aSJan Kara 		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
301653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
3022433480aSJan Kara 		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
303653c60b6SDave Chinner 
304fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
305fa96acadSDave Chinner 		mrunlock_excl(&ip->i_lock);
306fa96acadSDave Chinner 	else if (lock_flags & XFS_ILOCK_SHARED)
307fa96acadSDave Chinner 		mrunlock_shared(&ip->i_lock);
308fa96acadSDave Chinner 
309fa96acadSDave Chinner 	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
310fa96acadSDave Chinner }
311fa96acadSDave Chinner 
312fa96acadSDave Chinner /*
313fa96acadSDave Chinner  * Give up write locks.  The i/o lock cannot be held nested
314fa96acadSDave Chinner  * if it is being demoted.
315fa96acadSDave Chinner  */
316fa96acadSDave Chinner void
317fa96acadSDave Chinner xfs_ilock_demote(
318fa96acadSDave Chinner 	xfs_inode_t		*ip,
319fa96acadSDave Chinner 	uint			lock_flags)
320fa96acadSDave Chinner {
321653c60b6SDave Chinner 	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
322653c60b6SDave Chinner 	ASSERT((lock_flags &
323653c60b6SDave Chinner 		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
324fa96acadSDave Chinner 
325fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
326fa96acadSDave Chinner 		mrdemote(&ip->i_lock);
327653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
3282433480aSJan Kara 		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
329fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
33065523218SChristoph Hellwig 		downgrade_write(&VFS_I(ip)->i_rwsem);
331fa96acadSDave Chinner 
332fa96acadSDave Chinner 	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
333fa96acadSDave Chinner }
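/*
 * Example of demotion (illustrative only): take the IO lock exclusively for
 * setup, then demote it so that concurrent readers can proceed:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	...exclusive setup work...
 *	xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	...continue under the shared lock...
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 */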
334fa96acadSDave Chinner 
335742ae1e3SDave Chinner #if defined(DEBUG) || defined(XFS_WARN)
336e31cbde7SPavel Reichl static inline bool
337e31cbde7SPavel Reichl __xfs_rwsem_islocked(
338e31cbde7SPavel Reichl 	struct rw_semaphore	*rwsem,
339e31cbde7SPavel Reichl 	bool			shared)
340e31cbde7SPavel Reichl {
341e31cbde7SPavel Reichl 	if (!debug_locks)
342e31cbde7SPavel Reichl 		return rwsem_is_locked(rwsem);
343e31cbde7SPavel Reichl 
344e31cbde7SPavel Reichl 	if (!shared)
345e31cbde7SPavel Reichl 		return lockdep_is_held_type(rwsem, 0);
346e31cbde7SPavel Reichl 
347e31cbde7SPavel Reichl 	/*
348e31cbde7SPavel Reichl 	 * We are checking that the lock is held at least in shared
349e31cbde7SPavel Reichl 	 * mode but don't care that it might be held exclusively
350e31cbde7SPavel Reichl 	 * (i.e. shared | excl). Hence we check if the lock is held
351e31cbde7SPavel Reichl 	 * in any mode rather than an explicit shared mode.
352e31cbde7SPavel Reichl 	 */
353e31cbde7SPavel Reichl 	return lockdep_is_held_type(rwsem, -1);
354e31cbde7SPavel Reichl }
355e31cbde7SPavel Reichl 
356e31cbde7SPavel Reichl bool
357fa96acadSDave Chinner xfs_isilocked(
358e31cbde7SPavel Reichl 	struct xfs_inode	*ip,
359fa96acadSDave Chinner 	uint			lock_flags)
360fa96acadSDave Chinner {
361fa96acadSDave Chinner 	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
362fa96acadSDave Chinner 		if (!(lock_flags & XFS_ILOCK_SHARED))
363fa96acadSDave Chinner 			return !!ip->i_lock.mr_writer;
364fa96acadSDave Chinner 		return rwsem_is_locked(&ip->i_lock.mr_lock);
365fa96acadSDave Chinner 	}
366fa96acadSDave Chinner 
367653c60b6SDave Chinner 	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
36882af8806SKaixu Xia 		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
36982af8806SKaixu Xia 				(lock_flags & XFS_MMAPLOCK_SHARED));
370653c60b6SDave Chinner 	}
371653c60b6SDave Chinner 
372fa96acadSDave Chinner 	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
373e31cbde7SPavel Reichl 		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
374e31cbde7SPavel Reichl 				(lock_flags & XFS_IOLOCK_SHARED));
375fa96acadSDave Chinner 	}
376fa96acadSDave Chinner 
377fa96acadSDave Chinner 	ASSERT(0);
378e31cbde7SPavel Reichl 	return false;
379fa96acadSDave Chinner }
380fa96acadSDave Chinner #endif
381fa96acadSDave Chinner 
382b6a9947eSDave Chinner /*
383b6a9947eSDave Chinner  * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
384b6a9947eSDave Chinner  * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
385b6a9947eSDave Chinner  * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
386b6a9947eSDave Chinner  * errors and warnings.
387b6a9947eSDave Chinner  */
388b6a9947eSDave Chinner #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
3893403ccc0SDave Chinner static bool
3903403ccc0SDave Chinner xfs_lockdep_subclass_ok(
3913403ccc0SDave Chinner 	int subclass)
3923403ccc0SDave Chinner {
3933403ccc0SDave Chinner 	return subclass < MAX_LOCKDEP_SUBCLASSES;
3943403ccc0SDave Chinner }
3953403ccc0SDave Chinner #else
3963403ccc0SDave Chinner #define xfs_lockdep_subclass_ok(subclass)	(true)
3973403ccc0SDave Chinner #endif
3983403ccc0SDave Chinner 
399c24b5dfaSDave Chinner /*
400653c60b6SDave Chinner  * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
4010952c818SDave Chinner  * value. This can be called for any type of inode lock combination, including
4020952c818SDave Chinner  * parent locking. Care must be taken to ensure we don't overrun the subclass
4030952c818SDave Chinner  * storage fields in the class mask we build.
404c24b5dfaSDave Chinner  */
405a1033753SDave Chinner static inline uint
406a1033753SDave Chinner xfs_lock_inumorder(
407a1033753SDave Chinner 	uint	lock_mode,
408a1033753SDave Chinner 	uint	subclass)
409c24b5dfaSDave Chinner {
410a1033753SDave Chinner 	uint	class = 0;
4110952c818SDave Chinner 
4120952c818SDave Chinner 	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
4130952c818SDave Chinner 			      XFS_ILOCK_RTSUM)));
4143403ccc0SDave Chinner 	ASSERT(xfs_lockdep_subclass_ok(subclass));
4150952c818SDave Chinner 
416653c60b6SDave Chinner 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
4170952c818SDave Chinner 		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
4180952c818SDave Chinner 		class += subclass << XFS_IOLOCK_SHIFT;
419653c60b6SDave Chinner 	}
420653c60b6SDave Chinner 
421653c60b6SDave Chinner 	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
4220952c818SDave Chinner 		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
4230952c818SDave Chinner 		class += subclass << XFS_MMAPLOCK_SHIFT;
424653c60b6SDave Chinner 	}
425653c60b6SDave Chinner 
4260952c818SDave Chinner 	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
4270952c818SDave Chinner 		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
4280952c818SDave Chinner 		class += subclass << XFS_ILOCK_SHIFT;
4290952c818SDave Chinner 	}
430c24b5dfaSDave Chinner 
4310952c818SDave Chinner 	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
432c24b5dfaSDave Chinner }
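/*
 * For example (illustrative): xfs_lock_inumorder(XFS_ILOCK_EXCL, 1) returns
 * XFS_ILOCK_EXCL with subclass 1 encoded in the XFS_ILOCK_SHIFT bits, so
 * lockdep sees a distinct class for the second inode locked by
 * xfs_lock_inodes().
 */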
433c24b5dfaSDave Chinner 
434c24b5dfaSDave Chinner /*
43595afcf5cSDave Chinner  * The following routine will lock n inodes in exclusive mode.  We assume the
43695afcf5cSDave Chinner  * caller calls us with the inodes in i_ino order.
437c24b5dfaSDave Chinner  *
43895afcf5cSDave Chinner  * We need to detect deadlock where an inode that we lock is in the AIL and we
43995afcf5cSDave Chinner  * start waiting for another inode that is locked by a thread in a long running
44095afcf5cSDave Chinner  * transaction (such as truncate). This can result in deadlock since the long
44195afcf5cSDave Chinner  * running trans might need to wait for the inode we just locked in order to
44295afcf5cSDave Chinner  * push the tail and free space in the log.
4430952c818SDave Chinner  *
4440952c818SDave Chinner  * xfs_lock_inodes() can only be used to lock one type of lock at a time -
4450952c818SDave Chinner  * the iolock, the mmaplock or the ilock - never more than one type at once.
4460952c818SDave Chinner  * If we lock more than one type at a time, lockdep will report false
4470952c818SDave Chinner  * positives saying we have violated locking orders.
448c24b5dfaSDave Chinner  */
4490d5a75e9SEric Sandeen static void
450c24b5dfaSDave Chinner xfs_lock_inodes(
451efe2330fSChristoph Hellwig 	struct xfs_inode	**ips,
452c24b5dfaSDave Chinner 	int			inodes,
453c24b5dfaSDave Chinner 	uint			lock_mode)
454c24b5dfaSDave Chinner {
455a1033753SDave Chinner 	int			attempts = 0;
456a1033753SDave Chinner 	uint			i;
457a1033753SDave Chinner 	int			j;
458a1033753SDave Chinner 	bool			try_lock;
459efe2330fSChristoph Hellwig 	struct xfs_log_item	*lp;
460c24b5dfaSDave Chinner 
4610952c818SDave Chinner 	/*
4620952c818SDave Chinner 	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
4630952c818SDave Chinner 	 * support an arbitrary depth of locking here, but absolute limits on
464b63da6c8SRandy Dunlap 	 * inodes depend on the type of locking and the limits placed by
4650952c818SDave Chinner 	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
4660952c818SDave Chinner 	 * the asserts.
4670952c818SDave Chinner 	 */
46895afcf5cSDave Chinner 	ASSERT(ips && inodes >= 2 && inodes <= 5);
4690952c818SDave Chinner 	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
4700952c818SDave Chinner 			    XFS_ILOCK_EXCL));
4710952c818SDave Chinner 	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
4720952c818SDave Chinner 			      XFS_ILOCK_SHARED)));
4730952c818SDave Chinner 	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
4740952c818SDave Chinner 		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
4750952c818SDave Chinner 	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
4760952c818SDave Chinner 		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
4770952c818SDave Chinner 
4780952c818SDave Chinner 	if (lock_mode & XFS_IOLOCK_EXCL) {
4790952c818SDave Chinner 		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
4800952c818SDave Chinner 	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
4810952c818SDave Chinner 		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
482c24b5dfaSDave Chinner 
483c24b5dfaSDave Chinner again:
484a1033753SDave Chinner 	try_lock = false;
485a1033753SDave Chinner 	i = 0;
486c24b5dfaSDave Chinner 	for (; i < inodes; i++) {
487c24b5dfaSDave Chinner 		ASSERT(ips[i]);
488c24b5dfaSDave Chinner 
489c24b5dfaSDave Chinner 		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
490c24b5dfaSDave Chinner 			continue;
491c24b5dfaSDave Chinner 
492c24b5dfaSDave Chinner 		/*
49395afcf5cSDave Chinner 		 * If try_lock is not set yet, make sure all locked inodes are
49495afcf5cSDave Chinner 		 * not in the AIL.  If any are, set try_lock to be used later.
495c24b5dfaSDave Chinner 		 */
496c24b5dfaSDave Chinner 		if (!try_lock) {
497c24b5dfaSDave Chinner 			for (j = (i - 1); j >= 0 && !try_lock; j--) {
498b3b14aacSChristoph Hellwig 				lp = &ips[j]->i_itemp->ili_item;
49922525c17SDave Chinner 				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
500a1033753SDave Chinner 					try_lock = true;
501c24b5dfaSDave Chinner 			}
502c24b5dfaSDave Chinner 		}
503c24b5dfaSDave Chinner 
504c24b5dfaSDave Chinner 		/*
505c24b5dfaSDave Chinner 		 * If any of the previous locks we have locked is in the AIL,
506c24b5dfaSDave Chinner 		 * we must TRY to get the second and subsequent locks. If
507c24b5dfaSDave Chinner 		 * we can't get any, we must release all we have
508c24b5dfaSDave Chinner 		 * and try again.
509c24b5dfaSDave Chinner 		 */
51095afcf5cSDave Chinner 		if (!try_lock) {
51195afcf5cSDave Chinner 			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
51295afcf5cSDave Chinner 			continue;
51395afcf5cSDave Chinner 		}
514c24b5dfaSDave Chinner 
51595afcf5cSDave Chinner 		/* try_lock means we have an inode locked that is in the AIL. */
516c24b5dfaSDave Chinner 		ASSERT(i != 0);
51795afcf5cSDave Chinner 		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
51895afcf5cSDave Chinner 			continue;
51995afcf5cSDave Chinner 
52095afcf5cSDave Chinner 		/*
52195afcf5cSDave Chinner 		 * Unlock all previous guys and try again.  xfs_iunlock will try
52295afcf5cSDave Chinner 		 * to push the tail if the inode is in the AIL.
52395afcf5cSDave Chinner 		 */
524c24b5dfaSDave Chinner 		attempts++;
525c24b5dfaSDave Chinner 		for (j = i - 1; j >= 0; j--) {
526c24b5dfaSDave Chinner 			/*
52795afcf5cSDave Chinner 			 * Check to see if we've already unlocked this one.  Not
52895afcf5cSDave Chinner 			 * the first one going back, and the inode ptr is the
52995afcf5cSDave Chinner 			 * same.
530c24b5dfaSDave Chinner 			 */
53195afcf5cSDave Chinner 			if (j != (i - 1) && ips[j] == ips[j + 1])
532c24b5dfaSDave Chinner 				continue;
533c24b5dfaSDave Chinner 
534c24b5dfaSDave Chinner 			xfs_iunlock(ips[j], lock_mode);
535c24b5dfaSDave Chinner 		}
536c24b5dfaSDave Chinner 
537c24b5dfaSDave Chinner 		if ((attempts % 5) == 0) {
538c24b5dfaSDave Chinner 			delay(1); /* Don't just spin the CPU */
539c24b5dfaSDave Chinner 		}
540c24b5dfaSDave Chinner 		goto again;
541c24b5dfaSDave Chinner 	}
542c24b5dfaSDave Chinner }
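/*
 * A usage sketch (illustrative only, placeholder names): the caller supplies
 * the inodes already sorted by i_ino, e.g.
 *
 *	struct xfs_inode *ips[2] = { ip_low, ip_high };
 *	xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
 *
 * where ip_low->i_ino < ip_high->i_ino.
 */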
543c24b5dfaSDave Chinner 
544c24b5dfaSDave Chinner /*
545d2c292d8SJan Kara  * xfs_lock_two_inodes() can only be used to lock the ilock. The iolock and
546d2c292d8SJan Kara  * mmaplock must be double-locked separately since we use i_rwsem and
547d2c292d8SJan Kara  * invalidate_lock for that. We now support taking one lock EXCL and the
548d2c292d8SJan Kara  * other SHARED.
549c24b5dfaSDave Chinner  */
550c24b5dfaSDave Chinner void
551c24b5dfaSDave Chinner xfs_lock_two_inodes(
5527c2d238aSDarrick J. Wong 	struct xfs_inode	*ip0,
5537c2d238aSDarrick J. Wong 	uint			ip0_mode,
5547c2d238aSDarrick J. Wong 	struct xfs_inode	*ip1,
5557c2d238aSDarrick J. Wong 	uint			ip1_mode)
556c24b5dfaSDave Chinner {
557c24b5dfaSDave Chinner 	int			attempts = 0;
558efe2330fSChristoph Hellwig 	struct xfs_log_item	*lp;
559c24b5dfaSDave Chinner 
5607c2d238aSDarrick J. Wong 	ASSERT(hweight32(ip0_mode) == 1);
5617c2d238aSDarrick J. Wong 	ASSERT(hweight32(ip1_mode) == 1);
5627c2d238aSDarrick J. Wong 	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
5637c2d238aSDarrick J. Wong 	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
564d2c292d8SJan Kara 	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
565d2c292d8SJan Kara 	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
566c24b5dfaSDave Chinner 	ASSERT(ip0->i_ino != ip1->i_ino);
567c24b5dfaSDave Chinner 
568c24b5dfaSDave Chinner 	if (ip0->i_ino > ip1->i_ino) {
5692a09b575SChangcheng Deng 		swap(ip0, ip1);
5702a09b575SChangcheng Deng 		swap(ip0_mode, ip1_mode);
571c24b5dfaSDave Chinner 	}
572c24b5dfaSDave Chinner 
573c24b5dfaSDave Chinner  again:
5747c2d238aSDarrick J. Wong 	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
575c24b5dfaSDave Chinner 
576c24b5dfaSDave Chinner 	/*
577c24b5dfaSDave Chinner 	 * If the first lock we have locked is in the AIL, we must TRY to get
578c24b5dfaSDave Chinner 	 * the second lock. If we can't get it, we must release the first one
579c24b5dfaSDave Chinner 	 * and try again.
580c24b5dfaSDave Chinner 	 */
581b3b14aacSChristoph Hellwig 	lp = &ip0->i_itemp->ili_item;
58222525c17SDave Chinner 	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
5837c2d238aSDarrick J. Wong 		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
5847c2d238aSDarrick J. Wong 			xfs_iunlock(ip0, ip0_mode);
585c24b5dfaSDave Chinner 			if ((++attempts % 5) == 0)
586c24b5dfaSDave Chinner 				delay(1); /* Don't just spin the CPU */
587c24b5dfaSDave Chinner 			goto again;
588c24b5dfaSDave Chinner 		}
589c24b5dfaSDave Chinner 	} else {
5907c2d238aSDarrick J. Wong 		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
591c24b5dfaSDave Chinner 	}
592c24b5dfaSDave Chinner }
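/*
 * Example (illustrative only): lock one inode exclusively and the other
 * shared; the function reorders by inode number itself, so the caller need
 * not:
 *
 *	xfs_lock_two_inodes(ip0, XFS_ILOCK_EXCL, ip1, XFS_ILOCK_SHARED);
 */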
593c24b5dfaSDave Chinner 
5941da177e4SLinus Torvalds uint
5951da177e4SLinus Torvalds xfs_ip2xflags(
59658f88ca2SDave Chinner 	struct xfs_inode	*ip)
5971da177e4SLinus Torvalds {
5984422501dSChristoph Hellwig 	uint			flags = 0;
5991da177e4SLinus Torvalds 
6004422501dSChristoph Hellwig 	if (ip->i_diflags & XFS_DIFLAG_ANY) {
6014422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
6024422501dSChristoph Hellwig 			flags |= FS_XFLAG_REALTIME;
6034422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
6044422501dSChristoph Hellwig 			flags |= FS_XFLAG_PREALLOC;
6054422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
6064422501dSChristoph Hellwig 			flags |= FS_XFLAG_IMMUTABLE;
6074422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_APPEND)
6084422501dSChristoph Hellwig 			flags |= FS_XFLAG_APPEND;
6094422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_SYNC)
6104422501dSChristoph Hellwig 			flags |= FS_XFLAG_SYNC;
6114422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
6124422501dSChristoph Hellwig 			flags |= FS_XFLAG_NOATIME;
6134422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
6144422501dSChristoph Hellwig 			flags |= FS_XFLAG_NODUMP;
6154422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
6164422501dSChristoph Hellwig 			flags |= FS_XFLAG_RTINHERIT;
6174422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
6184422501dSChristoph Hellwig 			flags |= FS_XFLAG_PROJINHERIT;
6194422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
6204422501dSChristoph Hellwig 			flags |= FS_XFLAG_NOSYMLINKS;
6214422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
6224422501dSChristoph Hellwig 			flags |= FS_XFLAG_EXTSIZE;
6234422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
6244422501dSChristoph Hellwig 			flags |= FS_XFLAG_EXTSZINHERIT;
6254422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
6264422501dSChristoph Hellwig 			flags |= FS_XFLAG_NODEFRAG;
6274422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
6284422501dSChristoph Hellwig 			flags |= FS_XFLAG_FILESTREAM;
6294422501dSChristoph Hellwig 	}
6304422501dSChristoph Hellwig 
6314422501dSChristoph Hellwig 	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
6324422501dSChristoph Hellwig 		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
6334422501dSChristoph Hellwig 			flags |= FS_XFLAG_DAX;
6344422501dSChristoph Hellwig 		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
6354422501dSChristoph Hellwig 			flags |= FS_XFLAG_COWEXTSIZE;
6364422501dSChristoph Hellwig 	}
6374422501dSChristoph Hellwig 
6384422501dSChristoph Hellwig 	if (XFS_IFORK_Q(ip))
6394422501dSChristoph Hellwig 		flags |= FS_XFLAG_HASATTR;
6404422501dSChristoph Hellwig 	return flags;
6411da177e4SLinus Torvalds }
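/*
 * For example (illustrative): an inode with XFS_DIFLAG_REALTIME and
 * XFS_DIFLAG_EXTSIZE set in i_diflags and no attribute fork maps to
 * FS_XFLAG_REALTIME | FS_XFLAG_EXTSIZE.
 */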
6421da177e4SLinus Torvalds 
6431da177e4SLinus Torvalds /*
644c24b5dfaSDave Chinner  * Looks up an inode from "name". If ci_name is not NULL, then a CI match
645c24b5dfaSDave Chinner  * is allowed, otherwise it has to be an exact match. If a CI match is found,
646c24b5dfaSDave Chinner  * ci_name->name will point to the actual name (caller must free) or
647c24b5dfaSDave Chinner  * will be set to NULL if an exact match is found.
648c24b5dfaSDave Chinner  */
649c24b5dfaSDave Chinner int
650c24b5dfaSDave Chinner xfs_lookup(
651996b2329SDarrick J. Wong 	struct xfs_inode	*dp,
652996b2329SDarrick J. Wong 	const struct xfs_name	*name,
653996b2329SDarrick J. Wong 	struct xfs_inode	**ipp,
654c24b5dfaSDave Chinner 	struct xfs_name		*ci_name)
655c24b5dfaSDave Chinner {
656c24b5dfaSDave Chinner 	xfs_ino_t		inum;
657c24b5dfaSDave Chinner 	int			error;
658c24b5dfaSDave Chinner 
659c24b5dfaSDave Chinner 	trace_xfs_lookup(dp, name);
660c24b5dfaSDave Chinner 
66175c8c50fSDave Chinner 	if (xfs_is_shutdown(dp->i_mount))
6622451337dSDave Chinner 		return -EIO;
663c24b5dfaSDave Chinner 
664c24b5dfaSDave Chinner 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
665c24b5dfaSDave Chinner 	if (error)
666dbad7c99SDave Chinner 		goto out_unlock;
667c24b5dfaSDave Chinner 
668c24b5dfaSDave Chinner 	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
669c24b5dfaSDave Chinner 	if (error)
670c24b5dfaSDave Chinner 		goto out_free_name;
671c24b5dfaSDave Chinner 
672c24b5dfaSDave Chinner 	return 0;
673c24b5dfaSDave Chinner 
674c24b5dfaSDave Chinner out_free_name:
675c24b5dfaSDave Chinner 	if (ci_name)
676c24b5dfaSDave Chinner 		kmem_free(ci_name->name);
677dbad7c99SDave Chinner out_unlock:
678c24b5dfaSDave Chinner 	*ipp = NULL;
679c24b5dfaSDave Chinner 	return error;
680c24b5dfaSDave Chinner }
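/*
 * A usage note (not enforced here): on success the caller receives a
 * referenced inode in *ipp and is expected to drop that reference with
 * xfs_irele() when done with it.
 */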
681c24b5dfaSDave Chinner 
6828a569d71SDarrick J. Wong /* Propagate di_flags from a parent inode to a child inode. */
6838a569d71SDarrick J. Wong static void
6848a569d71SDarrick J. Wong xfs_inode_inherit_flags(
6858a569d71SDarrick J. Wong 	struct xfs_inode	*ip,
6868a569d71SDarrick J. Wong 	const struct xfs_inode	*pip)
6878a569d71SDarrick J. Wong {
6888a569d71SDarrick J. Wong 	unsigned int		di_flags = 0;
689603f000bSDarrick J. Wong 	xfs_failaddr_t		failaddr;
6908a569d71SDarrick J. Wong 	umode_t			mode = VFS_I(ip)->i_mode;
6918a569d71SDarrick J. Wong 
6928a569d71SDarrick J. Wong 	if (S_ISDIR(mode)) {
693db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
6948a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_RTINHERIT;
695db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
6968a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
697031474c2SChristoph Hellwig 			ip->i_extsize = pip->i_extsize;
6988a569d71SDarrick J. Wong 		}
699db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
7008a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_PROJINHERIT;
7018a569d71SDarrick J. Wong 	} else if (S_ISREG(mode)) {
702db07349dSChristoph Hellwig 		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
70338c26bfdSDave Chinner 		    xfs_has_realtime(ip->i_mount))
7048a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_REALTIME;
705db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
7068a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_EXTSIZE;
707031474c2SChristoph Hellwig 			ip->i_extsize = pip->i_extsize;
7088a569d71SDarrick J. Wong 		}
7098a569d71SDarrick J. Wong 	}
710db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
7118a569d71SDarrick J. Wong 	    xfs_inherit_noatime)
7128a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NOATIME;
713db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
7148a569d71SDarrick J. Wong 	    xfs_inherit_nodump)
7158a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NODUMP;
716db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
7178a569d71SDarrick J. Wong 	    xfs_inherit_sync)
7188a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_SYNC;
719db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
7208a569d71SDarrick J. Wong 	    xfs_inherit_nosymlinks)
7218a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NOSYMLINKS;
722db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
7238a569d71SDarrick J. Wong 	    xfs_inherit_nodefrag)
7248a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NODEFRAG;
725db07349dSChristoph Hellwig 	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
7268a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_FILESTREAM;
7278a569d71SDarrick J. Wong 
728db07349dSChristoph Hellwig 	ip->i_diflags |= di_flags;
729603f000bSDarrick J. Wong 
730603f000bSDarrick J. Wong 	/*
731603f000bSDarrick J. Wong 	 * Inode verifiers on older kernels only check that the extent size
732603f000bSDarrick J. Wong 	 * hint is an integer multiple of the rt extent size on realtime files.
733603f000bSDarrick J. Wong 	 * They did not check the hint alignment on a directory with both
734603f000bSDarrick J. Wong 	 * rtinherit and extszinherit flags set.  If the misaligned hint is
735603f000bSDarrick J. Wong 	 * propagated from a directory into a new realtime file, new file
736603f000bSDarrick J. Wong 	 * allocations will fail due to math errors in the rt allocator and/or
737603f000bSDarrick J. Wong 	 * trip the verifiers.  Validate the hint settings in the new file so
738603f000bSDarrick J. Wong 	 * that we don't let broken hints propagate.
739603f000bSDarrick J. Wong 	 */
740603f000bSDarrick J. Wong 	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
741603f000bSDarrick J. Wong 			VFS_I(ip)->i_mode, ip->i_diflags);
742603f000bSDarrick J. Wong 	if (failaddr) {
743603f000bSDarrick J. Wong 		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
744603f000bSDarrick J. Wong 				   XFS_DIFLAG_EXTSZINHERIT);
745603f000bSDarrick J. Wong 		ip->i_extsize = 0;
746603f000bSDarrick J. Wong 	}
7478a569d71SDarrick J. Wong }
7488a569d71SDarrick J. Wong 
7498a569d71SDarrick J. Wong /* Propagate di_flags2 from a parent inode to a child inode. */
7508a569d71SDarrick J. Wong static void
7518a569d71SDarrick J. Wong xfs_inode_inherit_flags2(
7528a569d71SDarrick J. Wong 	struct xfs_inode	*ip,
7538a569d71SDarrick J. Wong 	const struct xfs_inode	*pip)
7548a569d71SDarrick J. Wong {
755603f000bSDarrick J. Wong 	xfs_failaddr_t		failaddr;
756603f000bSDarrick J. Wong 
7573e09ab8fSChristoph Hellwig 	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
7583e09ab8fSChristoph Hellwig 		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
759b33ce57dSChristoph Hellwig 		ip->i_cowextsize = pip->i_cowextsize;
7608a569d71SDarrick J. Wong 	}
7613e09ab8fSChristoph Hellwig 	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
7623e09ab8fSChristoph Hellwig 		ip->i_diflags2 |= XFS_DIFLAG2_DAX;
763603f000bSDarrick J. Wong 
764603f000bSDarrick J. Wong 	/* Don't let invalid cowextsize hints propagate. */
765603f000bSDarrick J. Wong 	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
766603f000bSDarrick J. Wong 			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
767603f000bSDarrick J. Wong 	if (failaddr) {
768603f000bSDarrick J. Wong 		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
769603f000bSDarrick J. Wong 		ip->i_cowextsize = 0;
770603f000bSDarrick J. Wong 	}
7718a569d71SDarrick J. Wong }
7728a569d71SDarrick J. Wong 
773c24b5dfaSDave Chinner /*
7741abcf261SDave Chinner  * Initialise a newly allocated inode and return the in-core inode to the
7751abcf261SDave Chinner  * caller locked exclusively.
7761da177e4SLinus Torvalds  */
777b652afd9SDave Chinner int
7781abcf261SDave Chinner xfs_init_new_inode(
779f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
7801abcf261SDave Chinner 	struct xfs_trans	*tp,
7811abcf261SDave Chinner 	struct xfs_inode	*pip,
7821abcf261SDave Chinner 	xfs_ino_t		ino,
783576b1d67SAl Viro 	umode_t			mode,
78431b084aeSNathan Scott 	xfs_nlink_t		nlink,
78566f36464SChristoph Hellwig 	dev_t			rdev,
7866743099cSArkadiusz Mi?kiewicz 	prid_t			prid,
787e6a688c3SDave Chinner 	bool			init_xattrs,
7881abcf261SDave Chinner 	struct xfs_inode	**ipp)
7891da177e4SLinus Torvalds {
79001ea173eSChristoph Hellwig 	struct inode		*dir = pip ? VFS_I(pip) : NULL;
79193848a99SChristoph Hellwig 	struct xfs_mount	*mp = tp->t_mountp;
7921abcf261SDave Chinner 	struct xfs_inode	*ip;
7931abcf261SDave Chinner 	unsigned int		flags;
7941da177e4SLinus Torvalds 	int			error;
79595582b00SDeepa Dinamani 	struct timespec64	tv;
7963987848cSDave Chinner 	struct inode		*inode;
7971da177e4SLinus Torvalds 
7981da177e4SLinus Torvalds 	/*
7998b26984dSDave Chinner 	 * Protect against obviously corrupt allocation btree records. Later
8008b26984dSDave Chinner 	 * xfs_iget checks will catch re-allocation of other active in-memory
8018b26984dSDave Chinner 	 * and on-disk inodes. If we don't catch reallocating the parent inode
8028b26984dSDave Chinner 	 * here we will deadlock in xfs_iget() so we have to do these checks
8038b26984dSDave Chinner 	 * first.
8048b26984dSDave Chinner 	 */
8058b26984dSDave Chinner 	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
8068b26984dSDave Chinner 		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
8078b26984dSDave Chinner 		return -EFSCORRUPTED;
8088b26984dSDave Chinner 	}
8098b26984dSDave Chinner 
8108b26984dSDave Chinner 	/*
8111abcf261SDave Chinner 	 * Get the in-core inode with the lock held exclusively to prevent
8121abcf261SDave Chinner 	 * others from looking at until we're done.
8131da177e4SLinus Torvalds 	 */
8141abcf261SDave Chinner 	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
815bf904248SDavid Chinner 	if (error)
8161da177e4SLinus Torvalds 		return error;
8171abcf261SDave Chinner 
8181da177e4SLinus Torvalds 	ASSERT(ip != NULL);
8193987848cSDave Chinner 	inode = VFS_I(ip);
82054d7b5c1SDave Chinner 	set_nlink(inode, nlink);
82166f36464SChristoph Hellwig 	inode->i_rdev = rdev;
822ceaf603cSChristoph Hellwig 	ip->i_projid = prid;
8231da177e4SLinus Torvalds 
8240560f31aSDave Chinner 	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
825db998553SChristian Brauner 		inode_fsuid_set(inode, mnt_userns);
82601ea173eSChristoph Hellwig 		inode->i_gid = dir->i_gid;
82701ea173eSChristoph Hellwig 		inode->i_mode = mode;
8283d8f2821SChristoph Hellwig 	} else {
8297d6beb71SLinus Torvalds 		inode_init_owner(mnt_userns, inode, dir, mode);
8301da177e4SLinus Torvalds 	}
8311da177e4SLinus Torvalds 
8321da177e4SLinus Torvalds 	/*
8331da177e4SLinus Torvalds 	 * If the group ID of the new file does not match the effective group
8341da177e4SLinus Torvalds 	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
8351da177e4SLinus Torvalds 	 * (and only if the irix_sgid_inherit compatibility variable is set).
8361da177e4SLinus Torvalds 	 */
83754295159SChristoph Hellwig 	if (irix_sgid_inherit &&
838f736d93dSChristoph Hellwig 	    (inode->i_mode & S_ISGID) &&
839f736d93dSChristoph Hellwig 	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
840c19b3b05SDave Chinner 		inode->i_mode &= ~S_ISGID;
8411da177e4SLinus Torvalds 
84213d2c10bSChristoph Hellwig 	ip->i_disk_size = 0;
843daf83964SChristoph Hellwig 	ip->i_df.if_nextents = 0;
8446e73a545SChristoph Hellwig 	ASSERT(ip->i_nblocks == 0);
845dff35fd4SChristoph Hellwig 
846c2050a45SDeepa Dinamani 	tv = current_time(inode);
8473987848cSDave Chinner 	inode->i_mtime = tv;
8483987848cSDave Chinner 	inode->i_atime = tv;
8493987848cSDave Chinner 	inode->i_ctime = tv;
850dff35fd4SChristoph Hellwig 
851031474c2SChristoph Hellwig 	ip->i_extsize = 0;
852db07349dSChristoph Hellwig 	ip->i_diflags = 0;
85393848a99SChristoph Hellwig 
85438c26bfdSDave Chinner 	if (xfs_has_v3inodes(mp)) {
855f0e28280SJeff Layton 		inode_set_iversion(inode, 1);
856b33ce57dSChristoph Hellwig 		ip->i_cowextsize = 0;
857e98d5e88SChristoph Hellwig 		ip->i_crtime = tv;
85893848a99SChristoph Hellwig 	}
85993848a99SChristoph Hellwig 
8601da177e4SLinus Torvalds 	flags = XFS_ILOG_CORE;
8611da177e4SLinus Torvalds 	switch (mode & S_IFMT) {
8621da177e4SLinus Torvalds 	case S_IFIFO:
8631da177e4SLinus Torvalds 	case S_IFCHR:
8641da177e4SLinus Torvalds 	case S_IFBLK:
8651da177e4SLinus Torvalds 	case S_IFSOCK:
866f7e67b20SChristoph Hellwig 		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
8671da177e4SLinus Torvalds 		flags |= XFS_ILOG_DEV;
8681da177e4SLinus Torvalds 		break;
8691da177e4SLinus Torvalds 	case S_IFREG:
8701da177e4SLinus Torvalds 	case S_IFDIR:
871db07349dSChristoph Hellwig 		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
8728a569d71SDarrick J. Wong 			xfs_inode_inherit_flags(ip, pip);
8733e09ab8fSChristoph Hellwig 		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
8748a569d71SDarrick J. Wong 			xfs_inode_inherit_flags2(ip, pip);
87553004ee7SGustavo A. R. Silva 		fallthrough;
8761da177e4SLinus Torvalds 	case S_IFLNK:
877f7e67b20SChristoph Hellwig 		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
878fcacbc3fSChristoph Hellwig 		ip->i_df.if_bytes = 0;
8796bdcf26aSChristoph Hellwig 		ip->i_df.if_u1.if_root = NULL;
8801da177e4SLinus Torvalds 		break;
8811da177e4SLinus Torvalds 	default:
8821da177e4SLinus Torvalds 		ASSERT(0);
8831da177e4SLinus Torvalds 	}
8841da177e4SLinus Torvalds 
8851da177e4SLinus Torvalds 	/*
886e6a688c3SDave Chinner 	 * If we need to create attributes immediately after allocating the
887e6a688c3SDave Chinner 	 * inode, initialise an empty attribute fork right now. We use the
888e6a688c3SDave Chinner 	 * default fork offset for attributes here as we don't know exactly what
889e6a688c3SDave Chinner 	 * size or how many attributes we might be adding. We can do this
890e6a688c3SDave Chinner 	 * safely here because we know the data fork is completely empty and
891e6a688c3SDave Chinner 	 * this saves us from needing to run a separate transaction to set the
892e6a688c3SDave Chinner 	 * fork offset in the immediate future.
893e6a688c3SDave Chinner 	 */
89438c26bfdSDave Chinner 	if (init_xattrs && xfs_has_attr(mp)) {
8957821ea30SChristoph Hellwig 		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
896e6a688c3SDave Chinner 		ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
897e6a688c3SDave Chinner 	}
898e6a688c3SDave Chinner 
899e6a688c3SDave Chinner 	/*
9001da177e4SLinus Torvalds 	 * Log the new values stuffed into the inode.
9011da177e4SLinus Torvalds 	 */
902ddc3415aSChristoph Hellwig 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
9031da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, flags);
9041da177e4SLinus Torvalds 
90558c90473SDave Chinner 	/* now that we have an i_mode we can setup the inode structure */
90641be8bedSChristoph Hellwig 	xfs_setup_inode(ip);
9071da177e4SLinus Torvalds 
9081da177e4SLinus Torvalds 	*ipp = ip;
9091da177e4SLinus Torvalds 	return 0;
9101da177e4SLinus Torvalds }
9111da177e4SLinus Torvalds 
912e546cb79SDave Chinner /*
91354d7b5c1SDave Chinner  * Decrement the link count on an inode & log the change.  If this causes the
91454d7b5c1SDave Chinner  * link count to go to zero, move the inode to the AGI unlinked list so that
91554d7b5c1SDave Chinner  * it can be freed when the last active reference goes away via xfs_inactive().
916e546cb79SDave Chinner  */
9170d5a75e9SEric Sandeen static int			/* error */
918e546cb79SDave Chinner xfs_droplink(
919e546cb79SDave Chinner 	xfs_trans_t *tp,
920e546cb79SDave Chinner 	xfs_inode_t *ip)
921e546cb79SDave Chinner {
922e546cb79SDave Chinner 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
923e546cb79SDave Chinner 
924e546cb79SDave Chinner 	drop_nlink(VFS_I(ip));
925e546cb79SDave Chinner 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
926e546cb79SDave Chinner 
92754d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink)
92854d7b5c1SDave Chinner 		return 0;
92954d7b5c1SDave Chinner 
93054d7b5c1SDave Chinner 	return xfs_iunlink(tp, ip);
931e546cb79SDave Chinner }
932e546cb79SDave Chinner 
933e546cb79SDave Chinner /*
934e546cb79SDave Chinner  * Increment the link count on an inode & log the change.
935e546cb79SDave Chinner  */
93691083269SEric Sandeen static void
937e546cb79SDave Chinner xfs_bumplink(
938e546cb79SDave Chinner 	xfs_trans_t *tp,
939e546cb79SDave Chinner 	xfs_inode_t *ip)
940e546cb79SDave Chinner {
941e546cb79SDave Chinner 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
942e546cb79SDave Chinner 
943e546cb79SDave Chinner 	inc_nlink(VFS_I(ip));
944e546cb79SDave Chinner 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
945e546cb79SDave Chinner }
946e546cb79SDave Chinner 
947c24b5dfaSDave Chinner int
948c24b5dfaSDave Chinner xfs_create(
949f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
950c24b5dfaSDave Chinner 	xfs_inode_t		*dp,
951c24b5dfaSDave Chinner 	struct xfs_name		*name,
952c24b5dfaSDave Chinner 	umode_t			mode,
95366f36464SChristoph Hellwig 	dev_t			rdev,
954e6a688c3SDave Chinner 	bool			init_xattrs,
955c24b5dfaSDave Chinner 	xfs_inode_t		**ipp)
956c24b5dfaSDave Chinner {
957c24b5dfaSDave Chinner 	int			is_dir = S_ISDIR(mode);
958c24b5dfaSDave Chinner 	struct xfs_mount	*mp = dp->i_mount;
959c24b5dfaSDave Chinner 	struct xfs_inode	*ip = NULL;
960c24b5dfaSDave Chinner 	struct xfs_trans	*tp = NULL;
961c24b5dfaSDave Chinner 	int			error;
962c24b5dfaSDave Chinner 	bool                    unlock_dp_on_error = false;
963c24b5dfaSDave Chinner 	prid_t			prid;
964c24b5dfaSDave Chinner 	struct xfs_dquot	*udqp = NULL;
965c24b5dfaSDave Chinner 	struct xfs_dquot	*gdqp = NULL;
966c24b5dfaSDave Chinner 	struct xfs_dquot	*pdqp = NULL;
967062647a8SBrian Foster 	struct xfs_trans_res	*tres;
968c24b5dfaSDave Chinner 	uint			resblks;
969b652afd9SDave Chinner 	xfs_ino_t		ino;
970c24b5dfaSDave Chinner 
971c24b5dfaSDave Chinner 	trace_xfs_create(dp, name);
972c24b5dfaSDave Chinner 
97375c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
9742451337dSDave Chinner 		return -EIO;
975c24b5dfaSDave Chinner 
976163467d3SZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
977c24b5dfaSDave Chinner 
978c24b5dfaSDave Chinner 	/*
979c24b5dfaSDave Chinner 	 * Make sure that we have allocated dquot(s) on disk.
980c24b5dfaSDave Chinner 	 */
981209188ceSChristian Brauner 	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
982209188ceSChristian Brauner 			mapped_fsgid(mnt_userns, &init_user_ns), prid,
983c24b5dfaSDave Chinner 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
984c24b5dfaSDave Chinner 			&udqp, &gdqp, &pdqp);
985c24b5dfaSDave Chinner 	if (error)
986c24b5dfaSDave Chinner 		return error;
987c24b5dfaSDave Chinner 
988c24b5dfaSDave Chinner 	if (is_dir) {
989c24b5dfaSDave Chinner 		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
990062647a8SBrian Foster 		tres = &M_RES(mp)->tr_mkdir;
991c24b5dfaSDave Chinner 	} else {
992c24b5dfaSDave Chinner 		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
993062647a8SBrian Foster 		tres = &M_RES(mp)->tr_create;
994c24b5dfaSDave Chinner 	}
995c24b5dfaSDave Chinner 
996c24b5dfaSDave Chinner 	/*
997c24b5dfaSDave Chinner 	 * Initially assume that the file does not exist and
998c24b5dfaSDave Chinner 	 * reserve the resources for that case.  If that is not
999c24b5dfaSDave Chinner 	 * the case we'll drop the one we have and get a more
1000c24b5dfaSDave Chinner 	 * appropriate transaction later.
1001c24b5dfaSDave Chinner 	 */
1002f2f7b9ffSDarrick J. Wong 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1003f2f7b9ffSDarrick J. Wong 			&tp);
10042451337dSDave Chinner 	if (error == -ENOSPC) {
1005c24b5dfaSDave Chinner 		/* flush outstanding delalloc blocks and retry */
1006c24b5dfaSDave Chinner 		xfs_flush_inodes(mp);
1007f2f7b9ffSDarrick J. Wong 		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1008f2f7b9ffSDarrick J. Wong 				resblks, &tp);
1009c24b5dfaSDave Chinner 	}
10104906e215SChristoph Hellwig 	if (error)
1011f2f7b9ffSDarrick J. Wong 		goto out_release_dquots;
1012c24b5dfaSDave Chinner 
101365523218SChristoph Hellwig 	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1014c24b5dfaSDave Chinner 	unlock_dp_on_error = true;
1015c24b5dfaSDave Chinner 
1016c24b5dfaSDave Chinner 	/*
1017c24b5dfaSDave Chinner 	 * A newly created regular or special file just has one directory
1018c24b5dfaSDave Chinner 	 * entry pointing to it, but a directory also has the "." entry
1019c24b5dfaSDave Chinner 	 * pointing to itself.
1020c24b5dfaSDave Chinner 	 */
1021b652afd9SDave Chinner 	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1022b652afd9SDave Chinner 	if (!error)
1023b652afd9SDave Chinner 		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
1024b652afd9SDave Chinner 				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
1025d6077aa3SJan Kara 	if (error)
1026c24b5dfaSDave Chinner 		goto out_trans_cancel;
1027c24b5dfaSDave Chinner 
1028c24b5dfaSDave Chinner 	/*
1029c24b5dfaSDave Chinner 	 * Now we join the directory inode to the transaction.  We do not do it
1030b652afd9SDave Chinner 	 * earlier because xfs_dialloc might commit the previous transaction
1031c24b5dfaSDave Chinner 	 * (and release all the locks).  An error from here on will result in
1032c24b5dfaSDave Chinner 	 * the transaction cancel unlocking dp so don't do it explicitly in the
1033c24b5dfaSDave Chinner 	 * error path.
1034c24b5dfaSDave Chinner 	 */
103565523218SChristoph Hellwig 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1036c24b5dfaSDave Chinner 	unlock_dp_on_error = false;
1037c24b5dfaSDave Chinner 
1038381eee69SBrian Foster 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
103963337b63SKaixu Xia 					resblks - XFS_IALLOC_SPACE_RES(mp));
1040c24b5dfaSDave Chinner 	if (error) {
10412451337dSDave Chinner 		ASSERT(error != -ENOSPC);
10424906e215SChristoph Hellwig 		goto out_trans_cancel;
1043c24b5dfaSDave Chinner 	}
1044c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1045c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1046c24b5dfaSDave Chinner 
1047c24b5dfaSDave Chinner 	if (is_dir) {
1048c24b5dfaSDave Chinner 		error = xfs_dir_init(tp, ip, dp);
1049c24b5dfaSDave Chinner 		if (error)
1050c8eac49eSBrian Foster 			goto out_trans_cancel;
1051c24b5dfaSDave Chinner 
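		/*
		 * The new directory's ".." entry points back at dp, so bump
		 * the parent's link count to account for it.
		 */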
105291083269SEric Sandeen 		xfs_bumplink(tp, dp);
1053c24b5dfaSDave Chinner 	}
1054c24b5dfaSDave Chinner 
1055c24b5dfaSDave Chinner 	/*
1056c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1057c24b5dfaSDave Chinner 	 * create transaction goes to disk before returning to
1058c24b5dfaSDave Chinner 	 * the user.
1059c24b5dfaSDave Chinner 	 */
10600560f31aSDave Chinner 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1061c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1062c24b5dfaSDave Chinner 
1063c24b5dfaSDave Chinner 	/*
1064c24b5dfaSDave Chinner 	 * Attach the dquot(s) to the inodes and modify them incore.
1065c24b5dfaSDave Chinner 	 * The ids of the inode cannot have changed since the new
1066c24b5dfaSDave Chinner 	 * inode has been locked ever since it was created.
1067c24b5dfaSDave Chinner 	 */
1068c24b5dfaSDave Chinner 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1069c24b5dfaSDave Chinner 
107070393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1071c24b5dfaSDave Chinner 	if (error)
1072c24b5dfaSDave Chinner 		goto out_release_inode;
1073c24b5dfaSDave Chinner 
1074c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1075c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1076c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1077c24b5dfaSDave Chinner 
1078c24b5dfaSDave Chinner 	*ipp = ip;
1079c24b5dfaSDave Chinner 	return 0;
1080c24b5dfaSDave Chinner 
1081c24b5dfaSDave Chinner  out_trans_cancel:
10824906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1083c24b5dfaSDave Chinner  out_release_inode:
1084c24b5dfaSDave Chinner 	/*
108558c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
108658c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
108758c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
1088c24b5dfaSDave Chinner 	 */
108958c90473SDave Chinner 	if (ip) {
109058c90473SDave Chinner 		xfs_finish_inode_setup(ip);
109144a8736bSDarrick J. Wong 		xfs_irele(ip);
109258c90473SDave Chinner 	}
1093f2f7b9ffSDarrick J. Wong  out_release_dquots:
1094c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1095c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1096c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1097c24b5dfaSDave Chinner 
1098c24b5dfaSDave Chinner 	if (unlock_dp_on_error)
109965523218SChristoph Hellwig 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1100c24b5dfaSDave Chinner 	return error;
1101c24b5dfaSDave Chinner }
1102c24b5dfaSDave Chinner 
1103c24b5dfaSDave Chinner int
110499b6436bSZhi Yong Wu xfs_create_tmpfile(
1105f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
110699b6436bSZhi Yong Wu 	struct xfs_inode	*dp,
1107330033d6SBrian Foster 	umode_t			mode,
1108330033d6SBrian Foster 	struct xfs_inode	**ipp)
110999b6436bSZhi Yong Wu {
111099b6436bSZhi Yong Wu 	struct xfs_mount	*mp = dp->i_mount;
111199b6436bSZhi Yong Wu 	struct xfs_inode	*ip = NULL;
111299b6436bSZhi Yong Wu 	struct xfs_trans	*tp = NULL;
111399b6436bSZhi Yong Wu 	int			error;
111499b6436bSZhi Yong Wu 	prid_t                  prid;
111599b6436bSZhi Yong Wu 	struct xfs_dquot	*udqp = NULL;
111699b6436bSZhi Yong Wu 	struct xfs_dquot	*gdqp = NULL;
111799b6436bSZhi Yong Wu 	struct xfs_dquot	*pdqp = NULL;
111899b6436bSZhi Yong Wu 	struct xfs_trans_res	*tres;
111999b6436bSZhi Yong Wu 	uint			resblks;
1120b652afd9SDave Chinner 	xfs_ino_t		ino;
112199b6436bSZhi Yong Wu 
112275c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
11232451337dSDave Chinner 		return -EIO;
112499b6436bSZhi Yong Wu 
112599b6436bSZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
112699b6436bSZhi Yong Wu 
112799b6436bSZhi Yong Wu 	/*
112899b6436bSZhi Yong Wu 	 * Make sure that we have allocated dquot(s) on disk.
112999b6436bSZhi Yong Wu 	 */
1130209188ceSChristian Brauner 	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
1131209188ceSChristian Brauner 			mapped_fsgid(mnt_userns, &init_user_ns), prid,
113299b6436bSZhi Yong Wu 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
113399b6436bSZhi Yong Wu 			&udqp, &gdqp, &pdqp);
113499b6436bSZhi Yong Wu 	if (error)
113599b6436bSZhi Yong Wu 		return error;
113699b6436bSZhi Yong Wu 
113799b6436bSZhi Yong Wu 	resblks = XFS_IALLOC_SPACE_RES(mp);
113899b6436bSZhi Yong Wu 	tres = &M_RES(mp)->tr_create_tmpfile;
1139253f4911SChristoph Hellwig 
1140f2f7b9ffSDarrick J. Wong 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1141f2f7b9ffSDarrick J. Wong 			&tp);
11424906e215SChristoph Hellwig 	if (error)
1143f2f7b9ffSDarrick J. Wong 		goto out_release_dquots;
114499b6436bSZhi Yong Wu 
1145b652afd9SDave Chinner 	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1146b652afd9SDave Chinner 	if (!error)
1147b652afd9SDave Chinner 		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
1148b652afd9SDave Chinner 				0, 0, prid, false, &ip);
1149d6077aa3SJan Kara 	if (error)
115099b6436bSZhi Yong Wu 		goto out_trans_cancel;
115199b6436bSZhi Yong Wu 
11520560f31aSDave Chinner 	if (xfs_has_wsync(mp))
115399b6436bSZhi Yong Wu 		xfs_trans_set_sync(tp);
115499b6436bSZhi Yong Wu 
115599b6436bSZhi Yong Wu 	/*
115699b6436bSZhi Yong Wu 	 * Attach the dquot(s) to the inodes and modify them incore.
115799b6436bSZhi Yong Wu 	 * The ids of the inode cannot have changed since the new
115899b6436bSZhi Yong Wu 	 * inode has been locked ever since it was created.
115999b6436bSZhi Yong Wu 	 */
116099b6436bSZhi Yong Wu 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
116199b6436bSZhi Yong Wu 
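	/*
	 * The tmpfile starts with a zero link count, so put it on the AGI
	 * unlinked list.  It stays there until it either gains a link (see
	 * xfs_link) or is freed once the last reference goes away.
	 */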
116299b6436bSZhi Yong Wu 	error = xfs_iunlink(tp, ip);
116399b6436bSZhi Yong Wu 	if (error)
11644906e215SChristoph Hellwig 		goto out_trans_cancel;
116599b6436bSZhi Yong Wu 
116670393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
116799b6436bSZhi Yong Wu 	if (error)
116899b6436bSZhi Yong Wu 		goto out_release_inode;
116999b6436bSZhi Yong Wu 
117099b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
117199b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
117299b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
117399b6436bSZhi Yong Wu 
1174330033d6SBrian Foster 	*ipp = ip;
117599b6436bSZhi Yong Wu 	return 0;
117699b6436bSZhi Yong Wu 
117799b6436bSZhi Yong Wu  out_trans_cancel:
11784906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
117999b6436bSZhi Yong Wu  out_release_inode:
118099b6436bSZhi Yong Wu 	/*
118158c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
118258c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
118358c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
118499b6436bSZhi Yong Wu 	 */
118558c90473SDave Chinner 	if (ip) {
118658c90473SDave Chinner 		xfs_finish_inode_setup(ip);
118744a8736bSDarrick J. Wong 		xfs_irele(ip);
118858c90473SDave Chinner 	}
1189f2f7b9ffSDarrick J. Wong  out_release_dquots:
119099b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
119199b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
119299b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
119399b6436bSZhi Yong Wu 
119499b6436bSZhi Yong Wu 	return error;
119599b6436bSZhi Yong Wu }
119699b6436bSZhi Yong Wu 
119799b6436bSZhi Yong Wu int
1198c24b5dfaSDave Chinner xfs_link(
1199c24b5dfaSDave Chinner 	xfs_inode_t		*tdp,
1200c24b5dfaSDave Chinner 	xfs_inode_t		*sip,
1201c24b5dfaSDave Chinner 	struct xfs_name		*target_name)
1202c24b5dfaSDave Chinner {
1203c24b5dfaSDave Chinner 	xfs_mount_t		*mp = tdp->i_mount;
1204c24b5dfaSDave Chinner 	xfs_trans_t		*tp;
1205871b9316SDarrick J. Wong 	int			error, nospace_error = 0;
1206c24b5dfaSDave Chinner 	int			resblks;
1207c24b5dfaSDave Chinner 
1208c24b5dfaSDave Chinner 	trace_xfs_link(tdp, target_name);
1209c24b5dfaSDave Chinner 
1210c19b3b05SDave Chinner 	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1211c24b5dfaSDave Chinner 
121275c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
12132451337dSDave Chinner 		return -EIO;
1214c24b5dfaSDave Chinner 
1215c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(sip);
1216c24b5dfaSDave Chinner 	if (error)
1217c24b5dfaSDave Chinner 		goto std_return;
1218c24b5dfaSDave Chinner 
1219c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(tdp);
1220c24b5dfaSDave Chinner 	if (error)
1221c24b5dfaSDave Chinner 		goto std_return;
1222c24b5dfaSDave Chinner 
1223c24b5dfaSDave Chinner 	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1224871b9316SDarrick J. Wong 	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
1225871b9316SDarrick J. Wong 			&tp, &nospace_error);
12264906e215SChristoph Hellwig 	if (error)
1227253f4911SChristoph Hellwig 		goto std_return;
1228c24b5dfaSDave Chinner 
1229c24b5dfaSDave Chinner 	/*
1230c24b5dfaSDave Chinner 	 * If we are using project inheritance, we only allow hard link
1231c24b5dfaSDave Chinner 	 * creation in our tree when the project IDs are the same; else
1232c24b5dfaSDave Chinner 	 * the tree quota mechanism could be circumvented.
1233c24b5dfaSDave Chinner 	 */
1234db07349dSChristoph Hellwig 	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1235ceaf603cSChristoph Hellwig 		     tdp->i_projid != sip->i_projid)) {
12362451337dSDave Chinner 		error = -EXDEV;
1237c24b5dfaSDave Chinner 		goto error_return;
1238c24b5dfaSDave Chinner 	}
1239c24b5dfaSDave Chinner 
124094f3cad5SEric Sandeen 	if (!resblks) {
124194f3cad5SEric Sandeen 		error = xfs_dir_canenter(tp, tdp, target_name);
1242c24b5dfaSDave Chinner 		if (error)
1243c24b5dfaSDave Chinner 			goto error_return;
124494f3cad5SEric Sandeen 	}
1245c24b5dfaSDave Chinner 
124654d7b5c1SDave Chinner 	/*
124754d7b5c1SDave Chinner 	 * Handle the initial link state of an O_TMPFILE inode: take it off the
124754d7b5c1SDave Chinner 	 * AGI unlinked list now that it is gaining its first link.
124854d7b5c1SDave Chinner 	 */
124954d7b5c1SDave Chinner 	if (VFS_I(sip)->i_nlink == 0) {
1250f40aadb2SDave Chinner 		struct xfs_perag	*pag;
1251f40aadb2SDave Chinner 
1252f40aadb2SDave Chinner 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1253f40aadb2SDave Chinner 		error = xfs_iunlink_remove(tp, pag, sip);
1254f40aadb2SDave Chinner 		xfs_perag_put(pag);
1255ab297431SZhi Yong Wu 		if (error)
12564906e215SChristoph Hellwig 			goto error_return;
1257ab297431SZhi Yong Wu 	}
1258ab297431SZhi Yong Wu 
1259c24b5dfaSDave Chinner 	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1260381eee69SBrian Foster 				   resblks);
1261c24b5dfaSDave Chinner 	if (error)
12624906e215SChristoph Hellwig 		goto error_return;
1263c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1264c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1265c24b5dfaSDave Chinner 
126691083269SEric Sandeen 	xfs_bumplink(tp, sip);
1267c24b5dfaSDave Chinner 
1268c24b5dfaSDave Chinner 	/*
1269c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1270c24b5dfaSDave Chinner 	 * link transaction goes to disk before returning to
1271c24b5dfaSDave Chinner 	 * the user.
1272c24b5dfaSDave Chinner 	 */
12730560f31aSDave Chinner 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1274c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1275c24b5dfaSDave Chinner 
127670393313SChristoph Hellwig 	return xfs_trans_commit(tp);
1277c24b5dfaSDave Chinner 
1278c24b5dfaSDave Chinner  error_return:
12794906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1280c24b5dfaSDave Chinner  std_return:
1281871b9316SDarrick J. Wong 	if (error == -ENOSPC && nospace_error)
1282871b9316SDarrick J. Wong 		error = nospace_error;
1283c24b5dfaSDave Chinner 	return error;
1284c24b5dfaSDave Chinner }
1285c24b5dfaSDave Chinner 
1286363e59baSDarrick J. Wong /* Clear the reflink flag and the cowblocks tag if possible. */
1287363e59baSDarrick J. Wong static void
1288363e59baSDarrick J. Wong xfs_itruncate_clear_reflink_flags(
1289363e59baSDarrick J. Wong 	struct xfs_inode	*ip)
1290363e59baSDarrick J. Wong {
1291363e59baSDarrick J. Wong 	struct xfs_ifork	*dfork;
1292363e59baSDarrick J. Wong 	struct xfs_ifork	*cfork;
1293363e59baSDarrick J. Wong 
1294363e59baSDarrick J. Wong 	if (!xfs_is_reflink_inode(ip))
1295363e59baSDarrick J. Wong 		return;
1296363e59baSDarrick J. Wong 	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1297363e59baSDarrick J. Wong 	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1298363e59baSDarrick J. Wong 	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
12993e09ab8fSChristoph Hellwig 		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1300363e59baSDarrick J. Wong 	if (cfork->if_bytes == 0)
1301363e59baSDarrick J. Wong 		xfs_inode_clear_cowblocks_tag(ip);
1302363e59baSDarrick J. Wong }
1303363e59baSDarrick J. Wong 
13041da177e4SLinus Torvalds /*
13058f04c47aSChristoph Hellwig  * Free up the underlying blocks past new_size.  The new size must be smaller
13068f04c47aSChristoph Hellwig  * than the current size.  This routine can be used both for the attribute and
13078f04c47aSChristoph Hellwig  * data fork, and does not modify the inode size, which is left to the caller.
13081da177e4SLinus Torvalds  *
1309f6485057SDavid Chinner  * The transaction passed to this routine must have made a permanent log
1310f6485057SDavid Chinner  * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1311f6485057SDavid Chinner  * given transaction and start new ones, so make sure everything involved in
1312f6485057SDavid Chinner  * the transaction is tidy before calling here.  A transaction, possibly a new
1313f6485057SDavid Chinner  * one, will be returned to the caller to be committed.  The incoming transaction must
1314f6485057SDavid Chinner  * already include the inode, and both inode locks must be held exclusively.
1315f6485057SDavid Chinner  * The inode must also be "held" within the transaction.  On return the inode
1316f6485057SDavid Chinner  * will be "held" within the returned transaction.  This routine does NOT
1317f6485057SDavid Chinner  * require any disk space to be reserved for it within the transaction.
13181da177e4SLinus Torvalds  *
1319f6485057SDavid Chinner  * If we get an error, we must return with the inode locked and linked into the
1320f6485057SDavid Chinner  * current transaction. This keeps things simple for the higher level code,
1321f6485057SDavid Chinner  * because it always knows that the inode is locked and held in the transaction
1322f6485057SDavid Chinner  * that returns to it whether errors occur or not.  We don't mark the inode
1323f6485057SDavid Chinner  * dirty on error so that transactions can be easily aborted if possible.
13241da177e4SLinus Torvalds  */
13251da177e4SLinus Torvalds int
13264e529339SBrian Foster xfs_itruncate_extents_flags(
13278f04c47aSChristoph Hellwig 	struct xfs_trans	**tpp,
13288f04c47aSChristoph Hellwig 	struct xfs_inode	*ip,
13298f04c47aSChristoph Hellwig 	int			whichfork,
133013b86fc3SBrian Foster 	xfs_fsize_t		new_size,
13314e529339SBrian Foster 	int			flags)
13321da177e4SLinus Torvalds {
13338f04c47aSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
13348f04c47aSChristoph Hellwig 	struct xfs_trans	*tp = *tpp;
13351da177e4SLinus Torvalds 	xfs_fileoff_t		first_unmap_block;
13368f04c47aSChristoph Hellwig 	xfs_filblks_t		unmap_len;
13378f04c47aSChristoph Hellwig 	int			error = 0;
13381da177e4SLinus Torvalds 
13390b56185bSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
13400b56185bSChristoph Hellwig 	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
13410b56185bSChristoph Hellwig 	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1342ce7ae151SChristoph Hellwig 	ASSERT(new_size <= XFS_ISIZE(ip));
13438f04c47aSChristoph Hellwig 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
13441da177e4SLinus Torvalds 	ASSERT(ip->i_itemp != NULL);
1345898621d5SChristoph Hellwig 	ASSERT(ip->i_itemp->ili_lock_flags == 0);
13461da177e4SLinus Torvalds 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
13471da177e4SLinus Torvalds 
1348673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_start(ip, new_size);
1349673e8e59SChristoph Hellwig 
13504e529339SBrian Foster 	flags |= xfs_bmapi_aflag(whichfork);
135113b86fc3SBrian Foster 
13521da177e4SLinus Torvalds 	/*
13531da177e4SLinus Torvalds 	 * Since it is possible for space to become allocated beyond
13541da177e4SLinus Torvalds 	 * the end of the file (in a crash where the space is allocated
13551da177e4SLinus Torvalds 	 * but the inode size is not yet updated), simply remove any
13561da177e4SLinus Torvalds 	 * blocks which show up between the new EOF and the maximum
13574bbb04abSDarrick J. Wong 	 * possible file size.
13584bbb04abSDarrick J. Wong 	 *
13594bbb04abSDarrick J. Wong 	 * We have to free all the blocks to the bmbt maximum offset, even if
13604bbb04abSDarrick J. Wong 	 * the page cache can't scale that far.
13611da177e4SLinus Torvalds 	 */
13628f04c47aSChristoph Hellwig 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
136333005fd0SDarrick J. Wong 	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
13644bbb04abSDarrick J. Wong 		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
13658f04c47aSChristoph Hellwig 		return 0;
13664bbb04abSDarrick J. Wong 	}
13678f04c47aSChristoph Hellwig 
13684bbb04abSDarrick J. Wong 	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
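	/*
	 * Unmap in bounded chunks: each pass through the loop removes at most
	 * XFS_ITRUNC_MAX_EXTENTS extents and then finishes the deferred work,
	 * rolling the transaction, so a large truncate is spread over many
	 * small transactions rather than one unbounded one.
	 */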
13694bbb04abSDarrick J. Wong 	while (unmap_len > 0) {
137002dff7bfSBrian Foster 		ASSERT(tp->t_firstblock == NULLFSBLOCK);
13714bbb04abSDarrick J. Wong 		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
13724bbb04abSDarrick J. Wong 				flags, XFS_ITRUNC_MAX_EXTENTS);
13738f04c47aSChristoph Hellwig 		if (error)
1374d5a2e289SBrian Foster 			goto out;
13751da177e4SLinus Torvalds 
13766dd379c7SBrian Foster 		/* free the just unmapped extents */
13779e28a242SBrian Foster 		error = xfs_defer_finish(&tp);
13788f04c47aSChristoph Hellwig 		if (error)
13799b1f4e98SBrian Foster 			goto out;
13801da177e4SLinus Torvalds 	}
13818f04c47aSChristoph Hellwig 
13824919d42aSDarrick J. Wong 	if (whichfork == XFS_DATA_FORK) {
1383aa8968f2SDarrick J. Wong 		/* Remove all pending CoW reservations. */
13844919d42aSDarrick J. Wong 		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
13854bbb04abSDarrick J. Wong 				first_unmap_block, XFS_MAX_FILEOFF, true);
1386aa8968f2SDarrick J. Wong 		if (error)
1387aa8968f2SDarrick J. Wong 			goto out;
1388aa8968f2SDarrick J. Wong 
1389363e59baSDarrick J. Wong 		xfs_itruncate_clear_reflink_flags(ip);
13904919d42aSDarrick J. Wong 	}
1391aa8968f2SDarrick J. Wong 
1392673e8e59SChristoph Hellwig 	/*
1393673e8e59SChristoph Hellwig 	 * Always re-log the inode so that our permanent transaction can keep
1394673e8e59SChristoph Hellwig 	 * on rolling it forward in the log.
1395673e8e59SChristoph Hellwig 	 */
1396673e8e59SChristoph Hellwig 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1397673e8e59SChristoph Hellwig 
1398673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_end(ip, new_size);
1399673e8e59SChristoph Hellwig 
14008f04c47aSChristoph Hellwig out:
14018f04c47aSChristoph Hellwig 	*tpp = tp;
14028f04c47aSChristoph Hellwig 	return error;
14038f04c47aSChristoph Hellwig }
14048f04c47aSChristoph Hellwig 
1405c24b5dfaSDave Chinner int
1406c24b5dfaSDave Chinner xfs_release(
1407c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1408c24b5dfaSDave Chinner {
1409c24b5dfaSDave Chinner 	xfs_mount_t	*mp = ip->i_mount;
14107d88329eSDarrick J. Wong 	int		error = 0;
1411c24b5dfaSDave Chinner 
1412c19b3b05SDave Chinner 	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1413c24b5dfaSDave Chinner 		return 0;
1414c24b5dfaSDave Chinner 
1415c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
14162e973b2cSDave Chinner 	if (xfs_is_readonly(mp))
1417c24b5dfaSDave Chinner 		return 0;
1418c24b5dfaSDave Chinner 
141975c8c50fSDave Chinner 	if (!xfs_is_shutdown(mp)) {
1420c24b5dfaSDave Chinner 		int truncated;
1421c24b5dfaSDave Chinner 
1422c24b5dfaSDave Chinner 		/*
1423c24b5dfaSDave Chinner 		 * If we previously truncated this file and removed old data
1424c24b5dfaSDave Chinner 		 * in the process, we want to initiate "early" writeout on
1425c24b5dfaSDave Chinner 		 * the last close.  This is an attempt to combat the notorious
1426c24b5dfaSDave Chinner 		 * NULL files problem which is particularly noticeable from a
1427c24b5dfaSDave Chinner 		 * truncate down, buffered (re-)write (delalloc), followed by
1428c24b5dfaSDave Chinner 		 * a crash.  What we are effectively doing here is
1429c24b5dfaSDave Chinner 		 * significantly reducing the time window where we'd otherwise
1430c24b5dfaSDave Chinner 		 * be exposed to that problem.
1431c24b5dfaSDave Chinner 		 */
1432c24b5dfaSDave Chinner 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1433c24b5dfaSDave Chinner 		if (truncated) {
1434c24b5dfaSDave Chinner 			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1435eac152b4SDave Chinner 			if (ip->i_delayed_blks > 0) {
14362451337dSDave Chinner 				error = filemap_flush(VFS_I(ip)->i_mapping);
1437c24b5dfaSDave Chinner 				if (error)
1438c24b5dfaSDave Chinner 					return error;
1439c24b5dfaSDave Chinner 			}
1440c24b5dfaSDave Chinner 		}
1441c24b5dfaSDave Chinner 	}
1442c24b5dfaSDave Chinner 
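	/*
	 * Unlinked inodes are torn down by xfs_inactive() when the last
	 * reference goes away; there is nothing more to do for them here.
	 */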
144354d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink == 0)
1444c24b5dfaSDave Chinner 		return 0;
1445c24b5dfaSDave Chinner 
14467d88329eSDarrick J. Wong 	/*
14477d88329eSDarrick J. Wong 	 * If we can't get the iolock just skip truncating the blocks past EOF
14487d88329eSDarrick J. Wong 	 * because we could deadlock with the mmap_lock otherwise. We'll get
14497d88329eSDarrick J. Wong 	 * another chance to drop them once the last reference to the inode is
14507d88329eSDarrick J. Wong 	 * dropped, so we'll never leak blocks permanently.
14517d88329eSDarrick J. Wong 	 */
14527d88329eSDarrick J. Wong 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
14537d88329eSDarrick J. Wong 		return 0;
1454c24b5dfaSDave Chinner 
14557d88329eSDarrick J. Wong 	if (xfs_can_free_eofblocks(ip, false)) {
1456c24b5dfaSDave Chinner 		/*
1457a36b9261SBrian Foster 		 * If the inode is being opened, written and closed
1458a36b9261SBrian Foster 		 * frequently and we have delayed allocation blocks outstanding
1459a36b9261SBrian Foster 		 * (e.g. streaming writes from the NFS server), truncating the
1460a36b9261SBrian Foster 		 * blocks past EOF will cause fragmentation to occur.
1461a36b9261SBrian Foster 		 *
1462a36b9261SBrian Foster 		 * In this case don't do the truncation, but we have to be
1463a36b9261SBrian Foster 		 * careful how we detect this case. Blocks beyond EOF show up as
1464a36b9261SBrian Foster 		 * i_delayed_blks even when the inode is clean, so we need to
1465a36b9261SBrian Foster 		 * truncate them away first before checking for a dirty release.
1466a36b9261SBrian Foster 		 * Hence on the first dirty close we will still remove the
1467a36b9261SBrian Foster 		 * speculative allocation, but after that we will leave it in
1468a36b9261SBrian Foster 		 * place.
1469a36b9261SBrian Foster 		 */
1470a36b9261SBrian Foster 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
14717d88329eSDarrick J. Wong 			goto out_unlock;
14727d88329eSDarrick J. Wong 
1473a36b9261SBrian Foster 		error = xfs_free_eofblocks(ip);
1474a36b9261SBrian Foster 		if (error)
14757d88329eSDarrick J. Wong 			goto out_unlock;
1476c24b5dfaSDave Chinner 
1477c24b5dfaSDave Chinner 		/* delalloc blocks after truncation means it really is dirty */
1478c24b5dfaSDave Chinner 		if (ip->i_delayed_blks)
1479c24b5dfaSDave Chinner 			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1480c24b5dfaSDave Chinner 	}
14817d88329eSDarrick J. Wong 
14827d88329eSDarrick J. Wong out_unlock:
14837d88329eSDarrick J. Wong 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
14847d88329eSDarrick J. Wong 	return error;
1485c24b5dfaSDave Chinner }
1486c24b5dfaSDave Chinner 
1487c24b5dfaSDave Chinner /*
1488f7be2d7fSBrian Foster  * xfs_inactive_truncate
1489f7be2d7fSBrian Foster  *
1490f7be2d7fSBrian Foster  * Called to perform a truncate when an inode becomes unlinked.
1491f7be2d7fSBrian Foster  */
1492f7be2d7fSBrian Foster STATIC int
1493f7be2d7fSBrian Foster xfs_inactive_truncate(
1494f7be2d7fSBrian Foster 	struct xfs_inode *ip)
1495f7be2d7fSBrian Foster {
1496f7be2d7fSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
1497f7be2d7fSBrian Foster 	struct xfs_trans	*tp;
1498f7be2d7fSBrian Foster 	int			error;
1499f7be2d7fSBrian Foster 
1500253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1501f7be2d7fSBrian Foster 	if (error) {
150275c8c50fSDave Chinner 		ASSERT(xfs_is_shutdown(mp));
1503f7be2d7fSBrian Foster 		return error;
1504f7be2d7fSBrian Foster 	}
1505f7be2d7fSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1506f7be2d7fSBrian Foster 	xfs_trans_ijoin(tp, ip, 0);
1507f7be2d7fSBrian Foster 
1508f7be2d7fSBrian Foster 	/*
1509f7be2d7fSBrian Foster 	 * Log the inode size first to prevent stale data exposure in the event
1510f7be2d7fSBrian Foster 	 * of a system crash before the truncate completes. See the related
151169bca807SJan Kara 	 * comment in xfs_vn_setattr_size() for details.
1512f7be2d7fSBrian Foster 	 */
151313d2c10bSChristoph Hellwig 	ip->i_disk_size = 0;
1514f7be2d7fSBrian Foster 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1515f7be2d7fSBrian Foster 
1516f7be2d7fSBrian Foster 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1517f7be2d7fSBrian Foster 	if (error)
1518f7be2d7fSBrian Foster 		goto error_trans_cancel;
1519f7be2d7fSBrian Foster 
1520daf83964SChristoph Hellwig 	ASSERT(ip->i_df.if_nextents == 0);
1521f7be2d7fSBrian Foster 
152270393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1523f7be2d7fSBrian Foster 	if (error)
1524f7be2d7fSBrian Foster 		goto error_unlock;
1525f7be2d7fSBrian Foster 
1526f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1527f7be2d7fSBrian Foster 	return 0;
1528f7be2d7fSBrian Foster 
1529f7be2d7fSBrian Foster error_trans_cancel:
15304906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1531f7be2d7fSBrian Foster error_unlock:
1532f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1533f7be2d7fSBrian Foster 	return error;
1534f7be2d7fSBrian Foster }
1535f7be2d7fSBrian Foster 
1536f7be2d7fSBrian Foster /*
153788877d2bSBrian Foster  * xfs_inactive_ifree()
153888877d2bSBrian Foster  *
153988877d2bSBrian Foster  * Perform the inode free when an inode is unlinked.
154088877d2bSBrian Foster  */
154188877d2bSBrian Foster STATIC int
154288877d2bSBrian Foster xfs_inactive_ifree(
154388877d2bSBrian Foster 	struct xfs_inode *ip)
154488877d2bSBrian Foster {
154588877d2bSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
154688877d2bSBrian Foster 	struct xfs_trans	*tp;
154788877d2bSBrian Foster 	int			error;
154888877d2bSBrian Foster 
15499d43b180SBrian Foster 	/*
155076d771b4SChristoph Hellwig 	 * We try to use a per-AG reservation for any block needed by the finobt
155176d771b4SChristoph Hellwig 	 * tree, but as the finobt feature predates the per-AG reservation
155276d771b4SChristoph Hellwig 	 * support, a degraded file system might not have enough space for the
155376d771b4SChristoph Hellwig 	 * reservation at mount time.  In that case try to dip into the reserved
155476d771b4SChristoph Hellwig 	 * pool and pray.
15559d43b180SBrian Foster 	 *
15569d43b180SBrian Foster 	 * Send a warning if the reservation does happen to fail, as the inode
15579d43b180SBrian Foster 	 * now remains allocated and sits on the unlinked list until the fs is
15589d43b180SBrian Foster 	 * repaired.
15599d43b180SBrian Foster 	 */
1560e1f6ca11SDarrick J. Wong 	if (unlikely(mp->m_finobt_nores)) {
1561253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
156276d771b4SChristoph Hellwig 				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
156376d771b4SChristoph Hellwig 				&tp);
156476d771b4SChristoph Hellwig 	} else {
156576d771b4SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
156676d771b4SChristoph Hellwig 	}
156788877d2bSBrian Foster 	if (error) {
15682451337dSDave Chinner 		if (error == -ENOSPC) {
15699d43b180SBrian Foster 			xfs_warn_ratelimited(mp,
15709d43b180SBrian Foster 			"Failed to remove inode(s) from unlinked list. "
15719d43b180SBrian Foster 			"Please free space, unmount and run xfs_repair.");
15729d43b180SBrian Foster 		} else {
157375c8c50fSDave Chinner 			ASSERT(xfs_is_shutdown(mp));
15749d43b180SBrian Foster 		}
157588877d2bSBrian Foster 		return error;
157688877d2bSBrian Foster 	}
157788877d2bSBrian Foster 
157896355d5aSDave Chinner 	/*
157996355d5aSDave Chinner 	 * We do not hold the inode locked across the entire rolling transaction
158096355d5aSDave Chinner 	 * here. We only need to hold it for the first transaction that
158196355d5aSDave Chinner 	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
158296355d5aSDave Chinner 	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
158396355d5aSDave Chinner 	 * here breaks the relationship between cluster buffer invalidation and
158496355d5aSDave Chinner 	 * stale inode invalidation on cluster buffer item journal commit
158596355d5aSDave Chinner 	 * completion, and can result in leaving dirty stale inodes hanging
158696355d5aSDave Chinner 	 * around in memory.
158796355d5aSDave Chinner 	 *
158896355d5aSDave Chinner 	 * We have no need for serialising this inode operation against other
158996355d5aSDave Chinner 	 * operations - we freed the inode and hence reallocation is required
159096355d5aSDave Chinner 	 * and that will serialise on reallocating the space the deferops need
159196355d5aSDave Chinner 	 * to free. Hence we can unlock the inode on the first commit of
159296355d5aSDave Chinner 	 * the transaction rather than roll it right through the deferops. This
159396355d5aSDave Chinner 	 * avoids relogging the XFS_ISTALE inode.
159496355d5aSDave Chinner 	 *
159596355d5aSDave Chinner 	 * We check that xfs_ifree() hasn't grown an internal transaction roll
159696355d5aSDave Chinner 	 * by asserting that the inode is still locked when it returns.
159796355d5aSDave Chinner 	 */
159888877d2bSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
159996355d5aSDave Chinner 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
160088877d2bSBrian Foster 
16010e0417f3SBrian Foster 	error = xfs_ifree(tp, ip);
160296355d5aSDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
160388877d2bSBrian Foster 	if (error) {
160488877d2bSBrian Foster 		/*
160588877d2bSBrian Foster 		 * If we fail to free the inode, shut down.  The cancel
160688877d2bSBrian Foster 		 * might do that; we need to make sure.  Otherwise the
160788877d2bSBrian Foster 		 * inode might be lost for a long time or forever.
160888877d2bSBrian Foster 		 */
160975c8c50fSDave Chinner 		if (!xfs_is_shutdown(mp)) {
161088877d2bSBrian Foster 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
161188877d2bSBrian Foster 				__func__, error);
161288877d2bSBrian Foster 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
161388877d2bSBrian Foster 		}
16144906e215SChristoph Hellwig 		xfs_trans_cancel(tp);
161588877d2bSBrian Foster 		return error;
161688877d2bSBrian Foster 	}
161788877d2bSBrian Foster 
161888877d2bSBrian Foster 	/*
161988877d2bSBrian Foster 	 * Credit the quota account(s). The inode is gone.
162088877d2bSBrian Foster 	 */
162188877d2bSBrian Foster 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
162288877d2bSBrian Foster 
162388877d2bSBrian Foster 	/*
1624d4a97a04SBrian Foster 	 * Just ignore errors at this point.  There is nothing we can do except
1625d4a97a04SBrian Foster 	 * to try to keep going. Make sure it's not a silent error.
162688877d2bSBrian Foster 	 */
162770393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
162888877d2bSBrian Foster 	if (error)
162988877d2bSBrian Foster 		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
163088877d2bSBrian Foster 			__func__, error);
163188877d2bSBrian Foster 
163288877d2bSBrian Foster 	return 0;
163388877d2bSBrian Foster }
163488877d2bSBrian Foster 
163588877d2bSBrian Foster /*
163662af7d54SDarrick J. Wong  * Returns true if we need to update the on-disk metadata before we can free
163762af7d54SDarrick J. Wong  * the memory used by this inode.  Updates include freeing post-eof
163862af7d54SDarrick J. Wong  * preallocations; freeing COW staging extents; and marking the inode free in
163962af7d54SDarrick J. Wong  * the inobt if it is on the unlinked list.
164062af7d54SDarrick J. Wong  */
164162af7d54SDarrick J. Wong bool
164262af7d54SDarrick J. Wong xfs_inode_needs_inactive(
164362af7d54SDarrick J. Wong 	struct xfs_inode	*ip)
164462af7d54SDarrick J. Wong {
164562af7d54SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
164662af7d54SDarrick J. Wong 	struct xfs_ifork	*cow_ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
164762af7d54SDarrick J. Wong 
164862af7d54SDarrick J. Wong 	/*
164962af7d54SDarrick J. Wong 	 * If the inode is already free, then there can be nothing
165062af7d54SDarrick J. Wong 	 * to clean up here.
165162af7d54SDarrick J. Wong 	 */
165262af7d54SDarrick J. Wong 	if (VFS_I(ip)->i_mode == 0)
165362af7d54SDarrick J. Wong 		return false;
165462af7d54SDarrick J. Wong 
165562af7d54SDarrick J. Wong 	/* If this is a read-only mount, don't do this (would generate I/O) */
16562e973b2cSDave Chinner 	if (xfs_is_readonly(mp))
165762af7d54SDarrick J. Wong 		return false;
165862af7d54SDarrick J. Wong 
165962af7d54SDarrick J. Wong 	/* If the log isn't running, push inodes straight to reclaim. */
166075c8c50fSDave Chinner 	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
166162af7d54SDarrick J. Wong 		return false;
166262af7d54SDarrick J. Wong 
166362af7d54SDarrick J. Wong 	/* Metadata inodes require explicit resource cleanup. */
166462af7d54SDarrick J. Wong 	if (xfs_is_metadata_inode(ip))
166562af7d54SDarrick J. Wong 		return false;
166662af7d54SDarrick J. Wong 
166762af7d54SDarrick J. Wong 	/* Want to clean out the cow blocks if there are any. */
166862af7d54SDarrick J. Wong 	if (cow_ifp && cow_ifp->if_bytes > 0)
166962af7d54SDarrick J. Wong 		return true;
167062af7d54SDarrick J. Wong 
167162af7d54SDarrick J. Wong 	/* Unlinked files must be freed. */
167262af7d54SDarrick J. Wong 	if (VFS_I(ip)->i_nlink == 0)
167362af7d54SDarrick J. Wong 		return true;
167462af7d54SDarrick J. Wong 
167562af7d54SDarrick J. Wong 	/*
167662af7d54SDarrick J. Wong 	 * This file isn't being freed, so check if there are post-eof blocks
167762af7d54SDarrick J. Wong 	 * to free.  @force is true because we are evicting an inode from the
167862af7d54SDarrick J. Wong 	 * cache.  Post-eof blocks must be freed, lest we end up with broken
167962af7d54SDarrick J. Wong 	 * free space accounting.
168062af7d54SDarrick J. Wong 	 *
168162af7d54SDarrick J. Wong 	 * Note: don't bother with iolock here since lockdep complains about
168262af7d54SDarrick J. Wong 	 * acquiring it in reclaim context. We have the only reference to the
168362af7d54SDarrick J. Wong 	 * inode at this point anyways.
168462af7d54SDarrick J. Wong 	 */
168562af7d54SDarrick J. Wong 	return xfs_can_free_eofblocks(ip, true);
168662af7d54SDarrick J. Wong }
168762af7d54SDarrick J. Wong 
168862af7d54SDarrick J. Wong /*
1689c24b5dfaSDave Chinner  * xfs_inactive
1690c24b5dfaSDave Chinner  *
1691c24b5dfaSDave Chinner  * This is called when the reference count for the vnode
1692c24b5dfaSDave Chinner  * goes to zero.  If the file has been unlinked, then it must
1693c24b5dfaSDave Chinner  * now be truncated.  Also, we clear all of the read-ahead state
1694c24b5dfaSDave Chinner  * kept for the inode here since the file is now closed.
1695c24b5dfaSDave Chinner  */
169674564fb4SBrian Foster void
1697c24b5dfaSDave Chinner xfs_inactive(
1698c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1699c24b5dfaSDave Chinner {
17003d3c8b52SJie Liu 	struct xfs_mount	*mp;
1701c24b5dfaSDave Chinner 	int			error;
1702c24b5dfaSDave Chinner 	int			truncate = 0;
1703c24b5dfaSDave Chinner 
1704c24b5dfaSDave Chinner 	/*
1705c24b5dfaSDave Chinner 	 * If the inode is already free, then there can be nothing
1706c24b5dfaSDave Chinner 	 * to clean up here.
1707c24b5dfaSDave Chinner 	 */
1708c19b3b05SDave Chinner 	if (VFS_I(ip)->i_mode == 0) {
1709c24b5dfaSDave Chinner 		ASSERT(ip->i_df.if_broot_bytes == 0);
17103ea06d73SDarrick J. Wong 		goto out;
1711c24b5dfaSDave Chinner 	}
1712c24b5dfaSDave Chinner 
1713c24b5dfaSDave Chinner 	mp = ip->i_mount;
171417c12bcdSDarrick J. Wong 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1715c24b5dfaSDave Chinner 
1716c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
17172e973b2cSDave Chinner 	if (xfs_is_readonly(mp))
17183ea06d73SDarrick J. Wong 		goto out;
1719c24b5dfaSDave Chinner 
1720383e32b0SDarrick J. Wong 	/* Metadata inodes require explicit resource cleanup. */
1721383e32b0SDarrick J. Wong 	if (xfs_is_metadata_inode(ip))
17223ea06d73SDarrick J. Wong 		goto out;
1723383e32b0SDarrick J. Wong 
17246231848cSDarrick J. Wong 	/* Try to clean out the cow blocks if there are any. */
172551d62690SChristoph Hellwig 	if (xfs_inode_has_cow_data(ip))
17266231848cSDarrick J. Wong 		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
17276231848cSDarrick J. Wong 
172854d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink != 0) {
1729c24b5dfaSDave Chinner 		/*
1730c24b5dfaSDave Chinner 		 * force is true because we are evicting an inode from the
1731c24b5dfaSDave Chinner 		 * cache. Post-eof blocks must be freed, lest we end up with
1732c24b5dfaSDave Chinner 		 * broken free space accounting.
17333b4683c2SBrian Foster 		 *
17343b4683c2SBrian Foster 		 * Note: don't bother with iolock here since lockdep complains
17353b4683c2SBrian Foster 		 * about acquiring it in reclaim context. We have the only
17363b4683c2SBrian Foster 		 * reference to the inode at this point anyways.
1737c24b5dfaSDave Chinner 		 */
17383b4683c2SBrian Foster 		if (xfs_can_free_eofblocks(ip, true))
1739a36b9261SBrian Foster 			xfs_free_eofblocks(ip);
174074564fb4SBrian Foster 
17413ea06d73SDarrick J. Wong 		goto out;
1742c24b5dfaSDave Chinner 	}
1743c24b5dfaSDave Chinner 
1744c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode) &&
174513d2c10bSChristoph Hellwig 	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1746daf83964SChristoph Hellwig 	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1747c24b5dfaSDave Chinner 		truncate = 1;
1748c24b5dfaSDave Chinner 
1749c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
1750c24b5dfaSDave Chinner 	if (error)
17513ea06d73SDarrick J. Wong 		goto out;
1752c24b5dfaSDave Chinner 
1753c19b3b05SDave Chinner 	if (S_ISLNK(VFS_I(ip)->i_mode))
175436b21ddeSBrian Foster 		error = xfs_inactive_symlink(ip);
1755f7be2d7fSBrian Foster 	else if (truncate)
1756f7be2d7fSBrian Foster 		error = xfs_inactive_truncate(ip);
175736b21ddeSBrian Foster 	if (error)
17583ea06d73SDarrick J. Wong 		goto out;
1759c24b5dfaSDave Chinner 
1760c24b5dfaSDave Chinner 	/*
1761c24b5dfaSDave Chinner 	 * If there are attributes associated with the file then blow them away
1762c24b5dfaSDave Chinner 	 * now.  The code calls a routine that recursively deconstructs the
17636dfe5a04SDave Chinner 	 * attribute fork. It also blows away the in-core attribute fork.
1764c24b5dfaSDave Chinner 	 */
17656dfe5a04SDave Chinner 	if (XFS_IFORK_Q(ip)) {
1766c24b5dfaSDave Chinner 		error = xfs_attr_inactive(ip);
1767c24b5dfaSDave Chinner 		if (error)
17683ea06d73SDarrick J. Wong 			goto out;
1769c24b5dfaSDave Chinner 	}
1770c24b5dfaSDave Chinner 
17716dfe5a04SDave Chinner 	ASSERT(!ip->i_afp);
17727821ea30SChristoph Hellwig 	ASSERT(ip->i_forkoff == 0);
1773c24b5dfaSDave Chinner 
1774c24b5dfaSDave Chinner 	/*
1775c24b5dfaSDave Chinner 	 * Free the inode.
1776c24b5dfaSDave Chinner 	 */
17773ea06d73SDarrick J. Wong 	xfs_inactive_ifree(ip);
1778c24b5dfaSDave Chinner 
17793ea06d73SDarrick J. Wong out:
1780c24b5dfaSDave Chinner 	/*
17813ea06d73SDarrick J. Wong 	 * We're done making metadata updates for this inode, so we can release
17823ea06d73SDarrick J. Wong 	 * the attached dquots.
1783c24b5dfaSDave Chinner 	 */
1784c24b5dfaSDave Chinner 	xfs_qm_dqdetach(ip);
1785c24b5dfaSDave Chinner }
1786c24b5dfaSDave Chinner 
17871da177e4SLinus Torvalds /*
17889b247179SDarrick J. Wong  * In-Core Unlinked List Lookups
17899b247179SDarrick J. Wong  * =============================
17909b247179SDarrick J. Wong  *
17919b247179SDarrick J. Wong  * Every inode is supposed to be reachable from some other piece of metadata
17929b247179SDarrick J. Wong  * with the exception of the root directory.  Inodes with a connection to a
17939b247179SDarrick J. Wong  * file descriptor but not linked from anywhere in the on-disk directory tree
17949b247179SDarrick J. Wong  * are collectively known as unlinked inodes, though the filesystem itself
17959b247179SDarrick J. Wong  * maintains links to these inodes so that on-disk metadata are consistent.
17969b247179SDarrick J. Wong  *
17979b247179SDarrick J. Wong  * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
17989b247179SDarrick J. Wong  * header contains a number of buckets that point to an inode, and each inode
17999b247179SDarrick J. Wong  * record has a pointer to the next inode in the hash chain.  This
18009b247179SDarrick J. Wong  * singly-linked list causes scaling problems in the iunlink remove function
18019b247179SDarrick J. Wong  * because we must walk that list to find the inode that points to the inode
18029b247179SDarrick J. Wong  * being removed from the unlinked hash bucket list.
18039b247179SDarrick J. Wong  *
18049b247179SDarrick J. Wong  * What if we modelled the unlinked list as a collection of records capturing
18059b247179SDarrick J. Wong  * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
18069b247179SDarrick J. Wong  * have a fast way to look up unlinked list predecessors, which avoids the
18079b247179SDarrick J. Wong  * slow list walk.  That's exactly what we do here (in-core) with a per-AG
18089b247179SDarrick J. Wong  * rhashtable.
18099b247179SDarrick J. Wong  *
18109b247179SDarrick J. Wong  * Because this is a backref cache, we ignore operational failures since the
18119b247179SDarrick J. Wong  * iunlink code can fall back to the slow bucket walk.  The only errors that
18129b247179SDarrick J. Wong  * should bubble out are for obviously incorrect situations.
18139b247179SDarrick J. Wong  *
18149b247179SDarrick J. Wong  * All users of the backref cache MUST hold the AGI buffer lock to serialize
18159b247179SDarrick J. Wong  * access or have otherwise provided for concurrency control.
18169b247179SDarrick J. Wong  */
18179b247179SDarrick J. Wong 
18189b247179SDarrick J. Wong /* Capture a "X.next_unlinked = Y" relationship. */
18199b247179SDarrick J. Wong struct xfs_iunlink {
18209b247179SDarrick J. Wong 	struct rhash_head	iu_rhash_head;
18219b247179SDarrick J. Wong 	xfs_agino_t		iu_agino;		/* X */
18229b247179SDarrick J. Wong 	xfs_agino_t		iu_next_unlinked;	/* Y */
18239b247179SDarrick J. Wong };
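/*
 * Illustrative example: if the on-disk chain for a bucket is A -> B -> C,
 * the cache holds { iu_agino = A, iu_next_unlinked = B } and
 * { iu_agino = B, iu_next_unlinked = C }.  Because the table is keyed on
 * iu_next_unlinked, removing C only needs a lookup keyed on C to find its
 * predecessor B, rather than walking the bucket list from A.
 */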
18249b247179SDarrick J. Wong 
18259b247179SDarrick J. Wong /* Unlinked list predecessor lookup hashtable construction */
18269b247179SDarrick J. Wong static int
18279b247179SDarrick J. Wong xfs_iunlink_obj_cmpfn(
18289b247179SDarrick J. Wong 	struct rhashtable_compare_arg	*arg,
18299b247179SDarrick J. Wong 	const void			*obj)
18309b247179SDarrick J. Wong {
18319b247179SDarrick J. Wong 	const xfs_agino_t		*key = arg->key;
18329b247179SDarrick J. Wong 	const struct xfs_iunlink	*iu = obj;
18339b247179SDarrick J. Wong 
18349b247179SDarrick J. Wong 	if (iu->iu_next_unlinked != *key)
18359b247179SDarrick J. Wong 		return 1;
18369b247179SDarrick J. Wong 	return 0;
18379b247179SDarrick J. Wong }
18389b247179SDarrick J. Wong 
18399b247179SDarrick J. Wong static const struct rhashtable_params xfs_iunlink_hash_params = {
18409b247179SDarrick J. Wong 	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
18419b247179SDarrick J. Wong 	.key_len		= sizeof(xfs_agino_t),
18429b247179SDarrick J. Wong 	.key_offset		= offsetof(struct xfs_iunlink,
18439b247179SDarrick J. Wong 					   iu_next_unlinked),
18449b247179SDarrick J. Wong 	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
18459b247179SDarrick J. Wong 	.automatic_shrinking	= true,
18469b247179SDarrick J. Wong 	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
18479b247179SDarrick J. Wong };
18489b247179SDarrick J. Wong 
18499b247179SDarrick J. Wong /*
18509b247179SDarrick J. Wong  * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
18519b247179SDarrick J. Wong  * relation is found.
18529b247179SDarrick J. Wong  */
18539b247179SDarrick J. Wong static xfs_agino_t
18549b247179SDarrick J. Wong xfs_iunlink_lookup_backref(
18559b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18569b247179SDarrick J. Wong 	xfs_agino_t		agino)
18579b247179SDarrick J. Wong {
18589b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
18599b247179SDarrick J. Wong 
18609b247179SDarrick J. Wong 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
18619b247179SDarrick J. Wong 			xfs_iunlink_hash_params);
18629b247179SDarrick J. Wong 	return iu ? iu->iu_agino : NULLAGINO;
18639b247179SDarrick J. Wong }
18649b247179SDarrick J. Wong 
18659b247179SDarrick J. Wong /*
18669b247179SDarrick J. Wong  * Take ownership of an iunlink cache entry and insert it into the hash table.
18679b247179SDarrick J. Wong  * If successful, the entry will be owned by the cache; if not, it is freed.
18689b247179SDarrick J. Wong  * Either way, the caller does not own @iu after this call.
18699b247179SDarrick J. Wong  */
18709b247179SDarrick J. Wong static int
18719b247179SDarrick J. Wong xfs_iunlink_insert_backref(
18729b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18739b247179SDarrick J. Wong 	struct xfs_iunlink	*iu)
18749b247179SDarrick J. Wong {
18759b247179SDarrick J. Wong 	int			error;
18769b247179SDarrick J. Wong 
18779b247179SDarrick J. Wong 	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
18789b247179SDarrick J. Wong 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
18799b247179SDarrick J. Wong 	/*
18809b247179SDarrick J. Wong 	 * Fail loudly if there already was an entry because that's a sign of
18819b247179SDarrick J. Wong 	 * corruption of in-memory data.  Also fail loudly if we see an error
18829b247179SDarrick J. Wong 	 * code we didn't anticipate from the rhashtable code.  Currently we
18839b247179SDarrick J. Wong 	 * only anticipate ENOMEM.
18849b247179SDarrick J. Wong 	 */
18859b247179SDarrick J. Wong 	if (error) {
18869b247179SDarrick J. Wong 		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
18879b247179SDarrick J. Wong 		kmem_free(iu);
18889b247179SDarrick J. Wong 	}
18899b247179SDarrick J. Wong 	/*
18909b247179SDarrick J. Wong 	 * Absorb any runtime errors that aren't a result of corruption because
18919b247179SDarrick J. Wong 	 * this is a cache and we can always fall back to bucket list scanning.
18929b247179SDarrick J. Wong 	 */
18939b247179SDarrick J. Wong 	if (error != 0 && error != -EEXIST)
18949b247179SDarrick J. Wong 		error = 0;
18959b247179SDarrick J. Wong 	return error;
18969b247179SDarrick J. Wong }
18979b247179SDarrick J. Wong 
18989b247179SDarrick J. Wong /* Remember that @prev_agino.next_unlinked = @this_agino. */
18999b247179SDarrick J. Wong static int
19009b247179SDarrick J. Wong xfs_iunlink_add_backref(
19019b247179SDarrick J. Wong 	struct xfs_perag	*pag,
19029b247179SDarrick J. Wong 	xfs_agino_t		prev_agino,
19039b247179SDarrick J. Wong 	xfs_agino_t		this_agino)
19049b247179SDarrick J. Wong {
19059b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
19069b247179SDarrick J. Wong 
19079b247179SDarrick J. Wong 	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
19089b247179SDarrick J. Wong 		return 0;
19099b247179SDarrick J. Wong 
1910707e0ddaSTetsuo Handa 	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
19119b247179SDarrick J. Wong 	iu->iu_agino = prev_agino;
19129b247179SDarrick J. Wong 	iu->iu_next_unlinked = this_agino;
19139b247179SDarrick J. Wong 
19149b247179SDarrick J. Wong 	return xfs_iunlink_insert_backref(pag, iu);
19159b247179SDarrick J. Wong }
19169b247179SDarrick J. Wong 
19179b247179SDarrick J. Wong /*
19189b247179SDarrick J. Wong  * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
19199b247179SDarrick J. Wong  * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
19209b247179SDarrick J. Wong  * wasn't any such entry then we don't bother.
19219b247179SDarrick J. Wong  */
19229b247179SDarrick J. Wong static int
19239b247179SDarrick J. Wong xfs_iunlink_change_backref(
19249b247179SDarrick J. Wong 	struct xfs_perag	*pag,
19259b247179SDarrick J. Wong 	xfs_agino_t		agino,
19269b247179SDarrick J. Wong 	xfs_agino_t		next_unlinked)
19279b247179SDarrick J. Wong {
19289b247179SDarrick J. Wong 	struct xfs_iunlink	*iu;
19299b247179SDarrick J. Wong 	int			error;
19309b247179SDarrick J. Wong 
19319b247179SDarrick J. Wong 	/* Look up the old entry; if there wasn't one then exit. */
19329b247179SDarrick J. Wong 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
19339b247179SDarrick J. Wong 			xfs_iunlink_hash_params);
19349b247179SDarrick J. Wong 	if (!iu)
19359b247179SDarrick J. Wong 		return 0;
19369b247179SDarrick J. Wong 
19379b247179SDarrick J. Wong 	/*
19389b247179SDarrick J. Wong 	 * Remove the entry.  This shouldn't ever return an error, but if we
19399b247179SDarrick J. Wong 	 * couldn't remove the old entry we don't want to add it again to the
19409b247179SDarrick J. Wong 	 * hash table, and if the entry disappeared on us then someone's
19419b247179SDarrick J. Wong 	 * violated the locking rules and we need to fail loudly.  Either way
19429b247179SDarrick J. Wong 	 * we cannot remove the inode because internal state is or would have
19439b247179SDarrick J. Wong 	 * been corrupt.
19449b247179SDarrick J. Wong 	 */
19459b247179SDarrick J. Wong 	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
19469b247179SDarrick J. Wong 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
19479b247179SDarrick J. Wong 	if (error)
19489b247179SDarrick J. Wong 		return error;
19499b247179SDarrick J. Wong 
19509b247179SDarrick J. Wong 	/* If there is no new next entry just free our item and return. */
19519b247179SDarrick J. Wong 	if (next_unlinked == NULLAGINO) {
19529b247179SDarrick J. Wong 		kmem_free(iu);
19539b247179SDarrick J. Wong 		return 0;
19549b247179SDarrick J. Wong 	}
19559b247179SDarrick J. Wong 
19569b247179SDarrick J. Wong 	/* Update the entry and re-add it to the hash table. */
19579b247179SDarrick J. Wong 	iu->iu_next_unlinked = next_unlinked;
19589b247179SDarrick J. Wong 	return xfs_iunlink_insert_backref(pag, iu);
19599b247179SDarrick J. Wong }
19609b247179SDarrick J. Wong 
19619b247179SDarrick J. Wong /* Set up the in-core predecessor structures. */
19629b247179SDarrick J. Wong int
19639b247179SDarrick J. Wong xfs_iunlink_init(
19649b247179SDarrick J. Wong 	struct xfs_perag	*pag)
19659b247179SDarrick J. Wong {
19669b247179SDarrick J. Wong 	return rhashtable_init(&pag->pagi_unlinked_hash,
19679b247179SDarrick J. Wong 			&xfs_iunlink_hash_params);
19689b247179SDarrick J. Wong }
19699b247179SDarrick J. Wong 
19709b247179SDarrick J. Wong /* Free the in-core predecessor structures. */
19719b247179SDarrick J. Wong static void
19729b247179SDarrick J. Wong xfs_iunlink_free_item(
19739b247179SDarrick J. Wong 	void			*ptr,
19749b247179SDarrick J. Wong 	void			*arg)
19759b247179SDarrick J. Wong {
19769b247179SDarrick J. Wong 	struct xfs_iunlink	*iu = ptr;
19779b247179SDarrick J. Wong 	bool			*freed_anything = arg;
19789b247179SDarrick J. Wong 
19799b247179SDarrick J. Wong 	*freed_anything = true;
19809b247179SDarrick J. Wong 	kmem_free(iu);
19819b247179SDarrick J. Wong }
19829b247179SDarrick J. Wong 
19839b247179SDarrick J. Wong void
19849b247179SDarrick J. Wong xfs_iunlink_destroy(
19859b247179SDarrick J. Wong 	struct xfs_perag	*pag)
19869b247179SDarrick J. Wong {
19879b247179SDarrick J. Wong 	bool			freed_anything = false;
19889b247179SDarrick J. Wong 
19899b247179SDarrick J. Wong 	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
19909b247179SDarrick J. Wong 			xfs_iunlink_free_item, &freed_anything);
19919b247179SDarrick J. Wong 
199275c8c50fSDave Chinner 	ASSERT(freed_anything == false || xfs_is_shutdown(pag->pag_mount));
19939b247179SDarrick J. Wong }
19949b247179SDarrick J. Wong 
19959b247179SDarrick J. Wong /*
1996*a83d5a8bSDave Chinner  * Find an inode on the unlinked list. This does not take references to the
1997*a83d5a8bSDave Chinner  * inode - we have an existence guarantee because we hold the AGI buffer lock
1998*a83d5a8bSDave Chinner  * and only unlinked, referenced inodes can be on the unlinked inode list.  If we
1999*a83d5a8bSDave Chinner  * don't find the inode in cache, then let the caller handle the situation.
2000*a83d5a8bSDave Chinner  */
2001*a83d5a8bSDave Chinner static struct xfs_inode *
2002*a83d5a8bSDave Chinner xfs_iunlink_lookup(
2003*a83d5a8bSDave Chinner 	struct xfs_perag	*pag,
2004*a83d5a8bSDave Chinner 	xfs_agino_t		agino)
2005*a83d5a8bSDave Chinner {
2006*a83d5a8bSDave Chinner 	struct xfs_inode	*ip;
2007*a83d5a8bSDave Chinner 
2008*a83d5a8bSDave Chinner 	rcu_read_lock();
2009*a83d5a8bSDave Chinner 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
2010*a83d5a8bSDave Chinner 
2011*a83d5a8bSDave Chinner 	/*
2012*a83d5a8bSDave Chinner 	 * Inode not in memory or in RCU freeing limbo should not happen.
2013*a83d5a8bSDave Chinner 	 * Warn about this and let the caller handle the failure.
2014*a83d5a8bSDave Chinner 	 */
2015*a83d5a8bSDave Chinner 	if (WARN_ON_ONCE(!ip || !ip->i_ino)) {
2016*a83d5a8bSDave Chinner 		rcu_read_unlock();
2017*a83d5a8bSDave Chinner 		return NULL;
2018*a83d5a8bSDave Chinner 	}
2019*a83d5a8bSDave Chinner 	ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
2020*a83d5a8bSDave Chinner 	rcu_read_unlock();
2021*a83d5a8bSDave Chinner 	return ip;
2022*a83d5a8bSDave Chinner }
2023*a83d5a8bSDave Chinner 
2024*a83d5a8bSDave Chinner /*
20259a4a5118SDarrick J. Wong  * Point the AGI unlinked bucket at an inode and log the results.  The caller
20269a4a5118SDarrick J. Wong  * is responsible for validating the old value.
20279a4a5118SDarrick J. Wong  */
20289a4a5118SDarrick J. Wong STATIC int
20299a4a5118SDarrick J. Wong xfs_iunlink_update_bucket(
20309a4a5118SDarrick J. Wong 	struct xfs_trans	*tp,
2031f40aadb2SDave Chinner 	struct xfs_perag	*pag,
20329a4a5118SDarrick J. Wong 	struct xfs_buf		*agibp,
20339a4a5118SDarrick J. Wong 	unsigned int		bucket_index,
20349a4a5118SDarrick J. Wong 	xfs_agino_t		new_agino)
20359a4a5118SDarrick J. Wong {
2036370c782bSChristoph Hellwig 	struct xfs_agi		*agi = agibp->b_addr;
20379a4a5118SDarrick J. Wong 	xfs_agino_t		old_value;
20389a4a5118SDarrick J. Wong 	int			offset;
20399a4a5118SDarrick J. Wong 
20402d6ca832SDave Chinner 	ASSERT(xfs_verify_agino_or_null(pag, new_agino));
20419a4a5118SDarrick J. Wong 
20429a4a5118SDarrick J. Wong 	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2043f40aadb2SDave Chinner 	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
20449a4a5118SDarrick J. Wong 			old_value, new_agino);
20459a4a5118SDarrick J. Wong 
20469a4a5118SDarrick J. Wong 	/*
20479a4a5118SDarrick J. Wong 	 * We should never find the head of the list already set to the value
20489a4a5118SDarrick J. Wong 	 * passed in because either we're adding or removing ourselves from the
20499a4a5118SDarrick J. Wong 	 * head of the list.
20509a4a5118SDarrick J. Wong 	 */
2051a5155b87SDarrick J. Wong 	if (old_value == new_agino) {
20528d57c216SDarrick J. Wong 		xfs_buf_mark_corrupt(agibp);
20539a4a5118SDarrick J. Wong 		return -EFSCORRUPTED;
2054a5155b87SDarrick J. Wong 	}
20559a4a5118SDarrick J. Wong 
20569a4a5118SDarrick J. Wong 	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
20579a4a5118SDarrick J. Wong 	offset = offsetof(struct xfs_agi, agi_unlinked) +
20589a4a5118SDarrick J. Wong 			(sizeof(xfs_agino_t) * bucket_index);
20599a4a5118SDarrick J. Wong 	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
20609a4a5118SDarrick J. Wong 	return 0;
20619a4a5118SDarrick J. Wong }
20629a4a5118SDarrick J. Wong 
2063f2fc16a3SDarrick J. Wong /* Set an on-disk inode's next_unlinked pointer. */
2064f2fc16a3SDarrick J. Wong STATIC void
2065f2fc16a3SDarrick J. Wong xfs_iunlink_update_dinode(
2066f2fc16a3SDarrick J. Wong 	struct xfs_trans	*tp,
2067f40aadb2SDave Chinner 	struct xfs_perag	*pag,
2068f2fc16a3SDarrick J. Wong 	xfs_agino_t		agino,
2069f2fc16a3SDarrick J. Wong 	struct xfs_buf		*ibp,
2070f2fc16a3SDarrick J. Wong 	struct xfs_dinode	*dip,
2071f2fc16a3SDarrick J. Wong 	struct xfs_imap		*imap,
2072f2fc16a3SDarrick J. Wong 	xfs_agino_t		next_agino)
2073f2fc16a3SDarrick J. Wong {
2074f2fc16a3SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2075f2fc16a3SDarrick J. Wong 	int			offset;
2076f2fc16a3SDarrick J. Wong 
20772d6ca832SDave Chinner 	ASSERT(xfs_verify_agino_or_null(pag, next_agino));
2078f2fc16a3SDarrick J. Wong 
2079f40aadb2SDave Chinner 	trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino,
2080f2fc16a3SDarrick J. Wong 			be32_to_cpu(dip->di_next_unlinked), next_agino);
2081f2fc16a3SDarrick J. Wong 
2082f2fc16a3SDarrick J. Wong 	dip->di_next_unlinked = cpu_to_be32(next_agino);
2083f2fc16a3SDarrick J. Wong 	offset = imap->im_boffset +
2084f2fc16a3SDarrick J. Wong 			offsetof(struct xfs_dinode, di_next_unlinked);
2085f2fc16a3SDarrick J. Wong 
2086f2fc16a3SDarrick J. Wong 	/* need to recalc the inode CRC if appropriate */
2087f2fc16a3SDarrick J. Wong 	xfs_dinode_calc_crc(mp, dip);
2088f2fc16a3SDarrick J. Wong 	xfs_trans_inode_buf(tp, ibp);
2089f2fc16a3SDarrick J. Wong 	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2090f2fc16a3SDarrick J. Wong }
2091f2fc16a3SDarrick J. Wong 
2092f2fc16a3SDarrick J. Wong /* Set an in-core inode's unlinked pointer and return the old value. */
2093f2fc16a3SDarrick J. Wong STATIC int
2094f2fc16a3SDarrick J. Wong xfs_iunlink_update_inode(
2095f2fc16a3SDarrick J. Wong 	struct xfs_trans	*tp,
2096f2fc16a3SDarrick J. Wong 	struct xfs_inode	*ip,
2097f40aadb2SDave Chinner 	struct xfs_perag	*pag,
2098f2fc16a3SDarrick J. Wong 	xfs_agino_t		next_agino,
2099f2fc16a3SDarrick J. Wong 	xfs_agino_t		*old_next_agino)
2100f2fc16a3SDarrick J. Wong {
2101f2fc16a3SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2102f2fc16a3SDarrick J. Wong 	struct xfs_dinode	*dip;
2103f2fc16a3SDarrick J. Wong 	struct xfs_buf		*ibp;
2104f2fc16a3SDarrick J. Wong 	xfs_agino_t		old_value;
2105f2fc16a3SDarrick J. Wong 	int			error;
2106f2fc16a3SDarrick J. Wong 
21072d6ca832SDave Chinner 	ASSERT(xfs_verify_agino_or_null(pag, next_agino));
2108f2fc16a3SDarrick J. Wong 
2109af9dcddeSChristoph Hellwig 	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
2110f2fc16a3SDarrick J. Wong 	if (error)
2111f2fc16a3SDarrick J. Wong 		return error;
2112af9dcddeSChristoph Hellwig 	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
2113f2fc16a3SDarrick J. Wong 
2114f2fc16a3SDarrick J. Wong 	/* Make sure the old pointer isn't garbage. */
2115f2fc16a3SDarrick J. Wong 	old_value = be32_to_cpu(dip->di_next_unlinked);
21164fcc94d6SDave Chinner 	if (old_value != ip->i_next_unlinked ||
21174fcc94d6SDave Chinner 	    !xfs_verify_agino_or_null(pag, old_value)) {
2118a5155b87SDarrick J. Wong 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2119a5155b87SDarrick J. Wong 				sizeof(*dip), __this_address);
2120f2fc16a3SDarrick J. Wong 		error = -EFSCORRUPTED;
2121f2fc16a3SDarrick J. Wong 		goto out;
2122f2fc16a3SDarrick J. Wong 	}
2123f2fc16a3SDarrick J. Wong 
2124f2fc16a3SDarrick J. Wong 	/*
2125f2fc16a3SDarrick J. Wong 	 * Since we're updating a linked list, we should never find that the
2126f2fc16a3SDarrick J. Wong 	 * current pointer is the same as the new value, unless we're
2127f2fc16a3SDarrick J. Wong 	 * terminating the list.
2128f2fc16a3SDarrick J. Wong 	 */
2129*a83d5a8bSDave Chinner 	if (old_next_agino)
2130f2fc16a3SDarrick J. Wong 		*old_next_agino = old_value;
2131f2fc16a3SDarrick J. Wong 	if (old_value == next_agino) {
2132a5155b87SDarrick J. Wong 		if (next_agino != NULLAGINO) {
2133a5155b87SDarrick J. Wong 			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2134a5155b87SDarrick J. Wong 					dip, sizeof(*dip), __this_address);
2135f2fc16a3SDarrick J. Wong 			error = -EFSCORRUPTED;
2136a5155b87SDarrick J. Wong 		}
2137f2fc16a3SDarrick J. Wong 		goto out;
2138f2fc16a3SDarrick J. Wong 	}
2139f2fc16a3SDarrick J. Wong 
2140f2fc16a3SDarrick J. Wong 	/* Ok, update the new pointer. */
2141f40aadb2SDave Chinner 	xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
2142f2fc16a3SDarrick J. Wong 			ibp, dip, &ip->i_imap, next_agino);
2143f2fc16a3SDarrick J. Wong 	return 0;
2144f2fc16a3SDarrick J. Wong out:
2145f2fc16a3SDarrick J. Wong 	xfs_trans_brelse(tp, ibp);
2146f2fc16a3SDarrick J. Wong 	return error;
2147f2fc16a3SDarrick J. Wong }
2148f2fc16a3SDarrick J. Wong 
2149a4454cd6SDave Chinner static int
2150a4454cd6SDave Chinner xfs_iunlink_insert_inode(
2151a4454cd6SDave Chinner 	struct xfs_trans	*tp,
2152a4454cd6SDave Chinner 	struct xfs_perag	*pag,
2153a4454cd6SDave Chinner 	struct xfs_buf		*agibp,
2154a4454cd6SDave Chinner 	struct xfs_inode	*ip)
2155a4454cd6SDave Chinner {
2156a4454cd6SDave Chinner 	struct xfs_mount	*mp = tp->t_mountp;
2157a4454cd6SDave Chinner 	struct xfs_agi		*agi = agibp->b_addr;
2158a4454cd6SDave Chinner 	xfs_agino_t		next_agino;
2159a4454cd6SDave Chinner 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2160a4454cd6SDave Chinner 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2161a4454cd6SDave Chinner 	int			error;
2162a4454cd6SDave Chinner 
2163a4454cd6SDave Chinner 	/*
2164a4454cd6SDave Chinner 	 * Get the index into the agi hash table for the list this inode will
2165a4454cd6SDave Chinner 	 * go on.  Make sure the pointer isn't garbage and that this inode
2166a4454cd6SDave Chinner 	 * isn't already on the list.
2167a4454cd6SDave Chinner 	 */
2168a4454cd6SDave Chinner 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2169a4454cd6SDave Chinner 	if (next_agino == agino ||
2170a4454cd6SDave Chinner 	    !xfs_verify_agino_or_null(pag, next_agino)) {
2171a4454cd6SDave Chinner 		xfs_buf_mark_corrupt(agibp);
2172a4454cd6SDave Chinner 		return -EFSCORRUPTED;
2173a4454cd6SDave Chinner 	}
2174a4454cd6SDave Chinner 
2175a4454cd6SDave Chinner 	if (next_agino != NULLAGINO) {
2176a4454cd6SDave Chinner 		xfs_agino_t		old_agino;
2177a4454cd6SDave Chinner 
2178a4454cd6SDave Chinner 		/*
2179a4454cd6SDave Chinner 		 * There is already another inode in the bucket, so point this
2180a4454cd6SDave Chinner 		 * inode to the current head of the list.
2181a4454cd6SDave Chinner 		 */
2182a4454cd6SDave Chinner 		error = xfs_iunlink_update_inode(tp, ip, pag, next_agino,
2183a4454cd6SDave Chinner 				&old_agino);
2184a4454cd6SDave Chinner 		if (error)
2185a4454cd6SDave Chinner 			return error;
2186a4454cd6SDave Chinner 		ASSERT(old_agino == NULLAGINO);
21874fcc94d6SDave Chinner 		ip->i_next_unlinked = next_agino;
2188a4454cd6SDave Chinner 
2189a4454cd6SDave Chinner 		/*
2190a4454cd6SDave Chinner 		 * agino has been unlinked, add a backref from the next inode
2191a4454cd6SDave Chinner 		 * back to agino.
2192a4454cd6SDave Chinner 		 */
2193a4454cd6SDave Chinner 		error = xfs_iunlink_add_backref(pag, agino, next_agino);
2194a4454cd6SDave Chinner 		if (error)
2195a4454cd6SDave Chinner 			return error;
2196a4454cd6SDave Chinner 	}
2197a4454cd6SDave Chinner 
2198a4454cd6SDave Chinner 	/* Point the head of the list to point to this inode. */
2199a4454cd6SDave Chinner 	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
2200a4454cd6SDave Chinner }
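
/*
 * A minimal standalone sketch of the list shape manipulated above: the AGI
 * bucket stores only the head inode number and each inode stores the next
 * pointer, so an insert is a plain push-front.  The sketch_* types and the
 * NULL sentinel are simplified stand-ins for the on-disk format.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

#define SKETCH_NULLAGINO	((uint32_t)-1)

struct sketch_inode {
	uint32_t	ino;		/* AG-relative inode number */
	uint32_t	next_unlinked;	/* stand-in for di_next_unlinked */
};

/* Push @ip onto the front of the bucket whose head is *@bucket_head. */
static void
sketch_iunlink_insert(uint32_t *bucket_head, struct sketch_inode *ip)
{
	/* Point the new entry at the current head (possibly the sentinel). */
	ip->next_unlinked = *bucket_head;
	/* Point the bucket head at the new entry. */
	*bucket_head = ip->ino;
}
#endif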
2201a4454cd6SDave Chinner 
22029a4a5118SDarrick J. Wong /*
2203c4a6bf7fSDarrick J. Wong  * This is called when the inode's link count has gone to 0 or we are creating
2204c4a6bf7fSDarrick J. Wong  * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
220554d7b5c1SDave Chinner  *
220654d7b5c1SDave Chinner  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
220754d7b5c1SDave Chinner  * list when the inode is freed.
22081da177e4SLinus Torvalds  */
220954d7b5c1SDave Chinner STATIC int
22101da177e4SLinus Torvalds xfs_iunlink(
221154d7b5c1SDave Chinner 	struct xfs_trans	*tp,
221254d7b5c1SDave Chinner 	struct xfs_inode	*ip)
22131da177e4SLinus Torvalds {
22145837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2215f40aadb2SDave Chinner 	struct xfs_perag	*pag;
22165837f625SDarrick J. Wong 	struct xfs_buf		*agibp;
22171da177e4SLinus Torvalds 	int			error;
22181da177e4SLinus Torvalds 
2219c4a6bf7fSDarrick J. Wong 	ASSERT(VFS_I(ip)->i_nlink == 0);
2220c19b3b05SDave Chinner 	ASSERT(VFS_I(ip)->i_mode != 0);
22214664c66cSDarrick J. Wong 	trace_xfs_iunlink(ip);
22221da177e4SLinus Torvalds 
2223f40aadb2SDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2224f40aadb2SDave Chinner 
22255837f625SDarrick J. Wong 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
222661021debSDave Chinner 	error = xfs_read_agi(pag, tp, &agibp);
2227859d7182SVlad Apostolov 	if (error)
2228f40aadb2SDave Chinner 		goto out;
22295e1be0fbSChristoph Hellwig 
2230a4454cd6SDave Chinner 	error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
2231f40aadb2SDave Chinner out:
2232f40aadb2SDave Chinner 	xfs_perag_put(pag);
2233f40aadb2SDave Chinner 	return error;
22341da177e4SLinus Torvalds }
22351da177e4SLinus Torvalds 
223623ffa52cSDarrick J. Wong /*
223723ffa52cSDarrick J. Wong  * Walk the unlinked chain from @head_agino until we find the inode that
223823ffa52cSDarrick J. Wong  * points to @target_agino.  Return that inode's in-core structure as @ipp.
223923ffa52cSDarrick J. Wong  *
224023ffa52cSDarrick J. Wong  * @pag, @head_agino, and @target_agino are input parameters; @ipp is the
224123ffa52cSDarrick J. Wong  * only output parameter.
224323ffa52cSDarrick J. Wong  *
224423ffa52cSDarrick J. Wong  * Do not call this function if @target_agino is the head of the list.
224523ffa52cSDarrick J. Wong  */
2246*a83d5a8bSDave Chinner static int
2247*a83d5a8bSDave Chinner xfs_iunlink_lookup_prev(
2248f40aadb2SDave Chinner 	struct xfs_perag	*pag,
224923ffa52cSDarrick J. Wong 	xfs_agino_t		head_agino,
225023ffa52cSDarrick J. Wong 	xfs_agino_t		target_agino,
2251*a83d5a8bSDave Chinner 	struct xfs_inode	**ipp)
225223ffa52cSDarrick J. Wong {
2253*a83d5a8bSDave Chinner 	struct xfs_inode	*ip;
225423ffa52cSDarrick J. Wong 	xfs_agino_t		next_agino;
225523ffa52cSDarrick J. Wong 
2256*a83d5a8bSDave Chinner 	*ipp = NULL;
225723ffa52cSDarrick J. Wong 
2258*a83d5a8bSDave Chinner 	next_agino = xfs_iunlink_lookup_backref(pag, target_agino);
2259*a83d5a8bSDave Chinner 	if (next_agino != NULLAGINO) {
2260*a83d5a8bSDave Chinner 		ip = xfs_iunlink_lookup(pag, next_agino);
2261*a83d5a8bSDave Chinner 		if (ip && ip->i_next_unlinked == target_agino) {
2262*a83d5a8bSDave Chinner 			*ipp = ip;
22639b247179SDarrick J. Wong 			return 0;
22649b247179SDarrick J. Wong 		}
2265*a83d5a8bSDave Chinner 	}
22669b247179SDarrick J. Wong 
22679b247179SDarrick J. Wong 	/* Otherwise, walk the entire bucket until we find it. */
226823ffa52cSDarrick J. Wong 	next_agino = head_agino;
2269*a83d5a8bSDave Chinner 	while (next_agino != NULLAGINO) {
2270*a83d5a8bSDave Chinner 		ip = xfs_iunlink_lookup(pag, next_agino);
2271*a83d5a8bSDave Chinner 		if (!ip)
2272*a83d5a8bSDave Chinner 			return -EFSCORRUPTED;
227323ffa52cSDarrick J. Wong 
227423ffa52cSDarrick J. Wong 		/*
227523ffa52cSDarrick J. Wong 		 * Make sure this pointer is valid and isn't an obvious
227623ffa52cSDarrick J. Wong 		 * infinite loop.
227723ffa52cSDarrick J. Wong 		 */
2278*a83d5a8bSDave Chinner 		if (!xfs_verify_agino(pag, ip->i_next_unlinked) ||
2279*a83d5a8bSDave Chinner 		    next_agino == ip->i_next_unlinked)
2280*a83d5a8bSDave Chinner 			return -EFSCORRUPTED;
228123ffa52cSDarrick J. Wong 
2282*a83d5a8bSDave Chinner 		if (ip->i_next_unlinked == target_agino) {
2283*a83d5a8bSDave Chinner 			*ipp = ip;
228423ffa52cSDarrick J. Wong 			return 0;
228523ffa52cSDarrick J. Wong 		}
2286*a83d5a8bSDave Chinner 		next_agino = ip->i_next_unlinked;
2287*a83d5a8bSDave Chinner 	}
2288*a83d5a8bSDave Chinner 	return -EFSCORRUPTED;
2289*a83d5a8bSDave Chinner }
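
/*
 * A minimal standalone sketch of the predecessor search above: consult a
 * cached back-reference first, then fall back to walking the bucket from the
 * head with the same self-loop guard.  The fixed-size array stands in for
 * the in-memory inode cache and the back-reference lookup is a stub; none of
 * the sketch_* names are real XFS interfaces.
 */
#if 0	/* illustrative sketch only */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_NULLAGINO	((uint32_t)-1)

struct sketch_inode {
	uint32_t	ino;
	uint32_t	next_unlinked;
};

static struct sketch_inode sketch_icache[16];	/* toy in-memory inode cache */

static struct sketch_inode *
sketch_ilookup(uint32_t ino)
{
	for (size_t i = 0; i < 16; i++)
		if (sketch_icache[i].ino == ino)
			return &sketch_icache[i];
	return NULL;
}

/* Stub back-reference cache; a real one would remember each successor. */
static uint32_t
sketch_lookup_backref(uint32_t target_agino)
{
	(void)target_agino;
	return SKETCH_NULLAGINO;
}

/* Find the inode whose next pointer is @target_agino, or NULL on failure. */
static struct sketch_inode *
sketch_lookup_prev(uint32_t head_agino, uint32_t target_agino)
{
	struct sketch_inode	*ip;
	uint32_t		agino;

	/* Fast path: the back-reference names the predecessor directly. */
	agino = sketch_lookup_backref(target_agino);
	if (agino != SKETCH_NULLAGINO) {
		ip = sketch_ilookup(agino);
		if (ip && ip->next_unlinked == target_agino)
			return ip;
	}

	/* Slow path: walk the whole bucket from the head. */
	for (agino = head_agino; agino != SKETCH_NULLAGINO;
	     agino = ip->next_unlinked) {
		ip = sketch_ilookup(agino);
		/* Bail out on a missing entry or an obvious self-loop. */
		if (!ip || ip->next_unlinked == agino)
			return NULL;
		if (ip->next_unlinked == target_agino)
			return ip;
	}
	return NULL;
}
#endif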
229023ffa52cSDarrick J. Wong 
2291a4454cd6SDave Chinner static int
2292a4454cd6SDave Chinner xfs_iunlink_remove_inode(
22935837f625SDarrick J. Wong 	struct xfs_trans	*tp,
2294f40aadb2SDave Chinner 	struct xfs_perag	*pag,
2295a4454cd6SDave Chinner 	struct xfs_buf		*agibp,
22965837f625SDarrick J. Wong 	struct xfs_inode	*ip)
22971da177e4SLinus Torvalds {
22985837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
2299a4454cd6SDave Chinner 	struct xfs_agi		*agi = agibp->b_addr;
23005837f625SDarrick J. Wong 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
23011da177e4SLinus Torvalds 	xfs_agino_t		next_agino;
2302b1d2a068SDarrick J. Wong 	xfs_agino_t		head_agino;
23035837f625SDarrick J. Wong 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
23041da177e4SLinus Torvalds 	int			error;
23051da177e4SLinus Torvalds 
23064664c66cSDarrick J. Wong 	trace_xfs_iunlink_remove(ip);
23074664c66cSDarrick J. Wong 
23081da177e4SLinus Torvalds 	/*
230986bfd375SDarrick J. Wong 	 * Get the index into the agi hash table for the list this inode is
231086bfd375SDarrick J. Wong 	 * on.  Make sure the head pointer isn't garbage.
23111da177e4SLinus Torvalds 	 */
2312b1d2a068SDarrick J. Wong 	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
23132d6ca832SDave Chinner 	if (!xfs_verify_agino(pag, head_agino)) {
2314d2e73665SDarrick J. Wong 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2315d2e73665SDarrick J. Wong 				agi, sizeof(*agi));
2316d2e73665SDarrick J. Wong 		return -EFSCORRUPTED;
2317d2e73665SDarrick J. Wong 	}
23181da177e4SLinus Torvalds 
23191da177e4SLinus Torvalds 	/*
2320b1d2a068SDarrick J. Wong 	 * Set our inode's next_unlinked pointer to NULL and then return
2321b1d2a068SDarrick J. Wong 	 * the old pointer value so that we can update whatever was previous
2322b1d2a068SDarrick J. Wong 	 * to us in the list to point to whatever was next in the list.
23231da177e4SLinus Torvalds 	 */
2324f40aadb2SDave Chinner 	error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino);
2325f2fc16a3SDarrick J. Wong 	if (error)
23261da177e4SLinus Torvalds 		return error;
23279a4a5118SDarrick J. Wong 
23289b247179SDarrick J. Wong 	/*
23299b247179SDarrick J. Wong 	 * If there was a backref pointing from the next inode back to this
23309b247179SDarrick J. Wong 	 * one, remove it because we've removed this inode from the list.
23319b247179SDarrick J. Wong 	 *
23329b247179SDarrick J. Wong 	 * Later, if this inode was in the middle of the list we'll update
23339b247179SDarrick J. Wong 	 * this inode's backref to point from the next inode.
23349b247179SDarrick J. Wong 	 */
23359b247179SDarrick J. Wong 	if (next_agino != NULLAGINO) {
2336f40aadb2SDave Chinner 		error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO);
23379b247179SDarrick J. Wong 		if (error)
233892a00544SGao Xiang 			return error;
23399b247179SDarrick J. Wong 	}
23409b247179SDarrick J. Wong 
234192a00544SGao Xiang 	if (head_agino != agino) {
2342*a83d5a8bSDave Chinner 		struct xfs_inode	*prev_ip;
2343f2fc16a3SDarrick J. Wong 
2344*a83d5a8bSDave Chinner 		error = xfs_iunlink_lookup_prev(pag, head_agino, agino,
2345*a83d5a8bSDave Chinner 				&prev_ip);
234623ffa52cSDarrick J. Wong 		if (error)
234792a00544SGao Xiang 			return error;
2348475ee413SChristoph Hellwig 
2349f2fc16a3SDarrick J. Wong 		/* Point the previous inode on the list to the next inode. */
2350*a83d5a8bSDave Chinner 		error = xfs_iunlink_update_inode(tp, prev_ip, pag, next_agino,
2351*a83d5a8bSDave Chinner 				NULL);
2352*a83d5a8bSDave Chinner 		if (error)
2353*a83d5a8bSDave Chinner 			return error;
2354*a83d5a8bSDave Chinner 
2355*a83d5a8bSDave Chinner 		prev_ip->i_next_unlinked = ip->i_next_unlinked;
2356*a83d5a8bSDave Chinner 		ip->i_next_unlinked = NULLAGINO;
23579b247179SDarrick J. Wong 
23589b247179SDarrick J. Wong 		/*
23599b247179SDarrick J. Wong 		 * Now we deal with the backref for this inode.  If this inode
23609b247179SDarrick J. Wong 		 * pointed at a real inode, change the backref that pointed to
23619b247179SDarrick J. Wong 		 * us to point to our old next.  If this inode was the end of
23629b247179SDarrick J. Wong 		 * the list, delete the backref that pointed to us.  Note that
23639b247179SDarrick J. Wong 		 * change_backref takes care of deleting the backref if
23649b247179SDarrick J. Wong 		 * next_agino is NULLAGINO.
23659b247179SDarrick J. Wong 		 */
236692a00544SGao Xiang 		return xfs_iunlink_change_backref(agibp->b_pag, agino,
236792a00544SGao Xiang 				next_agino);
23681da177e4SLinus Torvalds 	}
23699b247179SDarrick J. Wong 
237092a00544SGao Xiang 	/* Point the head of the list to the next unlinked inode. */
2371*a83d5a8bSDave Chinner 	ip->i_next_unlinked = NULLAGINO;
2372f40aadb2SDave Chinner 	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
237392a00544SGao Xiang 			next_agino);
23741da177e4SLinus Torvalds }
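
/*
 * A minimal standalone sketch of the two removal cases handled above: when
 * the departing inode is the bucket head, advance the head to its successor;
 * otherwise find the predecessor and splice it over the departing entry.
 * The sketch_* types are the same simplified stand-ins used in the earlier
 * sketches, and sketch_lookup_prev() refers to the predecessor-search sketch.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

#define SKETCH_NULLAGINO	((uint32_t)-1)

struct sketch_inode {
	uint32_t	ino;
	uint32_t	next_unlinked;
};

/* From the predecessor-search sketch shown earlier. */
struct sketch_inode *sketch_lookup_prev(uint32_t head_agino,
		uint32_t target_agino);

/* Remove @ip from the bucket whose head is *@bucket_head. */
static int
sketch_iunlink_remove(uint32_t *bucket_head, struct sketch_inode *ip)
{
	if (*bucket_head == ip->ino) {
		/* Head of the list: point the bucket at our successor. */
		*bucket_head = ip->next_unlinked;
	} else {
		/* Middle or tail: splice the predecessor over us. */
		struct sketch_inode	*prev;

		prev = sketch_lookup_prev(*bucket_head, ip->ino);
		if (!prev)
			return -1;	/* corrupt or broken chain */
		prev->next_unlinked = ip->next_unlinked;
	}
	/* The departing inode no longer points into the list. */
	ip->next_unlinked = SKETCH_NULLAGINO;
	return 0;
}
#endif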
23751da177e4SLinus Torvalds 
23765b3eed75SDave Chinner /*
2377a4454cd6SDave Chinner  * Pull the on-disk inode from the AGI unlinked list.
2378a4454cd6SDave Chinner  */
2379a4454cd6SDave Chinner STATIC int
2380a4454cd6SDave Chinner xfs_iunlink_remove(
2381a4454cd6SDave Chinner 	struct xfs_trans	*tp,
2382a4454cd6SDave Chinner 	struct xfs_perag	*pag,
2383a4454cd6SDave Chinner 	struct xfs_inode	*ip)
2384a4454cd6SDave Chinner {
2385a4454cd6SDave Chinner 	struct xfs_buf		*agibp;
2386a4454cd6SDave Chinner 	int			error;
2387a4454cd6SDave Chinner 
2388a4454cd6SDave Chinner 	trace_xfs_iunlink_remove(ip);
2389a4454cd6SDave Chinner 
2390a4454cd6SDave Chinner 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2391a4454cd6SDave Chinner 	error = xfs_read_agi(pag, tp, &agibp);
2392a4454cd6SDave Chinner 	if (error)
2393a4454cd6SDave Chinner 		return error;
2394a4454cd6SDave Chinner 
2395a4454cd6SDave Chinner 	return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
2396a4454cd6SDave Chinner }
2397a4454cd6SDave Chinner 
2398a4454cd6SDave Chinner /*
239971e3e356SDave Chinner  * Look up the inode number specified and, if it is not already marked
240071e3e356SDave Chinner  * XFS_ISTALE, mark it stale. We should only find clean inodes in this lookup
240171e3e356SDave Chinner  * that aren't already stale.
24025806165aSDave Chinner  */
240371e3e356SDave Chinner static void
240471e3e356SDave Chinner xfs_ifree_mark_inode_stale(
2405f40aadb2SDave Chinner 	struct xfs_perag	*pag,
24065806165aSDave Chinner 	struct xfs_inode	*free_ip,
2407d9fdd0adSBrian Foster 	xfs_ino_t		inum)
24085806165aSDave Chinner {
2409f40aadb2SDave Chinner 	struct xfs_mount	*mp = pag->pag_mount;
241071e3e356SDave Chinner 	struct xfs_inode_log_item *iip;
24115806165aSDave Chinner 	struct xfs_inode	*ip;
24125806165aSDave Chinner 
24135806165aSDave Chinner retry:
24145806165aSDave Chinner 	rcu_read_lock();
24155806165aSDave Chinner 	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
24165806165aSDave Chinner 
24175806165aSDave Chinner 	/* Inode not in memory, nothing to do */
241871e3e356SDave Chinner 	if (!ip) {
241971e3e356SDave Chinner 		rcu_read_unlock();
242071e3e356SDave Chinner 		return;
242171e3e356SDave Chinner 	}
24225806165aSDave Chinner 
24235806165aSDave Chinner 	/*
24245806165aSDave Chinner 	 * Because this is an RCU-protected lookup, we could find a recently
24255806165aSDave Chinner 	 * freed or even reallocated inode during the lookup. We need to check
24265806165aSDave Chinner 	 * under the i_flags_lock for a valid inode here. Skip it if it is not
24275806165aSDave Chinner 	 * valid, is the wrong inode, or is stale.
24285806165aSDave Chinner 	 */
24295806165aSDave Chinner 	spin_lock(&ip->i_flags_lock);
2430718ecc50SDave Chinner 	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2431718ecc50SDave Chinner 		goto out_iflags_unlock;
24325806165aSDave Chinner 
24335806165aSDave Chinner 	/*
24345806165aSDave Chinner 	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
24355806165aSDave Chinner 	 * other inodes that we did not find in the list attached to the buffer
24365806165aSDave Chinner 	 * and are not already marked stale. If we can't lock it, back off and
24375806165aSDave Chinner 	 * retry.
24385806165aSDave Chinner 	 */
24395806165aSDave Chinner 	if (ip != free_ip) {
24405806165aSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
244171e3e356SDave Chinner 			spin_unlock(&ip->i_flags_lock);
24425806165aSDave Chinner 			rcu_read_unlock();
24435806165aSDave Chinner 			delay(1);
24445806165aSDave Chinner 			goto retry;
24455806165aSDave Chinner 		}
24465806165aSDave Chinner 	}
244771e3e356SDave Chinner 	ip->i_flags |= XFS_ISTALE;
24485806165aSDave Chinner 
244971e3e356SDave Chinner 	/*
2450718ecc50SDave Chinner 	 * If the inode is flushing, it is already attached to the buffer.  All
245171e3e356SDave Chinner 	 * we needed to do here is mark the inode stale so buffer IO completion
245271e3e356SDave Chinner 	 * will remove it from the AIL.
245371e3e356SDave Chinner 	 */
245471e3e356SDave Chinner 	iip = ip->i_itemp;
2455718ecc50SDave Chinner 	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
245671e3e356SDave Chinner 		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
245771e3e356SDave Chinner 		ASSERT(iip->ili_last_fields);
245871e3e356SDave Chinner 		goto out_iunlock;
245971e3e356SDave Chinner 	}
24605806165aSDave Chinner 
24615806165aSDave Chinner 	/*
246248d55e2aSDave Chinner 	 * Inodes not attached to the buffer can be released immediately.
246348d55e2aSDave Chinner 	 * Everything else has to go through xfs_iflush_abort() on journal
246448d55e2aSDave Chinner 	 * commit as the flock synchronises removal of the inode from the
246548d55e2aSDave Chinner 	 * cluster buffer against inode reclaim.
24665806165aSDave Chinner 	 */
2467718ecc50SDave Chinner 	if (!iip || list_empty(&iip->ili_item.li_bio_list))
246871e3e356SDave Chinner 		goto out_iunlock;
2469718ecc50SDave Chinner 
2470718ecc50SDave Chinner 	__xfs_iflags_set(ip, XFS_IFLUSHING);
2471718ecc50SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2472718ecc50SDave Chinner 	rcu_read_unlock();
24735806165aSDave Chinner 
247471e3e356SDave Chinner 	/* we have a dirty inode in memory that has not yet been flushed. */
247571e3e356SDave Chinner 	spin_lock(&iip->ili_lock);
247671e3e356SDave Chinner 	iip->ili_last_fields = iip->ili_fields;
247771e3e356SDave Chinner 	iip->ili_fields = 0;
247871e3e356SDave Chinner 	iip->ili_fsync_fields = 0;
247971e3e356SDave Chinner 	spin_unlock(&iip->ili_lock);
248071e3e356SDave Chinner 	ASSERT(iip->ili_last_fields);
248171e3e356SDave Chinner 
2482718ecc50SDave Chinner 	if (ip != free_ip)
2483718ecc50SDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2484718ecc50SDave Chinner 	return;
2485718ecc50SDave Chinner 
248671e3e356SDave Chinner out_iunlock:
248771e3e356SDave Chinner 	if (ip != free_ip)
248871e3e356SDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2489718ecc50SDave Chinner out_iflags_unlock:
2490718ecc50SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2491718ecc50SDave Chinner 	rcu_read_unlock();
24925806165aSDave Chinner }
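
/*
 * A minimal userspace analogue of the back-off-and-retry pattern above: when
 * a trylock on the looked-up object fails while another lock is already held,
 * drop everything, wait briefly, and restart the lookup instead of blocking
 * and risking a lock-order deadlock.  pthreads and the sketch_* names are
 * stand-ins; the kernel code uses RCU, i_flags_lock and the inode ILOCK.
 */
#if 0	/* illustrative sketch only */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct sketch_obj {
	pthread_mutex_t	lock;
	bool		stale;
};

static pthread_mutex_t sketch_cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical cache lookup; must be called with sketch_cache_lock held. */
struct sketch_obj *sketch_cache_lookup(int id);

/* Mark object @id stale without deadlocking against concurrent lookups. */
static void
sketch_mark_stale(int id)
{
	struct sketch_obj	*obj;

retry:
	pthread_mutex_lock(&sketch_cache_lock);
	obj = sketch_cache_lookup(id);
	if (!obj) {
		pthread_mutex_unlock(&sketch_cache_lock);
		return;
	}
	if (pthread_mutex_trylock(&obj->lock) != 0) {
		/* Contended: drop the cache lock, back off, then retry. */
		pthread_mutex_unlock(&sketch_cache_lock);
		usleep(1000);
		goto retry;
	}
	obj->stale = true;
	pthread_mutex_unlock(&obj->lock);
	pthread_mutex_unlock(&sketch_cache_lock);
}
#endif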
24935806165aSDave Chinner 
24945806165aSDave Chinner /*
24950b8182dbSZhi Yong Wu  * A big issue when freeing the inode cluster is that we _cannot_ skip any
24965b3eed75SDave Chinner  * inodes that are in memory - they all must be marked stale and attached to
24975b3eed75SDave Chinner  * the cluster buffer.
24985b3eed75SDave Chinner  */
2499f40aadb2SDave Chinner static int
25001da177e4SLinus Torvalds xfs_ifree_cluster(
250171e3e356SDave Chinner 	struct xfs_trans	*tp,
2502f40aadb2SDave Chinner 	struct xfs_perag	*pag,
2503f40aadb2SDave Chinner 	struct xfs_inode	*free_ip,
250409b56604SBrian Foster 	struct xfs_icluster	*xic)
25051da177e4SLinus Torvalds {
250671e3e356SDave Chinner 	struct xfs_mount	*mp = free_ip->i_mount;
250771e3e356SDave Chinner 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
250871e3e356SDave Chinner 	struct xfs_buf		*bp;
250971e3e356SDave Chinner 	xfs_daddr_t		blkno;
251071e3e356SDave Chinner 	xfs_ino_t		inum = xic->first_ino;
25111da177e4SLinus Torvalds 	int			nbufs;
25125b257b4aSDave Chinner 	int			i, j;
25133cdaa189SBrian Foster 	int			ioffset;
2514ce92464cSDarrick J. Wong 	int			error;
25151da177e4SLinus Torvalds 
2516ef325959SDarrick J. Wong 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
25171da177e4SLinus Torvalds 
2518ef325959SDarrick J. Wong 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
251909b56604SBrian Foster 		/*
252009b56604SBrian Foster 		 * The allocation bitmap tells us which inodes of the chunk were
252109b56604SBrian Foster 		 * physically allocated. Skip the cluster if an inode falls into
252209b56604SBrian Foster 		 * a sparse region.
252309b56604SBrian Foster 		 */
25243cdaa189SBrian Foster 		ioffset = inum - xic->first_ino;
25253cdaa189SBrian Foster 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2526ef325959SDarrick J. Wong 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
252709b56604SBrian Foster 			continue;
252809b56604SBrian Foster 		}
252909b56604SBrian Foster 
25301da177e4SLinus Torvalds 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
25311da177e4SLinus Torvalds 					 XFS_INO_TO_AGBNO(mp, inum));
25321da177e4SLinus Torvalds 
25331da177e4SLinus Torvalds 		/*
25345b257b4aSDave Chinner 		 * We obtain and lock the backing buffer first in the process
2535718ecc50SDave Chinner 		 * here to ensure dirty inodes attached to the buffer remain in
2536718ecc50SDave Chinner 		 * the flushing state while we mark them stale.
2537718ecc50SDave Chinner 		 *
25385b257b4aSDave Chinner 		 * If we scan the in-memory inodes first, then buffer IO can
25395b257b4aSDave Chinner 		 * complete before we get a lock on it, and hence we may fail
25405b257b4aSDave Chinner 		 * to mark all the active inodes on the buffer stale.
25411da177e4SLinus Torvalds 		 */
2542ce92464cSDarrick J. Wong 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2543ef325959SDarrick J. Wong 				mp->m_bsize * igeo->blocks_per_cluster,
2544ce92464cSDarrick J. Wong 				XBF_UNMAPPED, &bp);
254571e3e356SDave Chinner 		if (error)
2546ce92464cSDarrick J. Wong 			return error;
2547b0f539deSDave Chinner 
2548b0f539deSDave Chinner 		/*
2549b0f539deSDave Chinner 		 * This buffer may not have been correctly initialised as we
2550b0f539deSDave Chinner 		 * didn't read it from disk. That's not important because we are
2551b0f539deSDave Chinner 		 * only using it to mark the buffer as stale in the log, and to
2552b0f539deSDave Chinner 		 * attach stale cached inodes on it. That means it will never be
2553b0f539deSDave Chinner 		 * dispatched for IO. If it is, we want to know about it, and we
2554b0f539deSDave Chinner 		 * want it to fail. We can achieve this by adding a write
2555b0f539deSDave Chinner 		 * verifier to the buffer.
2556b0f539deSDave Chinner 		 */
25571813dd64SDave Chinner 		bp->b_ops = &xfs_inode_buf_ops;
2558b0f539deSDave Chinner 
25595b257b4aSDave Chinner 		/*
256071e3e356SDave Chinner 		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
256171e3e356SDave Chinner 		 * too. This requires lookups, and will skip inodes that we've
256271e3e356SDave Chinner 		 * already marked XFS_ISTALE.
25635b257b4aSDave Chinner 		 */
256471e3e356SDave Chinner 		for (i = 0; i < igeo->inodes_per_cluster; i++)
2565f40aadb2SDave Chinner 			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
25661da177e4SLinus Torvalds 
25671da177e4SLinus Torvalds 		xfs_trans_stale_inode_buf(tp, bp);
25681da177e4SLinus Torvalds 		xfs_trans_binval(tp, bp);
25691da177e4SLinus Torvalds 	}
25702a30f36dSChandra Seetharaman 	return 0;
25711da177e4SLinus Torvalds }
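
/*
 * A minimal standalone sketch of the sparse-chunk skip above: the allocation
 * bitmap carries one bit per inode in the chunk, and a whole cluster is
 * skipped when the bit for its first inode is clear.  The geometry constants
 * and sketch_* names are simplified stand-ins, not values read from a real
 * superblock.
 */
#if 0	/* illustrative sketch only */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_INODES_PER_CHUNK		64
#define SKETCH_INODES_PER_CLUSTER	16

/* Does the cluster starting at @inum contain physically allocated inodes? */
static bool
sketch_cluster_is_allocated(uint64_t first_ino, uint64_t inum,
		uint64_t alloc_bitmap)
{
	uint64_t	ioffset = inum - first_ino;	/* bit index in chunk */

	/* A clear bit means the inode falls in a sparse (unallocated) hole. */
	return (alloc_bitmap & ((uint64_t)1 << ioffset)) != 0;
}

/* Walk a chunk cluster by cluster, skipping sparse holes like the loop above. */
static void
sketch_walk_chunk(uint64_t first_ino, uint64_t alloc_bitmap)
{
	int		nbufs = SKETCH_INODES_PER_CHUNK /
					SKETCH_INODES_PER_CLUSTER;
	uint64_t	inum = first_ino;

	for (int j = 0; j < nbufs; j++, inum += SKETCH_INODES_PER_CLUSTER) {
		if (!sketch_cluster_is_allocated(first_ino, inum, alloc_bitmap))
			continue;
		/* ...mark the cluster's in-memory inodes stale here... */
	}
}
#endif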
25721da177e4SLinus Torvalds 
25731da177e4SLinus Torvalds /*
25749a5280b3SDave Chinner  * This is called to return an inode to the inode free list.  The inode should
25759a5280b3SDave Chinner  * already be truncated to 0 length and have no pages associated with it.  This
25769a5280b3SDave Chinner  * routine also assumes that the inode is already a part of the transaction.
25771da177e4SLinus Torvalds  *
25789a5280b3SDave Chinner  * The on-disk copy of the inode will have been added to the list of unlinked
25799a5280b3SDave Chinner  * inodes in the AGI. We need to remove the inode from that list atomically with
25809a5280b3SDave Chinner  * respect to freeing it here.
25811da177e4SLinus Torvalds  */
25821da177e4SLinus Torvalds int
25831da177e4SLinus Torvalds xfs_ifree(
25840e0417f3SBrian Foster 	struct xfs_trans	*tp,
25850e0417f3SBrian Foster 	struct xfs_inode	*ip)
25861da177e4SLinus Torvalds {
2587f40aadb2SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
2588f40aadb2SDave Chinner 	struct xfs_perag	*pag;
258909b56604SBrian Foster 	struct xfs_icluster	xic = { 0 };
25901319ebefSDave Chinner 	struct xfs_inode_log_item *iip = ip->i_itemp;
2591f40aadb2SDave Chinner 	int			error;
25921da177e4SLinus Torvalds 
2593579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
259454d7b5c1SDave Chinner 	ASSERT(VFS_I(ip)->i_nlink == 0);
2595daf83964SChristoph Hellwig 	ASSERT(ip->i_df.if_nextents == 0);
259613d2c10bSChristoph Hellwig 	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
25976e73a545SChristoph Hellwig 	ASSERT(ip->i_nblocks == 0);
25981da177e4SLinus Torvalds 
2599f40aadb2SDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2600f40aadb2SDave Chinner 
26011da177e4SLinus Torvalds 	/*
26029a5280b3SDave Chinner 	 * Free the inode first so that we guarantee that the AGI lock is going
26039a5280b3SDave Chinner 	 * to be taken before we remove the inode from the unlinked list. This
26049a5280b3SDave Chinner 	 * makes the AGI lock -> unlinked list modification order the same as
26059a5280b3SDave Chinner 	 * used in O_TMPFILE creation.
26061da177e4SLinus Torvalds 	 */
2607f40aadb2SDave Chinner 	error = xfs_difree(tp, pag, ip->i_ino, &xic);
26081baaed8fSDave Chinner 	if (error)
26096f5097e3SBrian Foster 		goto out;
26109a5280b3SDave Chinner 
26119a5280b3SDave Chinner 	error = xfs_iunlink_remove(tp, pag, ip);
26129a5280b3SDave Chinner 	if (error)
2613f40aadb2SDave Chinner 		goto out;
26141baaed8fSDave Chinner 
2615b2c20045SChristoph Hellwig 	/*
2616b2c20045SChristoph Hellwig 	 * Free any local-format data sitting around before we reset the
2617b2c20045SChristoph Hellwig 	 * data fork to extents format.  Note that the attr fork data has
2618b2c20045SChristoph Hellwig 	 * already been freed by xfs_attr_inactive.
2619b2c20045SChristoph Hellwig 	 */
2620f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2621b2c20045SChristoph Hellwig 		kmem_free(ip->i_df.if_u1.if_data);
2622b2c20045SChristoph Hellwig 		ip->i_df.if_u1.if_data = NULL;
2623b2c20045SChristoph Hellwig 		ip->i_df.if_bytes = 0;
2624b2c20045SChristoph Hellwig 	}
262598c4f78dSDarrick J. Wong 
2626c19b3b05SDave Chinner 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2627db07349dSChristoph Hellwig 	ip->i_diflags = 0;
2628f40aadb2SDave Chinner 	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
26297821ea30SChristoph Hellwig 	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2630f7e67b20SChristoph Hellwig 	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
26319b3beb02SChristoph Hellwig 	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
26329b3beb02SChristoph Hellwig 		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2633dc1baa71SEric Sandeen 
2634dc1baa71SEric Sandeen 	/* Don't attempt to replay owner changes for a deleted inode */
26351319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
26361319ebefSDave Chinner 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
26371319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
2638dc1baa71SEric Sandeen 
26391da177e4SLinus Torvalds 	/*
26401da177e4SLinus Torvalds 	 * Bump the generation count so no one will be confused
26411da177e4SLinus Torvalds 	 * by reincarnations of this inode.
26421da177e4SLinus Torvalds 	 */
26439e9a2674SDave Chinner 	VFS_I(ip)->i_generation++;
26441da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
26451da177e4SLinus Torvalds 
264609b56604SBrian Foster 	if (xic.deleted)
2647f40aadb2SDave Chinner 		error = xfs_ifree_cluster(tp, pag, ip, &xic);
2648f40aadb2SDave Chinner out:
2649f40aadb2SDave Chinner 	xfs_perag_put(pag);
26502a30f36dSChandra Seetharaman 	return error;
26511da177e4SLinus Torvalds }
26521da177e4SLinus Torvalds 
26531da177e4SLinus Torvalds /*
265460ec6783SChristoph Hellwig  * This is called to unpin an inode.  The caller must have the inode locked
265560ec6783SChristoph Hellwig  * in at least shared mode so that the buffer cannot be subsequently pinned
265660ec6783SChristoph Hellwig  * once someone is waiting for it to be unpinned.
26571da177e4SLinus Torvalds  */
265860ec6783SChristoph Hellwig static void
2659f392e631SChristoph Hellwig xfs_iunpin(
266060ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
2661a3f74ffbSDavid Chinner {
2662579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2663a3f74ffbSDavid Chinner 
26644aaf15d1SDave Chinner 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
26654aaf15d1SDave Chinner 
2666a3f74ffbSDavid Chinner 	/* Give the log a push to start the unpinning I/O */
26675f9b4b0dSDave Chinner 	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2668a14a348bSChristoph Hellwig 
2669a3f74ffbSDavid Chinner }
2670a3f74ffbSDavid Chinner 
2671f392e631SChristoph Hellwig static void
2672f392e631SChristoph Hellwig __xfs_iunpin_wait(
2673f392e631SChristoph Hellwig 	struct xfs_inode	*ip)
2674f392e631SChristoph Hellwig {
2675f392e631SChristoph Hellwig 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2676f392e631SChristoph Hellwig 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2677f392e631SChristoph Hellwig 
2678f392e631SChristoph Hellwig 	xfs_iunpin(ip);
2679f392e631SChristoph Hellwig 
2680f392e631SChristoph Hellwig 	do {
268121417136SIngo Molnar 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2682f392e631SChristoph Hellwig 		if (xfs_ipincount(ip))
2683f392e631SChristoph Hellwig 			io_schedule();
2684f392e631SChristoph Hellwig 	} while (xfs_ipincount(ip));
268521417136SIngo Molnar 	finish_wait(wq, &wait.wq_entry);
2686f392e631SChristoph Hellwig }
2687f392e631SChristoph Hellwig 
2688777df5afSDave Chinner void
26891da177e4SLinus Torvalds xfs_iunpin_wait(
269060ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
26911da177e4SLinus Torvalds {
2692f392e631SChristoph Hellwig 	if (xfs_ipincount(ip))
2693f392e631SChristoph Hellwig 		__xfs_iunpin_wait(ip);
26941da177e4SLinus Torvalds }
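
/*
 * A minimal userspace analogue of the unpin wait above: start whatever work
 * will drive the pin count to zero, then sleep until it actually reaches
 * zero.  The pthread condition variable and sketch_* names are stand-ins for
 * the kernel's bit waitqueue on __XFS_IPINNED_BIT and the log force.
 */
#if 0	/* illustrative sketch only */
#include <pthread.h>

struct sketch_pinnable {
	pthread_mutex_t	lock;
	pthread_cond_t	unpinned;
	unsigned int	pincount;
};

/* Hypothetical helper that kicks off the work which drops the pin count. */
void sketch_start_unpin(struct sketch_pinnable *p);

static void
sketch_wait_unpinned(struct sketch_pinnable *p)
{
	sketch_start_unpin(p);		/* analogous to the log force */

	pthread_mutex_lock(&p->lock);
	while (p->pincount != 0)
		pthread_cond_wait(&p->unpinned, &p->lock);
	pthread_mutex_unlock(&p->lock);
}

/* The unpinning side would do: lock; if (--pincount == 0) broadcast; unlock. */
#endif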
26951da177e4SLinus Torvalds 
269627320369SDave Chinner /*
269727320369SDave Chinner  * Removing an inode from the namespace involves removing the directory entry
269827320369SDave Chinner  * and dropping the link count on the inode. Removing the directory entry can
269927320369SDave Chinner  * result in locking an AGF (directory blocks were freed) and removing a link
270027320369SDave Chinner  * count can result in placing the inode on an unlinked list which results in
270127320369SDave Chinner  * locking an AGI.
270227320369SDave Chinner  *
270327320369SDave Chinner  * The big problem here is that we have an ordering constraint on AGF and AGI
270427320369SDave Chinner  * locking - inode allocation locks the AGI, then can allocate a new extent for
270527320369SDave Chinner  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
270627320369SDave Chinner  * removes the inode from the unlinked list, requiring that we lock the AGI
270727320369SDave Chinner  * first, and then freeing the inode can result in an inode chunk being freed
270827320369SDave Chinner  * and hence freeing disk space requiring that we lock an AGF.
270927320369SDave Chinner  *
271027320369SDave Chinner  * Hence the ordering that is imposed by other parts of the code is AGI before
271127320369SDave Chinner  * AGF. This means we cannot remove the directory entry before we drop the inode
271227320369SDave Chinner  * reference count and put it on the unlinked list as this results in a lock
271327320369SDave Chinner  * order of AGF then AGI, and this can deadlock against inode allocation and
271427320369SDave Chinner  * freeing. Therefore we must drop the link counts before we remove the
271527320369SDave Chinner  * directory entry.
271627320369SDave Chinner  *
271727320369SDave Chinner  * This is still safe from a transactional point of view - it is not until we
2718310a75a3SDarrick J. Wong  * get to xfs_defer_finish() that we have the possibility of multiple
271927320369SDave Chinner  * transactions in this operation. Hence as long as we remove the directory
272027320369SDave Chinner  * entry and drop the link count in the first transaction of the remove
272127320369SDave Chinner  * operation, there are no transactional constraints on the ordering here.
272227320369SDave Chinner  */
2723c24b5dfaSDave Chinner int
2724c24b5dfaSDave Chinner xfs_remove(
2725c24b5dfaSDave Chinner 	xfs_inode_t             *dp,
2726c24b5dfaSDave Chinner 	struct xfs_name		*name,
2727c24b5dfaSDave Chinner 	xfs_inode_t		*ip)
2728c24b5dfaSDave Chinner {
2729c24b5dfaSDave Chinner 	xfs_mount_t		*mp = dp->i_mount;
2730c24b5dfaSDave Chinner 	xfs_trans_t             *tp = NULL;
2731c19b3b05SDave Chinner 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2732871b9316SDarrick J. Wong 	int			dontcare;
2733c24b5dfaSDave Chinner 	int                     error = 0;
2734c24b5dfaSDave Chinner 	uint			resblks;
2735c24b5dfaSDave Chinner 
2736c24b5dfaSDave Chinner 	trace_xfs_remove(dp, name);
2737c24b5dfaSDave Chinner 
273875c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
27392451337dSDave Chinner 		return -EIO;
2740c24b5dfaSDave Chinner 
2741c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(dp);
2742c24b5dfaSDave Chinner 	if (error)
2743c24b5dfaSDave Chinner 		goto std_return;
2744c24b5dfaSDave Chinner 
2745c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
2746c24b5dfaSDave Chinner 	if (error)
2747c24b5dfaSDave Chinner 		goto std_return;
2748c24b5dfaSDave Chinner 
2749c24b5dfaSDave Chinner 	/*
2750871b9316SDarrick J. Wong 	 * We try to get the real space reservation first, allowing for
2751871b9316SDarrick J. Wong 	 * directory btree deletion(s) implying possible bmap insert(s).  If we
2752871b9316SDarrick J. Wong 	 * can't get the space reservation then we use 0 instead, and avoid the
2753871b9316SDarrick J. Wong 	 * bmap btree insert(s) in the directory code by, if the bmap insert
2754871b9316SDarrick J. Wong 	 * tries to happen, instead trimming the LAST block from the directory.
2755871b9316SDarrick J. Wong 	 *
2756871b9316SDarrick J. Wong 	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2757871b9316SDarrick J. Wong 	 * the directory code can handle a reservationless update and we don't
2758871b9316SDarrick J. Wong 	 * want to prevent a user from trying to free space by deleting things.
2759c24b5dfaSDave Chinner 	 */
2760c24b5dfaSDave Chinner 	resblks = XFS_REMOVE_SPACE_RES(mp);
2761871b9316SDarrick J. Wong 	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2762871b9316SDarrick J. Wong 			&tp, &dontcare);
2763c24b5dfaSDave Chinner 	if (error) {
27642451337dSDave Chinner 		ASSERT(error != -ENOSPC);
2765253f4911SChristoph Hellwig 		goto std_return;
2766c24b5dfaSDave Chinner 	}
2767c24b5dfaSDave Chinner 
2768c24b5dfaSDave Chinner 	/*
2769c24b5dfaSDave Chinner 	 * If we're removing a directory perform some additional validation.
2770c24b5dfaSDave Chinner 	 */
2771c24b5dfaSDave Chinner 	if (is_dir) {
277254d7b5c1SDave Chinner 		ASSERT(VFS_I(ip)->i_nlink >= 2);
277354d7b5c1SDave Chinner 		if (VFS_I(ip)->i_nlink != 2) {
27742451337dSDave Chinner 			error = -ENOTEMPTY;
2775c24b5dfaSDave Chinner 			goto out_trans_cancel;
2776c24b5dfaSDave Chinner 		}
2777c24b5dfaSDave Chinner 		if (!xfs_dir_isempty(ip)) {
27782451337dSDave Chinner 			error = -ENOTEMPTY;
2779c24b5dfaSDave Chinner 			goto out_trans_cancel;
2780c24b5dfaSDave Chinner 		}
2781c24b5dfaSDave Chinner 
278227320369SDave Chinner 		/* Drop the link from ip's "..".  */
2783c24b5dfaSDave Chinner 		error = xfs_droplink(tp, dp);
2784c24b5dfaSDave Chinner 		if (error)
278527320369SDave Chinner 			goto out_trans_cancel;
2786c24b5dfaSDave Chinner 
278727320369SDave Chinner 		/* Drop the "." link from ip to self.  */
2788c24b5dfaSDave Chinner 		error = xfs_droplink(tp, ip);
2789c24b5dfaSDave Chinner 		if (error)
279027320369SDave Chinner 			goto out_trans_cancel;
27915838d035SDarrick J. Wong 
27925838d035SDarrick J. Wong 		/*
27935838d035SDarrick J. Wong 		 * Point the unlinked child directory's ".." entry to the root
27945838d035SDarrick J. Wong 		 * directory to eliminate back-references to inodes that may
27955838d035SDarrick J. Wong 		 * get freed before the child directory is closed.  If the fs
27965838d035SDarrick J. Wong 		 * gets shrunk, this can lead to dirent inode validation errors.
27975838d035SDarrick J. Wong 		 */
27985838d035SDarrick J. Wong 		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
27995838d035SDarrick J. Wong 			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
28005838d035SDarrick J. Wong 					tp->t_mountp->m_sb.sb_rootino, 0);
28015838d035SDarrick J. Wong 			if (error)
28025838d035SDarrick J. Wong 				return error;
28035838d035SDarrick J. Wong 				goto out_trans_cancel;
2804c24b5dfaSDave Chinner 	} else {
2805c24b5dfaSDave Chinner 		/*
2806c24b5dfaSDave Chinner 		 * When removing a non-directory we need to log the parent
2807c24b5dfaSDave Chinner 		 * inode here.  For a directory this is done implicitly
2808c24b5dfaSDave Chinner 		 * by the xfs_droplink call for the ".." entry.
2809c24b5dfaSDave Chinner 		 */
2810c24b5dfaSDave Chinner 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2811c24b5dfaSDave Chinner 	}
281227320369SDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2813c24b5dfaSDave Chinner 
281427320369SDave Chinner 	/* Drop the link from dp to ip. */
2815c24b5dfaSDave Chinner 	error = xfs_droplink(tp, ip);
2816c24b5dfaSDave Chinner 	if (error)
281727320369SDave Chinner 		goto out_trans_cancel;
2818c24b5dfaSDave Chinner 
2819381eee69SBrian Foster 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
282027320369SDave Chinner 	if (error) {
28212451337dSDave Chinner 		ASSERT(error != -ENOENT);
2822c8eac49eSBrian Foster 		goto out_trans_cancel;
282327320369SDave Chinner 	}
282427320369SDave Chinner 
2825c24b5dfaSDave Chinner 	/*
2826c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
2827c24b5dfaSDave Chinner 	 * remove transaction goes to disk before returning to
2828c24b5dfaSDave Chinner 	 * the user.
2829c24b5dfaSDave Chinner 	 */
28300560f31aSDave Chinner 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2831c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
2832c24b5dfaSDave Chinner 
283370393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
2834c24b5dfaSDave Chinner 	if (error)
2835c24b5dfaSDave Chinner 		goto std_return;
2836c24b5dfaSDave Chinner 
28372cd2ef6aSChristoph Hellwig 	if (is_dir && xfs_inode_is_filestream(ip))
2838c24b5dfaSDave Chinner 		xfs_filestream_deassociate(ip);
2839c24b5dfaSDave Chinner 
2840c24b5dfaSDave Chinner 	return 0;
2841c24b5dfaSDave Chinner 
2842c24b5dfaSDave Chinner  out_trans_cancel:
28434906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
2844c24b5dfaSDave Chinner  std_return:
2845c24b5dfaSDave Chinner 	return error;
2846c24b5dfaSDave Chinner }
2847c24b5dfaSDave Chinner 
2848f6bba201SDave Chinner /*
2849f6bba201SDave Chinner  * Enter all inodes for a rename transaction into a sorted array.
2850f6bba201SDave Chinner  */
285195afcf5cSDave Chinner #define __XFS_SORT_INODES	5
2852f6bba201SDave Chinner STATIC void
2853f6bba201SDave Chinner xfs_sort_for_rename(
285495afcf5cSDave Chinner 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
285595afcf5cSDave Chinner 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
285695afcf5cSDave Chinner 	struct xfs_inode	*ip1,	/* in: inode of old entry */
285795afcf5cSDave Chinner 	struct xfs_inode	*ip2,	/* in: inode of new entry */
285895afcf5cSDave Chinner 	struct xfs_inode	*wip,	/* in: whiteout inode */
285995afcf5cSDave Chinner 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
286095afcf5cSDave Chinner 	int			*num_inodes)  /* in/out: inodes in array */
2861f6bba201SDave Chinner {
2862f6bba201SDave Chinner 	int			i, j;
2863f6bba201SDave Chinner 
286495afcf5cSDave Chinner 	ASSERT(*num_inodes == __XFS_SORT_INODES);
286595afcf5cSDave Chinner 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
286695afcf5cSDave Chinner 
2867f6bba201SDave Chinner 	/*
2868f6bba201SDave Chinner 	 * i_tab contains a list of pointers to inodes.  We initialize
2869f6bba201SDave Chinner 	 * the table here & we'll sort it.  We will then use it to
2870f6bba201SDave Chinner 	 * order the acquisition of the inode locks.
2871f6bba201SDave Chinner 	 *
2872f6bba201SDave Chinner 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2873f6bba201SDave Chinner 	 */
287495afcf5cSDave Chinner 	i = 0;
287595afcf5cSDave Chinner 	i_tab[i++] = dp1;
287695afcf5cSDave Chinner 	i_tab[i++] = dp2;
287795afcf5cSDave Chinner 	i_tab[i++] = ip1;
287895afcf5cSDave Chinner 	if (ip2)
287995afcf5cSDave Chinner 		i_tab[i++] = ip2;
288095afcf5cSDave Chinner 	if (wip)
288195afcf5cSDave Chinner 		i_tab[i++] = wip;
288295afcf5cSDave Chinner 	*num_inodes = i;
2883f6bba201SDave Chinner 
2884f6bba201SDave Chinner 	/*
2885f6bba201SDave Chinner 	 * Sort the elements via bubble sort.  (Remember, there are at
288695afcf5cSDave Chinner 	 * most 5 elements to sort, so this is adequate.)
2887f6bba201SDave Chinner 	 */
2888f6bba201SDave Chinner 	for (i = 0; i < *num_inodes; i++) {
2889f6bba201SDave Chinner 		for (j = 1; j < *num_inodes; j++) {
2890f6bba201SDave Chinner 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
289195afcf5cSDave Chinner 				struct xfs_inode *temp = i_tab[j];
2892f6bba201SDave Chinner 				i_tab[j] = i_tab[j-1];
2893f6bba201SDave Chinner 				i_tab[j-1] = temp;
2894f6bba201SDave Chinner 			}
2895f6bba201SDave Chinner 		}
2896f6bba201SDave Chinner 	}
2897f6bba201SDave Chinner }
2898f6bba201SDave Chinner 
2899310606b0SDave Chinner static int
2900310606b0SDave Chinner xfs_finish_rename(
2901c9cfdb38SBrian Foster 	struct xfs_trans	*tp)
2902310606b0SDave Chinner {
2903310606b0SDave Chinner 	/*
2904310606b0SDave Chinner 	 * If this is a synchronous mount, make sure that the rename transaction
2905310606b0SDave Chinner 	 * goes to disk before returning to the user.
2906310606b0SDave Chinner 	 */
29070560f31aSDave Chinner 	if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2908310606b0SDave Chinner 		xfs_trans_set_sync(tp);
2909310606b0SDave Chinner 
291070393313SChristoph Hellwig 	return xfs_trans_commit(tp);
2911310606b0SDave Chinner }
2912310606b0SDave Chinner 
2913f6bba201SDave Chinner /*
2914d31a1825SCarlos Maiolino  * xfs_cross_rename()
2915d31a1825SCarlos Maiolino  *
29160145225eSBhaskar Chowdhury  * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
2917d31a1825SCarlos Maiolino  */
2918d31a1825SCarlos Maiolino STATIC int
2919d31a1825SCarlos Maiolino xfs_cross_rename(
2920d31a1825SCarlos Maiolino 	struct xfs_trans	*tp,
2921d31a1825SCarlos Maiolino 	struct xfs_inode	*dp1,
2922d31a1825SCarlos Maiolino 	struct xfs_name		*name1,
2923d31a1825SCarlos Maiolino 	struct xfs_inode	*ip1,
2924d31a1825SCarlos Maiolino 	struct xfs_inode	*dp2,
2925d31a1825SCarlos Maiolino 	struct xfs_name		*name2,
2926d31a1825SCarlos Maiolino 	struct xfs_inode	*ip2,
2927d31a1825SCarlos Maiolino 	int			spaceres)
2928d31a1825SCarlos Maiolino {
2929d31a1825SCarlos Maiolino 	int		error = 0;
2930d31a1825SCarlos Maiolino 	int		ip1_flags = 0;
2931d31a1825SCarlos Maiolino 	int		ip2_flags = 0;
2932d31a1825SCarlos Maiolino 	int		dp2_flags = 0;
2933d31a1825SCarlos Maiolino 
2934d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in first parent */
2935381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2936d31a1825SCarlos Maiolino 	if (error)
2937eeacd321SDave Chinner 		goto out_trans_abort;
2938d31a1825SCarlos Maiolino 
2939d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in second parent */
2940381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2941d31a1825SCarlos Maiolino 	if (error)
2942eeacd321SDave Chinner 		goto out_trans_abort;
2943d31a1825SCarlos Maiolino 
2944d31a1825SCarlos Maiolino 	/*
2945d31a1825SCarlos Maiolino 	 * If we're renaming one or more directories across different parents,
2946d31a1825SCarlos Maiolino 	 * update the respective ".." entries (and link counts) to match the new
2947d31a1825SCarlos Maiolino 	 * parents.
2948d31a1825SCarlos Maiolino 	 */
2949d31a1825SCarlos Maiolino 	if (dp1 != dp2) {
2950d31a1825SCarlos Maiolino 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2951d31a1825SCarlos Maiolino 
2952c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2953d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2954381eee69SBrian Foster 						dp1->i_ino, spaceres);
2955d31a1825SCarlos Maiolino 			if (error)
2956eeacd321SDave Chinner 				goto out_trans_abort;
2957d31a1825SCarlos Maiolino 
2958d31a1825SCarlos Maiolino 			/* transfer ip2 ".." reference to dp1 */
2959c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2960d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp2);
2961d31a1825SCarlos Maiolino 				if (error)
2962eeacd321SDave Chinner 					goto out_trans_abort;
296391083269SEric Sandeen 				xfs_bumplink(tp, dp1);
2964d31a1825SCarlos Maiolino 			}
2965d31a1825SCarlos Maiolino 
2966d31a1825SCarlos Maiolino 			/*
2967d31a1825SCarlos Maiolino 			 * Although ip1 isn't changed here, userspace needs
2968d31a1825SCarlos Maiolino 			 * to be warned about the change, so that applications
2969d31a1825SCarlos Maiolino 			 * relying on it (like backup tools) will properly
2970d31a1825SCarlos Maiolino 			 * be notified of the change.
2971d31a1825SCarlos Maiolino 			 */
2972d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_CHG;
2973d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2974d31a1825SCarlos Maiolino 		}
2975d31a1825SCarlos Maiolino 
2976c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2977d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2978381eee69SBrian Foster 						dp2->i_ino, spaceres);
2979d31a1825SCarlos Maiolino 			if (error)
2980eeacd321SDave Chinner 				goto out_trans_abort;
2981d31a1825SCarlos Maiolino 
2982d31a1825SCarlos Maiolino 			/* transfer ip1 ".." reference to dp2 */
2983c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2984d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp1);
2985d31a1825SCarlos Maiolino 				if (error)
2986eeacd321SDave Chinner 					goto out_trans_abort;
298791083269SEric Sandeen 				xfs_bumplink(tp, dp2);
2988d31a1825SCarlos Maiolino 			}
2989d31a1825SCarlos Maiolino 
2990d31a1825SCarlos Maiolino 			/*
2991d31a1825SCarlos Maiolino 			 * Although ip2 isn't changed here, userspace needs
2992d31a1825SCarlos Maiolino 			 * relying on it (like backup tools) will properly
2993d31a1825SCarlos Maiolino 			 * be notified of the change.
2994d31a1825SCarlos Maiolino 			 * notify the change
2995d31a1825SCarlos Maiolino 			 */
2996d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2997d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_CHG;
2998d31a1825SCarlos Maiolino 		}
2999d31a1825SCarlos Maiolino 	}
3000d31a1825SCarlos Maiolino 
3001d31a1825SCarlos Maiolino 	if (ip1_flags) {
3002d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
3003d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3004d31a1825SCarlos Maiolino 	}
3005d31a1825SCarlos Maiolino 	if (ip2_flags) {
3006d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
3007d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3008d31a1825SCarlos Maiolino 	}
3009d31a1825SCarlos Maiolino 	if (dp2_flags) {
3010d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
3011d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3012d31a1825SCarlos Maiolino 	}
3013d31a1825SCarlos Maiolino 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3014d31a1825SCarlos Maiolino 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3015c9cfdb38SBrian Foster 	return xfs_finish_rename(tp);
3016eeacd321SDave Chinner 
3017eeacd321SDave Chinner out_trans_abort:
30184906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
3019d31a1825SCarlos Maiolino 	return error;
3020d31a1825SCarlos Maiolino }
3021d31a1825SCarlos Maiolino 
3022d31a1825SCarlos Maiolino /*
30237dcf5c3eSDave Chinner  * xfs_rename_alloc_whiteout()
30247dcf5c3eSDave Chinner  *
3025b63da6c8SRandy Dunlap  * Return a referenced, unlinked, unlocked inode that can be used as a
30267dcf5c3eSDave Chinner  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
30277dcf5c3eSDave Chinner  * crash between allocating the inode and linking it into the rename transaction
30287dcf5c3eSDave Chinner  * crash between allocating the inode and linking it into the rename
30297dcf5c3eSDave Chinner  * transaction, recovery will free the inode and we won't leak it.
30307dcf5c3eSDave Chinner static int
30317dcf5c3eSDave Chinner xfs_rename_alloc_whiteout(
3032f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
30337dcf5c3eSDave Chinner 	struct xfs_inode	*dp,
30347dcf5c3eSDave Chinner 	struct xfs_inode	**wip)
30357dcf5c3eSDave Chinner {
30367dcf5c3eSDave Chinner 	struct xfs_inode	*tmpfile;
30377dcf5c3eSDave Chinner 	int			error;
30387dcf5c3eSDave Chinner 
3039f736d93dSChristoph Hellwig 	error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
3040f736d93dSChristoph Hellwig 				   &tmpfile);
30417dcf5c3eSDave Chinner 	if (error)
30427dcf5c3eSDave Chinner 		return error;
30437dcf5c3eSDave Chinner 
304422419ac9SBrian Foster 	/*
304522419ac9SBrian Foster 	 * Prepare the tmpfile inode as if it were created through the VFS.
3046c4a6bf7fSDarrick J. Wong 	 * Complete the inode setup and flag it as linkable.  nlink is already
3047c4a6bf7fSDarrick J. Wong 	 * zero, so we can skip the drop_nlink.
304822419ac9SBrian Foster 	 */
30492b3d1d41SChristoph Hellwig 	xfs_setup_iops(tmpfile);
30507dcf5c3eSDave Chinner 	xfs_finish_inode_setup(tmpfile);
30517dcf5c3eSDave Chinner 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
30527dcf5c3eSDave Chinner 
30537dcf5c3eSDave Chinner 	*wip = tmpfile;
30547dcf5c3eSDave Chinner 	return 0;
30557dcf5c3eSDave Chinner }
30567dcf5c3eSDave Chinner 
30577dcf5c3eSDave Chinner /*
3058f6bba201SDave Chinner  * xfs_rename
3059f6bba201SDave Chinner  */
3060f6bba201SDave Chinner int
3061f6bba201SDave Chinner xfs_rename(
3062f736d93dSChristoph Hellwig 	struct user_namespace	*mnt_userns,
30637dcf5c3eSDave Chinner 	struct xfs_inode	*src_dp,
3064f6bba201SDave Chinner 	struct xfs_name		*src_name,
30657dcf5c3eSDave Chinner 	struct xfs_inode	*src_ip,
30667dcf5c3eSDave Chinner 	struct xfs_inode	*target_dp,
3067f6bba201SDave Chinner 	struct xfs_name		*target_name,
30687dcf5c3eSDave Chinner 	struct xfs_inode	*target_ip,
3069d31a1825SCarlos Maiolino 	unsigned int		flags)
3070f6bba201SDave Chinner {
30717dcf5c3eSDave Chinner 	struct xfs_mount	*mp = src_dp->i_mount;
30727dcf5c3eSDave Chinner 	struct xfs_trans	*tp;
30737dcf5c3eSDave Chinner 	struct xfs_inode	*wip = NULL;		/* whiteout inode */
30747dcf5c3eSDave Chinner 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
30756da1b4b1SDarrick J. Wong 	int			i;
307695afcf5cSDave Chinner 	int			num_inodes = __XFS_SORT_INODES;
30772b93681fSDave Chinner 	bool			new_parent = (src_dp != target_dp);
3078c19b3b05SDave Chinner 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3079f6bba201SDave Chinner 	int			spaceres;
308041667260SDarrick J. Wong 	bool			retried = false;
308141667260SDarrick J. Wong 	int			error, nospace_error = 0;
3082f6bba201SDave Chinner 
3083f6bba201SDave Chinner 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3084f6bba201SDave Chinner 
3085eeacd321SDave Chinner 	if ((flags & RENAME_EXCHANGE) && !target_ip)
3086eeacd321SDave Chinner 		return -EINVAL;
3087f6bba201SDave Chinner 
30887dcf5c3eSDave Chinner 	/*
30897dcf5c3eSDave Chinner 	 * If we are doing a whiteout operation, allocate the whiteout inode
30907dcf5c3eSDave Chinner 	 * we will be placing at the target and ensure the type is set
30917dcf5c3eSDave Chinner 	 * appropriately.
30927dcf5c3eSDave Chinner 	 */
30937dcf5c3eSDave Chinner 	if (flags & RENAME_WHITEOUT) {
3094f736d93dSChristoph Hellwig 		error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
30957dcf5c3eSDave Chinner 		if (error)
30967dcf5c3eSDave Chinner 			return error;
3097f6bba201SDave Chinner 
30987dcf5c3eSDave Chinner 		/* setup target dirent info as whiteout */
30997dcf5c3eSDave Chinner 		src_name->type = XFS_DIR3_FT_CHRDEV;
31007dcf5c3eSDave Chinner 	}
31017dcf5c3eSDave Chinner 
31027dcf5c3eSDave Chinner 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3103f6bba201SDave Chinner 				inodes, &num_inodes);
3104f6bba201SDave Chinner 
310541667260SDarrick J. Wong retry:
310641667260SDarrick J. Wong 	nospace_error = 0;
3107f6bba201SDave Chinner 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3108253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
31092451337dSDave Chinner 	if (error == -ENOSPC) {
311041667260SDarrick J. Wong 		nospace_error = error;
3111f6bba201SDave Chinner 		spaceres = 0;
3112253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3113253f4911SChristoph Hellwig 				&tp);
3114f6bba201SDave Chinner 	}
3115445883e8SDave Chinner 	if (error)
3116253f4911SChristoph Hellwig 		goto out_release_wip;
3117f6bba201SDave Chinner 
3118f6bba201SDave Chinner 	/*
3119f6bba201SDave Chinner 	 * Attach the dquots to the inodes
3120f6bba201SDave Chinner 	 */
3121f6bba201SDave Chinner 	error = xfs_qm_vop_rename_dqattach(inodes);
3122445883e8SDave Chinner 	if (error)
3123445883e8SDave Chinner 		goto out_trans_cancel;
3124f6bba201SDave Chinner 
3125f6bba201SDave Chinner 	/*
3126f6bba201SDave Chinner 	 * Lock all the participating inodes. Depending upon whether
3127f6bba201SDave Chinner 	 * the target_name exists in the target directory, and
3128f6bba201SDave Chinner 	 * whether the target directory is the same as the source
3129f6bba201SDave Chinner 	 * directory, we can lock from 2 to 4 inodes.
3130f6bba201SDave Chinner 	 */
3131f6bba201SDave Chinner 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3132f6bba201SDave Chinner 
3133f6bba201SDave Chinner 	/*
3134f6bba201SDave Chinner 	 * Join all the inodes to the transaction. From this point on,
3135f6bba201SDave Chinner 	 * we can rely on either trans_commit or trans_cancel to unlock
3136f6bba201SDave Chinner 	 * them.
3137f6bba201SDave Chinner 	 */
313865523218SChristoph Hellwig 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3139f6bba201SDave Chinner 	if (new_parent)
314065523218SChristoph Hellwig 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3141f6bba201SDave Chinner 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3142f6bba201SDave Chinner 	if (target_ip)
3143f6bba201SDave Chinner 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
31447dcf5c3eSDave Chinner 	if (wip)
31457dcf5c3eSDave Chinner 		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3146f6bba201SDave Chinner 
3147f6bba201SDave Chinner 	/*
3148f6bba201SDave Chinner 	 * If we are using project inheritance, we only allow renames
3149f6bba201SDave Chinner 	 * into our tree when the project IDs are the same; else the
3150f6bba201SDave Chinner 	 * tree quota mechanism would be circumvented.
3151f6bba201SDave Chinner 	 */
3152db07349dSChristoph Hellwig 	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
3153ceaf603cSChristoph Hellwig 		     target_dp->i_projid != src_ip->i_projid)) {
31542451337dSDave Chinner 		error = -EXDEV;
3155445883e8SDave Chinner 		goto out_trans_cancel;
3156f6bba201SDave Chinner 	}
3157f6bba201SDave Chinner 
3158eeacd321SDave Chinner 	/* RENAME_EXCHANGE is unique from here on. */
3159eeacd321SDave Chinner 	if (flags & RENAME_EXCHANGE)
3160eeacd321SDave Chinner 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3161d31a1825SCarlos Maiolino 					target_dp, target_name, target_ip,
3162f16dea54SBrian Foster 					spaceres);
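	/*
	 * For RENAME_EXCHANGE, xfs_cross_rename() above takes ownership of
	 * the transaction: it either commits it via xfs_finish_rename() or
	 * cancels it on error, so returning its result directly is safe.
	 */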
3163d31a1825SCarlos Maiolino 
3164d31a1825SCarlos Maiolino 	/*
316541667260SDarrick J. Wong 	 * Try to reserve quota to handle an expansion of the target directory.
316641667260SDarrick J. Wong 	 * We'll allow the rename to continue in reservationless mode if we hit
316741667260SDarrick J. Wong 	 * a space usage constraint.  If we trigger reservationless mode, save
316841667260SDarrick J. Wong 	 * the errno if there isn't any free space in the target directory.
316941667260SDarrick J. Wong 	 */
317041667260SDarrick J. Wong 	if (spaceres != 0) {
317141667260SDarrick J. Wong 		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
317241667260SDarrick J. Wong 				0, false);
317341667260SDarrick J. Wong 		if (error == -EDQUOT || error == -ENOSPC) {
317441667260SDarrick J. Wong 			if (!retried) {
317541667260SDarrick J. Wong 				xfs_trans_cancel(tp);
317641667260SDarrick J. Wong 				xfs_blockgc_free_quota(target_dp, 0);
317741667260SDarrick J. Wong 				retried = true;
317841667260SDarrick J. Wong 				goto retry;
317941667260SDarrick J. Wong 			}
318041667260SDarrick J. Wong 
318141667260SDarrick J. Wong 			nospace_error = error;
318241667260SDarrick J. Wong 			spaceres = 0;
318341667260SDarrick J. Wong 			error = 0;
318441667260SDarrick J. Wong 		}
318541667260SDarrick J. Wong 		if (error)
318641667260SDarrick J. Wong 			goto out_trans_cancel;
318741667260SDarrick J. Wong 	}
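	/*
	 * A failed quota reservation is retried exactly once: cancel the
	 * transaction, have xfs_blockgc_free_quota() reclaim speculative
	 * preallocations (post-EOF and COW blocks, roughly speaking) charged
	 * to the same quotas as the target directory, and start over from the
	 * retry label.  A second failure drops us into reservationless mode
	 * with the error stashed in nospace_error.
	 */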
318841667260SDarrick J. Wong 
318941667260SDarrick J. Wong 	/*
3190bc56ad8cSkaixuxia 	 * Check for expected errors before we dirty the transaction
3191bc56ad8cSkaixuxia 	 * so we can return an error without a transaction abort.
3192f6bba201SDave Chinner 	 */
3193f6bba201SDave Chinner 	if (target_ip == NULL) {
3194f6bba201SDave Chinner 		/*
3195f6bba201SDave Chinner 		 * If there's no space reservation, check the entry will
3196f6bba201SDave Chinner 		 * fit before actually inserting it.
3197f6bba201SDave Chinner 		 */
319894f3cad5SEric Sandeen 		if (!spaceres) {
319994f3cad5SEric Sandeen 			error = xfs_dir_canenter(tp, target_dp, target_name);
3200f6bba201SDave Chinner 			if (error)
3201445883e8SDave Chinner 				goto out_trans_cancel;
320294f3cad5SEric Sandeen 		}
3203bc56ad8cSkaixuxia 	} else {
3204bc56ad8cSkaixuxia 		/*
3205bc56ad8cSkaixuxia 		 * If target exists and it's a directory, check whether it
3206bc56ad8cSkaixuxia 		 * can be destroyed.
3207bc56ad8cSkaixuxia 		 */
3208bc56ad8cSkaixuxia 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3209bc56ad8cSkaixuxia 		    (!xfs_dir_isempty(target_ip) ||
3210bc56ad8cSkaixuxia 		     (VFS_I(target_ip)->i_nlink > 2))) {
3211bc56ad8cSkaixuxia 			error = -EEXIST;
3212bc56ad8cSkaixuxia 			goto out_trans_cancel;
3213bc56ad8cSkaixuxia 		}
3214bc56ad8cSkaixuxia 	}
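	/*
	 * The nlink check above relies on standard directory link counting:
	 * an empty directory has a link count of exactly 2 ("." plus the
	 * entry in its parent), so nlink > 2 means child directories still
	 * point back at it through ".." and it cannot be removed by this
	 * rename.
	 */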
3215bc56ad8cSkaixuxia 
3216bc56ad8cSkaixuxia 	/*
32176da1b4b1SDarrick J. Wong 	 * Lock the AGI buffers we need to handle bumping the nlink of the
32186da1b4b1SDarrick J. Wong 	 * whiteout inode off the unlinked list and to handle dropping the
32196da1b4b1SDarrick J. Wong 	 * nlink of the target inode.  Per locking order rules, do this in
32206da1b4b1SDarrick J. Wong 	 * increasing AG order and before directory block allocation tries to
32216da1b4b1SDarrick J. Wong 	 * grab AGFs because we grab AGIs before AGFs.
32226da1b4b1SDarrick J. Wong 	 *
32236da1b4b1SDarrick J. Wong 	 * The (vfs) caller must ensure that if src is a directory then
32246da1b4b1SDarrick J. Wong 	 * target_ip is either null or an empty directory.
32256da1b4b1SDarrick J. Wong 	 */
32266da1b4b1SDarrick J. Wong 	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
32276da1b4b1SDarrick J. Wong 		if (inodes[i] == wip ||
32286da1b4b1SDarrick J. Wong 		    (inodes[i] == target_ip &&
32296da1b4b1SDarrick J. Wong 		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
323061021debSDave Chinner 			struct xfs_perag	*pag;
32316da1b4b1SDarrick J. Wong 			struct xfs_buf		*bp;
32326da1b4b1SDarrick J. Wong 
323361021debSDave Chinner 			pag = xfs_perag_get(mp,
323461021debSDave Chinner 					XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
323561021debSDave Chinner 			error = xfs_read_agi(pag, tp, &bp);
323661021debSDave Chinner 			xfs_perag_put(pag);
32376da1b4b1SDarrick J. Wong 			if (error)
32386da1b4b1SDarrick J. Wong 				goto out_trans_cancel;
32396da1b4b1SDarrick J. Wong 		}
32406da1b4b1SDarrick J. Wong 	}
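	/*
	 * The loop above matches exactly the inodes whose unlinked list
	 * membership changes during this rename: the whiteout, which is taken
	 * off the unlinked list below, and a target inode losing its last
	 * reference, which xfs_droplink() will put onto the AGI unlinked list
	 * once its link count reaches zero.
	 */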
32416da1b4b1SDarrick J. Wong 
32426da1b4b1SDarrick J. Wong 	/*
3243bc56ad8cSkaixuxia 	 * Directory entry creation below may acquire the AGF. Remove
3244bc56ad8cSkaixuxia 	 * the whiteout from the unlinked list first to preserve correct
3245bc56ad8cSkaixuxia 	 * AGI/AGF locking order. This dirties the transaction so failures
3246bc56ad8cSkaixuxia 	 * after this point will abort and log recovery will clean up the
3247bc56ad8cSkaixuxia 	 * mess.
3248bc56ad8cSkaixuxia 	 *
3249bc56ad8cSkaixuxia 	 * For whiteouts, we need to bump the link count on the whiteout
3250bc56ad8cSkaixuxia 	 * inode. After this point we have a real link, so clear the tmpfile
3251bc56ad8cSkaixuxia 	 * state flag from the inode so it doesn't accidentally get misused
3252bc56ad8cSkaixuxia 	 * in the future.
3253bc56ad8cSkaixuxia 	 */
3254bc56ad8cSkaixuxia 	if (wip) {
3255f40aadb2SDave Chinner 		struct xfs_perag	*pag;
3256f40aadb2SDave Chinner 
3257bc56ad8cSkaixuxia 		ASSERT(VFS_I(wip)->i_nlink == 0);
3258f40aadb2SDave Chinner 
3259f40aadb2SDave Chinner 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3260f40aadb2SDave Chinner 		error = xfs_iunlink_remove(tp, pag, wip);
3261f40aadb2SDave Chinner 		xfs_perag_put(pag);
3262bc56ad8cSkaixuxia 		if (error)
3263bc56ad8cSkaixuxia 			goto out_trans_cancel;
3264bc56ad8cSkaixuxia 
3265bc56ad8cSkaixuxia 		xfs_bumplink(tp, wip);
3266bc56ad8cSkaixuxia 		VFS_I(wip)->i_state &= ~I_LINKABLE;
3267bc56ad8cSkaixuxia 	}
3268bc56ad8cSkaixuxia 
3269bc56ad8cSkaixuxia 	/*
3270bc56ad8cSkaixuxia 	 * Set up the target.
3271bc56ad8cSkaixuxia 	 */
3272bc56ad8cSkaixuxia 	if (target_ip == NULL) {
3273f6bba201SDave Chinner 		/*
3274f6bba201SDave Chinner 		 * If target does not exist and the rename crosses
3275f6bba201SDave Chinner 		 * directories, adjust the target directory link count
3276f6bba201SDave Chinner 		 * to account for the ".." reference from the new entry.
3277f6bba201SDave Chinner 		 */
3278f6bba201SDave Chinner 		error = xfs_dir_createname(tp, target_dp, target_name,
3279381eee69SBrian Foster 					   src_ip->i_ino, spaceres);
3280f6bba201SDave Chinner 		if (error)
3281c8eac49eSBrian Foster 			goto out_trans_cancel;
3282f6bba201SDave Chinner 
3283f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
3284f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3285f6bba201SDave Chinner 
3286f6bba201SDave Chinner 		if (new_parent && src_is_directory) {
328791083269SEric Sandeen 			xfs_bumplink(tp, target_dp);
3288f6bba201SDave Chinner 		}
3289f6bba201SDave Chinner 	} else { /* target_ip != NULL */
3290f6bba201SDave Chinner 		/*
3291f6bba201SDave Chinner 		 * Link the source inode under the target name.
3292f6bba201SDave Chinner 		 * If the source inode is a directory and we are moving
3293f6bba201SDave Chinner 		 * it across directories, its ".." entry will be
3294f6bba201SDave Chinner 		 * inconsistent until we replace that down below.
3295f6bba201SDave Chinner 		 *
3296f6bba201SDave Chinner 		 * In case there is already an entry with the same
3297f6bba201SDave Chinner 		 * name at the destination directory, remove it first.
3298f6bba201SDave Chinner 		 */
3299f6bba201SDave Chinner 		error = xfs_dir_replace(tp, target_dp, target_name,
3300381eee69SBrian Foster 					src_ip->i_ino, spaceres);
3301f6bba201SDave Chinner 		if (error)
3302c8eac49eSBrian Foster 			goto out_trans_cancel;
3303f6bba201SDave Chinner 
3304f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
3305f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3306f6bba201SDave Chinner 
3307f6bba201SDave Chinner 		/*
3308f6bba201SDave Chinner 		 * Decrement the link count on the target since the target
3309f6bba201SDave Chinner 		 * dir no longer points to it.
3310f6bba201SDave Chinner 		 */
3311f6bba201SDave Chinner 		error = xfs_droplink(tp, target_ip);
3312f6bba201SDave Chinner 		if (error)
3313c8eac49eSBrian Foster 			goto out_trans_cancel;
3314f6bba201SDave Chinner 
3315f6bba201SDave Chinner 		if (src_is_directory) {
3316f6bba201SDave Chinner 			/*
3317f6bba201SDave Chinner 			 * Drop the link from the old "." entry.
3318f6bba201SDave Chinner 			 */
3319f6bba201SDave Chinner 			error = xfs_droplink(tp, target_ip);
3320f6bba201SDave Chinner 			if (error)
3321c8eac49eSBrian Foster 				goto out_trans_cancel;
3322f6bba201SDave Chinner 		}
3323f6bba201SDave Chinner 	} /* target_ip != NULL */
3324f6bba201SDave Chinner 
3325f6bba201SDave Chinner 	/*
3326f6bba201SDave Chinner 	 * Remove the source.
3327f6bba201SDave Chinner 	 */
3328f6bba201SDave Chinner 	if (new_parent && src_is_directory) {
3329f6bba201SDave Chinner 		/*
3330f6bba201SDave Chinner 		 * Rewrite the ".." entry to point to the new
3331f6bba201SDave Chinner 		 * directory.
3332f6bba201SDave Chinner 		 */
3333f6bba201SDave Chinner 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3334381eee69SBrian Foster 					target_dp->i_ino, spaceres);
33352451337dSDave Chinner 		ASSERT(error != -EEXIST);
3336f6bba201SDave Chinner 		if (error)
3337c8eac49eSBrian Foster 			goto out_trans_cancel;
3338f6bba201SDave Chinner 	}
3339f6bba201SDave Chinner 
3340f6bba201SDave Chinner 	/*
3341f6bba201SDave Chinner 	 * We always want to hit the ctime on the source inode.
3342f6bba201SDave Chinner 	 *
3343f6bba201SDave Chinner 	 * This isn't strictly required by the standards since the source
3344f6bba201SDave Chinner 	 * inode isn't really being changed, but old unix file systems did
3345f6bba201SDave Chinner 	 * it and some incremental backup programs won't work without it.
3346f6bba201SDave Chinner 	 */
3347f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3348f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3349f6bba201SDave Chinner 
3350f6bba201SDave Chinner 	/*
3351f6bba201SDave Chinner 	 * Adjust the link count on src_dp.  This is necessary when
3352f6bba201SDave Chinner 	 * renaming a directory, either within one parent when
3353f6bba201SDave Chinner 	 * the target existed, or across two parent directories.
3354f6bba201SDave Chinner 	 */
3355f6bba201SDave Chinner 	if (src_is_directory && (new_parent || target_ip != NULL)) {
3356f6bba201SDave Chinner 
3357f6bba201SDave Chinner 		/*
3358f6bba201SDave Chinner 		 * Decrement link count on src_directory since the
3359f6bba201SDave Chinner 		 * entry that's moved no longer points to it.
3360f6bba201SDave Chinner 		 */
3361f6bba201SDave Chinner 		error = xfs_droplink(tp, src_dp);
3362f6bba201SDave Chinner 		if (error)
3363c8eac49eSBrian Foster 			goto out_trans_cancel;
3364f6bba201SDave Chinner 	}
3365f6bba201SDave Chinner 
33667dcf5c3eSDave Chinner 	/*
33677dcf5c3eSDave Chinner 	 * For whiteouts, we only need to update the source dirent with the
33687dcf5c3eSDave Chinner 	 * inode number of the whiteout inode rather than removing it
33697dcf5c3eSDave Chinner 	 * altogether.
33707dcf5c3eSDave Chinner 	 */
337183a21c18SChandan Babu R 	if (wip)
33727dcf5c3eSDave Chinner 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3373381eee69SBrian Foster 					spaceres);
337483a21c18SChandan Babu R 	else
3375f6bba201SDave Chinner 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3376381eee69SBrian Foster 					   spaceres);
337702092a2fSChandan Babu R 
3378f6bba201SDave Chinner 	if (error)
3379c8eac49eSBrian Foster 		goto out_trans_cancel;
3380f6bba201SDave Chinner 
3381f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3382f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3383f6bba201SDave Chinner 	if (new_parent)
3384f6bba201SDave Chinner 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3385f6bba201SDave Chinner 
3386c9cfdb38SBrian Foster 	error = xfs_finish_rename(tp);
33877dcf5c3eSDave Chinner 	if (wip)
338844a8736bSDarrick J. Wong 		xfs_irele(wip);
33897dcf5c3eSDave Chinner 	return error;
3390f6bba201SDave Chinner 
3391445883e8SDave Chinner out_trans_cancel:
33924906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
3393253f4911SChristoph Hellwig out_release_wip:
33947dcf5c3eSDave Chinner 	if (wip)
339544a8736bSDarrick J. Wong 		xfs_irele(wip);
339641667260SDarrick J. Wong 	if (error == -ENOSPC && nospace_error)
339741667260SDarrick J. Wong 		error = nospace_error;
3398f6bba201SDave Chinner 	return error;
3399f6bba201SDave Chinner }
3400f6bba201SDave Chinner 
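/*
 * Write the dirty parts of the in-core inode into its backing cluster buffer.
 *
 * The caller must hold the ILOCK (shared or exclusive), have marked the inode
 * XFS_IFLUSHING, and hold locked the cluster buffer that the inode log item
 * is attached to; the assertions below spell this out.  No I/O is issued
 * here: the inode core and forks are only copied into the buffer and the log
 * item state is updated so that the eventual buffer write cleans the inode.
 */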
3401e6187b34SDave Chinner static int
3402e6187b34SDave Chinner xfs_iflush(
340393848a99SChristoph Hellwig 	struct xfs_inode	*ip,
340493848a99SChristoph Hellwig 	struct xfs_buf		*bp)
34051da177e4SLinus Torvalds {
340693848a99SChristoph Hellwig 	struct xfs_inode_log_item *iip = ip->i_itemp;
340793848a99SChristoph Hellwig 	struct xfs_dinode	*dip;
340893848a99SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
3409f2019299SBrian Foster 	int			error;
34101da177e4SLinus Torvalds 
3411579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3412718ecc50SDave Chinner 	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3413f7e67b20SChristoph Hellwig 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3414daf83964SChristoph Hellwig 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
341590c60e16SDave Chinner 	ASSERT(iip->ili_item.li_buf == bp);
34161da177e4SLinus Torvalds 
341788ee2df7SChristoph Hellwig 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
34181da177e4SLinus Torvalds 
3419f2019299SBrian Foster 	/*
3420f2019299SBrian Foster 	 * We don't flush the inode if any of the following checks fail, but we
3421f2019299SBrian Foster 	 * do still update the log item and attach to the backing buffer as if
3422f2019299SBrian Foster 	 * the flush happened. This is a formality to facilitate predictable
3423f2019299SBrian Foster 	 * error handling as the caller will shut down and fail the buffer.
3424f2019299SBrian Foster 	 */
3425f2019299SBrian Foster 	error = -EFSCORRUPTED;
342669ef921bSChristoph Hellwig 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
34279e24cfd0SDarrick J. Wong 			       mp, XFS_ERRTAG_IFLUSH_1)) {
34286a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3429c9690043SDarrick J. Wong 			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
34306a19d939SDave Chinner 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3431f2019299SBrian Foster 		goto flush_out;
34321da177e4SLinus Torvalds 	}
3433c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode)) {
34341da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
3435f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3436f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
34379e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_3)) {
34386a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3439c9690043SDarrick J. Wong 				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
34406a19d939SDave Chinner 				__func__, ip->i_ino, ip);
3441f2019299SBrian Foster 			goto flush_out;
34421da177e4SLinus Torvalds 		}
3443c19b3b05SDave Chinner 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
34441da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
3445f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3446f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3447f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
34489e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_4)) {
34496a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3450c9690043SDarrick J. Wong 				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
34516a19d939SDave Chinner 				__func__, ip->i_ino, ip);
3452f2019299SBrian Foster 			goto flush_out;
34531da177e4SLinus Torvalds 		}
34541da177e4SLinus Torvalds 	}
3455daf83964SChristoph Hellwig 	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
34566e73a545SChristoph Hellwig 				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
34576a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3458755c38ffSChandan Babu R 			"%s: detected corrupt incore inode %llu, "
3459755c38ffSChandan Babu R 			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
34606a19d939SDave Chinner 			__func__, ip->i_ino,
3461daf83964SChristoph Hellwig 			ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
34626e73a545SChristoph Hellwig 			ip->i_nblocks, ip);
3463f2019299SBrian Foster 		goto flush_out;
34641da177e4SLinus Torvalds 	}
34657821ea30SChristoph Hellwig 	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
34669e24cfd0SDarrick J. Wong 				mp, XFS_ERRTAG_IFLUSH_6)) {
34676a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3468c9690043SDarrick J. Wong 			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
34697821ea30SChristoph Hellwig 			__func__, ip->i_ino, ip->i_forkoff, ip);
3470f2019299SBrian Foster 		goto flush_out;
34711da177e4SLinus Torvalds 	}
3472e60896d8SDave Chinner 
34731da177e4SLinus Torvalds 	/*
3474965e0a1aSChristoph Hellwig 	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3475965e0a1aSChristoph Hellwig 	 * count for correct sequencing.  We bump the flush iteration count so
3476965e0a1aSChristoph Hellwig 	 * we can detect flushes which postdate a log record during recovery.
3477965e0a1aSChristoph Hellwig 	 * This is redundant as we now log every change and hence this can't
3478965e0a1aSChristoph Hellwig 	 * happen but we need to still do it to ensure backwards compatibility
3479965e0a1aSChristoph Hellwig 	 * with old kernels that predate logging all inode changes.
34801da177e4SLinus Torvalds 	 */
348138c26bfdSDave Chinner 	if (!xfs_has_v3inodes(mp))
3482965e0a1aSChristoph Hellwig 		ip->i_flushiter++;
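	/*
	 * v3 (CRC-enabled) inodes don't need the flush iteration counter:
	 * xfs_inode_to_disk() below stamps the log item's LSN into the
	 * on-disk inode, which recovery can compare against logged changes
	 * to decide whether replay is needed.
	 */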
34831da177e4SLinus Torvalds 
34840f45a1b2SChristoph Hellwig 	/*
34850f45a1b2SChristoph Hellwig 	 * If there are inline format data / attr forks attached to this inode,
34860f45a1b2SChristoph Hellwig 	 * make sure they are not corrupt.
34870f45a1b2SChristoph Hellwig 	 */
3488f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
34890f45a1b2SChristoph Hellwig 	    xfs_ifork_verify_local_data(ip))
34900f45a1b2SChristoph Hellwig 		goto flush_out;
3491f7e67b20SChristoph Hellwig 	if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
34920f45a1b2SChristoph Hellwig 	    xfs_ifork_verify_local_attr(ip))
3493f2019299SBrian Foster 		goto flush_out;
3494005c5db8SDarrick J. Wong 
34951da177e4SLinus Torvalds 	/*
34963987848cSDave Chinner 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
34973987848cSDave Chinner 	 * copy out the core of the inode, because if the inode is dirty at all
34983987848cSDave Chinner 	 * the core must be.
34991da177e4SLinus Torvalds 	 */
350093f958f9SDave Chinner 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
35011da177e4SLinus Torvalds 
35021da177e4SLinus Torvalds 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
350338c26bfdSDave Chinner 	if (!xfs_has_v3inodes(mp)) {
3504965e0a1aSChristoph Hellwig 		if (ip->i_flushiter == DI_MAX_FLUSH)
3505965e0a1aSChristoph Hellwig 			ip->i_flushiter = 0;
3506ee7b83fdSChristoph Hellwig 	}
35071da177e4SLinus Torvalds 
3508005c5db8SDarrick J. Wong 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3509005c5db8SDarrick J. Wong 	if (XFS_IFORK_Q(ip))
3510005c5db8SDarrick J. Wong 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
35111da177e4SLinus Torvalds 
35121da177e4SLinus Torvalds 	/*
3513f5d8d5c4SChristoph Hellwig 	 * We've recorded everything logged in the inode, so we'd like to clear
3514f5d8d5c4SChristoph Hellwig 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3515f5d8d5c4SChristoph Hellwig 	 * However, we can't stop logging all this information until the data
3516f5d8d5c4SChristoph Hellwig 	 * we've copied into the disk buffer is written to disk.  If we did we
3517f5d8d5c4SChristoph Hellwig 	 * might overwrite the copy of the inode in the log with all the data
3518f5d8d5c4SChristoph Hellwig 	 * after re-logging only part of it, and in the face of a crash we
3519f5d8d5c4SChristoph Hellwig 	 * wouldn't have all the data we need to recover.
35201da177e4SLinus Torvalds 	 *
3521f5d8d5c4SChristoph Hellwig 	 * What we do is move the bits to the ili_last_fields field.  When
3522f5d8d5c4SChristoph Hellwig 	 * logging the inode, these bits are moved back to the ili_fields field.
3523664ffb8aSChristoph Hellwig 	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3524664ffb8aSChristoph Hellwig 	 * we know that the information those bits represent is permanently on
3525f5d8d5c4SChristoph Hellwig 	 * disk.  As long as the flush completes before the inode is logged
3526f5d8d5c4SChristoph Hellwig 	 * again, then both ili_fields and ili_last_fields will be cleared.
35271da177e4SLinus Torvalds 	 */
3528f2019299SBrian Foster 	error = 0;
3529f2019299SBrian Foster flush_out:
35301319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
3531f5d8d5c4SChristoph Hellwig 	iip->ili_last_fields = iip->ili_fields;
3532f5d8d5c4SChristoph Hellwig 	iip->ili_fields = 0;
3533fc0561ceSDave Chinner 	iip->ili_fsync_fields = 0;
35341319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
35351da177e4SLinus Torvalds 
35361319ebefSDave Chinner 	/*
35371319ebefSDave Chinner 	 * Store the current LSN of the inode so that we can tell whether the
3538664ffb8aSChristoph Hellwig 	 * item has moved in the AIL from xfs_buf_inode_iodone().
35391319ebefSDave Chinner 	 */
35407b2e2a31SDavid Chinner 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
35417b2e2a31SDavid Chinner 				&iip->ili_item.li_lsn);
35421da177e4SLinus Torvalds 
354393848a99SChristoph Hellwig 	/* generate the checksum. */
354493848a99SChristoph Hellwig 	xfs_dinode_calc_crc(mp, dip);
3545f2019299SBrian Foster 	return error;
35461da177e4SLinus Torvalds }
354744a8736bSDarrick J. Wong 
3548e6187b34SDave Chinner /*
3549e6187b34SDave Chinner  * Non-blocking flush of dirty inode metadata into the backing buffer.
3550e6187b34SDave Chinner  *
3551e6187b34SDave Chinner  * The caller must have a reference to the inode and hold the cluster buffer
3552e6187b34SDave Chinner  * locked. The function walks all the inodes attached to the cluster buffer,
3553e6187b34SDave Chinner  * flushing into it every inode that it can lock without blocking.
3554e6187b34SDave Chinner  *
35555717ea4dSDave Chinner  * On successful flushing of at least one inode, the caller must write out the
35565717ea4dSDave Chinner  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
35575717ea4dSDave Chinner  * the caller needs to release the buffer. On failure, the filesystem will be
35585717ea4dSDave Chinner  * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
35595717ea4dSDave Chinner  * will be returned.
3560e6187b34SDave Chinner  */
3561e6187b34SDave Chinner int
3562e6187b34SDave Chinner xfs_iflush_cluster(
3563e6187b34SDave Chinner 	struct xfs_buf		*bp)
3564e6187b34SDave Chinner {
35655717ea4dSDave Chinner 	struct xfs_mount	*mp = bp->b_mount;
35665717ea4dSDave Chinner 	struct xfs_log_item	*lip, *n;
35675717ea4dSDave Chinner 	struct xfs_inode	*ip;
35685717ea4dSDave Chinner 	struct xfs_inode_log_item *iip;
3569e6187b34SDave Chinner 	int			clcount = 0;
35705717ea4dSDave Chinner 	int			error = 0;
3571e6187b34SDave Chinner 
3572e6187b34SDave Chinner 	/*
35735717ea4dSDave Chinner 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3574d2d7c047SDave Chinner 	 * will remove the inode log item from the list.
3575e6187b34SDave Chinner 	 */
35765717ea4dSDave Chinner 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
35775717ea4dSDave Chinner 		iip = (struct xfs_inode_log_item *)lip;
35785717ea4dSDave Chinner 		ip = iip->ili_inode;
35795717ea4dSDave Chinner 
35805717ea4dSDave Chinner 		/*
35815717ea4dSDave Chinner 		 * Quick and dirty check to avoid locks if possible.
35825717ea4dSDave Chinner 		 */
3583718ecc50SDave Chinner 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
35845717ea4dSDave Chinner 			continue;
35855717ea4dSDave Chinner 		if (xfs_ipincount(ip))
35865717ea4dSDave Chinner 			continue;
35875717ea4dSDave Chinner 
35885717ea4dSDave Chinner 		/*
35895717ea4dSDave Chinner 		 * The inode is still attached to the buffer, which means it is
35905717ea4dSDave Chinner 		 * dirty but reclaim might try to grab it. Check carefully for
35915717ea4dSDave Chinner 		 * that, and grab the ilock while still holding the i_flags_lock
35925717ea4dSDave Chinner 		 * to guarantee reclaim will not be able to reclaim this inode
35935717ea4dSDave Chinner 		 * once we drop the i_flags_lock.
35945717ea4dSDave Chinner 		 */
35955717ea4dSDave Chinner 		spin_lock(&ip->i_flags_lock);
35965717ea4dSDave Chinner 		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3597718ecc50SDave Chinner 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
35985717ea4dSDave Chinner 			spin_unlock(&ip->i_flags_lock);
3599e6187b34SDave Chinner 			continue;
3600e6187b34SDave Chinner 		}
3601e6187b34SDave Chinner 
3602e6187b34SDave Chinner 		/*
36035717ea4dSDave Chinner 		 * ILOCK will pin the inode against reclaim and prevent
36045717ea4dSDave Chinner 		 * concurrent transactions modifying the inode while we are
3605718ecc50SDave Chinner 		 * flushing the inode. If we get the lock, set the flushing
3606718ecc50SDave Chinner 		 * state before we drop the i_flags_lock.
3607e6187b34SDave Chinner 		 */
36085717ea4dSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
36095717ea4dSDave Chinner 			spin_unlock(&ip->i_flags_lock);
36105717ea4dSDave Chinner 			continue;
36115717ea4dSDave Chinner 		}
3612718ecc50SDave Chinner 		__xfs_iflags_set(ip, XFS_IFLUSHING);
36135717ea4dSDave Chinner 		spin_unlock(&ip->i_flags_lock);
36145717ea4dSDave Chinner 
36155717ea4dSDave Chinner 		/*
36165717ea4dSDave Chinner 		 * Abort flushing this inode if we are shut down because the
36175717ea4dSDave Chinner 		 * inode may not currently be in the AIL. This can occur when
36185717ea4dSDave Chinner 		 * log I/O failure unpins the inode without inserting into the
36195717ea4dSDave Chinner 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
36205717ea4dSDave Chinner 		 * that otherwise looks like it should be flushed.
36215717ea4dSDave Chinner 		 */
362201728b44SDave Chinner 		if (xlog_is_shutdown(mp->m_log)) {
36235717ea4dSDave Chinner 			xfs_iunpin_wait(ip);
36245717ea4dSDave Chinner 			xfs_iflush_abort(ip);
36255717ea4dSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
36265717ea4dSDave Chinner 			error = -EIO;
36275717ea4dSDave Chinner 			continue;
36285717ea4dSDave Chinner 		}
36295717ea4dSDave Chinner 
36305717ea4dSDave Chinner 		/* don't block waiting on a log force to unpin dirty inodes */
36315717ea4dSDave Chinner 		if (xfs_ipincount(ip)) {
3632718ecc50SDave Chinner 			xfs_iflags_clear(ip, XFS_IFLUSHING);
36335717ea4dSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
36345717ea4dSDave Chinner 			continue;
36355717ea4dSDave Chinner 		}
36365717ea4dSDave Chinner 
36375717ea4dSDave Chinner 		if (!xfs_inode_clean(ip))
36385717ea4dSDave Chinner 			error = xfs_iflush(ip, bp);
36395717ea4dSDave Chinner 		else
3640718ecc50SDave Chinner 			xfs_iflags_clear(ip, XFS_IFLUSHING);
36415717ea4dSDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
36425717ea4dSDave Chinner 		if (error)
3643e6187b34SDave Chinner 			break;
3644e6187b34SDave Chinner 		clcount++;
3645e6187b34SDave Chinner 	}
3646e6187b34SDave Chinner 
3647e6187b34SDave Chinner 	if (error) {
364801728b44SDave Chinner 		/*
364901728b44SDave Chinner 		 * Shutdown first so we kill the log before we release this
365001728b44SDave Chinner 		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
365101728b44SDave Chinner 		 * of the log, failing it before the _log_ is shut down can
365201728b44SDave Chinner 		 * result in the log tail being moved forward in the journal
365301728b44SDave Chinner 		 * on disk because log writes can still be taking place. Hence
365401728b44SDave Chinner 		 * unpinning the tail will allow the ICREATE intent to be
365501728b44SDave Chinner 		 * removed from the log an recovery will fail with uninitialised
365601728b44SDave Chinner 		 * removed from the log and recovery will fail with uninitialised
365701728b44SDave Chinner 		 */
365801728b44SDave Chinner 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3659e6187b34SDave Chinner 		bp->b_flags |= XBF_ASYNC;
3660e6187b34SDave Chinner 		xfs_buf_ioend_fail(bp);
3661e6187b34SDave Chinner 		return error;
3662e6187b34SDave Chinner 	}
3663e6187b34SDave Chinner 
36645717ea4dSDave Chinner 	if (!clcount)
36655717ea4dSDave Chinner 		return -EAGAIN;
36665717ea4dSDave Chinner 
36675717ea4dSDave Chinner 	XFS_STATS_INC(mp, xs_icluster_flushcnt);
36685717ea4dSDave Chinner 	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
36695717ea4dSDave Chinner 	return 0;
36705717ea4dSDave Chinner 
36715717ea4dSDave Chinner }
36725717ea4dSDave Chinner 
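/*
 * The expected caller of xfs_iflush_cluster() is the AIL push path for inode
 * log items (xfs_inode_item_push(), presumably): it locks the cluster buffer,
 * calls this function, and on success queues the buffer for delayed write so
 * the attached inodes are cleaned when the buffer I/O completes.
 */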
367344a8736bSDarrick J. Wong /* Release an inode. */
367444a8736bSDarrick J. Wong void
367544a8736bSDarrick J. Wong xfs_irele(
367644a8736bSDarrick J. Wong 	struct xfs_inode	*ip)
367744a8736bSDarrick J. Wong {
367844a8736bSDarrick J. Wong 	trace_xfs_irele(ip, _RET_IP_);
367944a8736bSDarrick J. Wong 	iput(VFS_I(ip));
368044a8736bSDarrick J. Wong }
368154fbdd10SChristoph Hellwig 
368254fbdd10SChristoph Hellwig /*
368354fbdd10SChristoph Hellwig  * Ensure all committed transactions touching the inode are written to the log.
368454fbdd10SChristoph Hellwig  */
368554fbdd10SChristoph Hellwig int
368654fbdd10SChristoph Hellwig xfs_log_force_inode(
368754fbdd10SChristoph Hellwig 	struct xfs_inode	*ip)
368854fbdd10SChristoph Hellwig {
36895f9b4b0dSDave Chinner 	xfs_csn_t		seq = 0;
369054fbdd10SChristoph Hellwig 
369154fbdd10SChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_SHARED);
369254fbdd10SChristoph Hellwig 	if (xfs_ipincount(ip))
36935f9b4b0dSDave Chinner 		seq = ip->i_itemp->ili_commit_seq;
369454fbdd10SChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
369554fbdd10SChristoph Hellwig 
36965f9b4b0dSDave Chinner 	if (!seq)
369754fbdd10SChristoph Hellwig 		return 0;
36985f9b4b0dSDave Chinner 	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
369954fbdd10SChristoph Hellwig }
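/*
 * A minimal usage sketch for a hypothetical metadata-only sync path:
 *
 *	error = xfs_log_force_inode(ip);
 *
 * This pushes the CIL up to the inode's last commit sequence.  If the inode
 * is not pinned, its last logged change has already reached the on-disk log
 * and there is nothing to force.
 */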
3700e2aaee9cSDarrick J. Wong 
3701e2aaee9cSDarrick J. Wong /*
3702e2aaee9cSDarrick J. Wong  * Grab the exclusive iolock for a data copy from src to dest, making sure to
3703e2aaee9cSDarrick J. Wong  * abide by the vfs locking order (lowest pointer value goes first) and to break the
3704e2aaee9cSDarrick J. Wong  * layout leases before proceeding.  The loop is needed because we cannot call
3705e2aaee9cSDarrick J. Wong  * the blocking break_layout() with the iolocks held, and therefore have to
3706e2aaee9cSDarrick J. Wong  * back out both locks.
3707e2aaee9cSDarrick J. Wong  */
3708e2aaee9cSDarrick J. Wong static int
3709e2aaee9cSDarrick J. Wong xfs_iolock_two_inodes_and_break_layout(
3710e2aaee9cSDarrick J. Wong 	struct inode		*src,
3711e2aaee9cSDarrick J. Wong 	struct inode		*dest)
3712e2aaee9cSDarrick J. Wong {
3713e2aaee9cSDarrick J. Wong 	int			error;
3714e2aaee9cSDarrick J. Wong 
3715e2aaee9cSDarrick J. Wong 	if (src > dest)
3716e2aaee9cSDarrick J. Wong 		swap(src, dest);
3717e2aaee9cSDarrick J. Wong 
3718e2aaee9cSDarrick J. Wong retry:
3719e2aaee9cSDarrick J. Wong 	/* Wait to break both inodes' layouts before we start locking. */
3720e2aaee9cSDarrick J. Wong 	error = break_layout(src, true);
3721e2aaee9cSDarrick J. Wong 	if (error)
3722e2aaee9cSDarrick J. Wong 		return error;
3723e2aaee9cSDarrick J. Wong 	if (src != dest) {
3724e2aaee9cSDarrick J. Wong 		error = break_layout(dest, true);
3725e2aaee9cSDarrick J. Wong 		if (error)
3726e2aaee9cSDarrick J. Wong 			return error;
3727e2aaee9cSDarrick J. Wong 	}
3728e2aaee9cSDarrick J. Wong 
3729e2aaee9cSDarrick J. Wong 	/* Lock one inode and make sure nobody got in and leased it. */
3730e2aaee9cSDarrick J. Wong 	inode_lock(src);
3731e2aaee9cSDarrick J. Wong 	error = break_layout(src, false);
3732e2aaee9cSDarrick J. Wong 	if (error) {
3733e2aaee9cSDarrick J. Wong 		inode_unlock(src);
3734e2aaee9cSDarrick J. Wong 		if (error == -EWOULDBLOCK)
3735e2aaee9cSDarrick J. Wong 			goto retry;
3736e2aaee9cSDarrick J. Wong 		return error;
3737e2aaee9cSDarrick J. Wong 	}
3738e2aaee9cSDarrick J. Wong 
3739e2aaee9cSDarrick J. Wong 	if (src == dest)
3740e2aaee9cSDarrick J. Wong 		return 0;
3741e2aaee9cSDarrick J. Wong 
3742e2aaee9cSDarrick J. Wong 	/* Lock the other inode and make sure nobody got in and leased it. */
3743e2aaee9cSDarrick J. Wong 	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3744e2aaee9cSDarrick J. Wong 	error = break_layout(dest, false);
3745e2aaee9cSDarrick J. Wong 	if (error) {
3746e2aaee9cSDarrick J. Wong 		inode_unlock(src);
3747e2aaee9cSDarrick J. Wong 		inode_unlock(dest);
3748e2aaee9cSDarrick J. Wong 		if (error == -EWOULDBLOCK)
3749e2aaee9cSDarrick J. Wong 			goto retry;
3750e2aaee9cSDarrick J. Wong 		return error;
3751e2aaee9cSDarrick J. Wong 	}
3752e2aaee9cSDarrick J. Wong 
3753e2aaee9cSDarrick J. Wong 	return 0;
3754e2aaee9cSDarrick J. Wong }
3755e2aaee9cSDarrick J. Wong 
3756e2aaee9cSDarrick J. Wong /*
3757e2aaee9cSDarrick J. Wong  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3758e2aaee9cSDarrick J. Wong  * mmap activity.
3759e2aaee9cSDarrick J. Wong  */
3760e2aaee9cSDarrick J. Wong int
3761e2aaee9cSDarrick J. Wong xfs_ilock2_io_mmap(
3762e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip1,
3763e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip2)
3764e2aaee9cSDarrick J. Wong {
3765e2aaee9cSDarrick J. Wong 	int			ret;
3766e2aaee9cSDarrick J. Wong 
3767e2aaee9cSDarrick J. Wong 	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3768e2aaee9cSDarrick J. Wong 	if (ret)
3769e2aaee9cSDarrick J. Wong 		return ret;
3770d2c292d8SJan Kara 	filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
3771d2c292d8SJan Kara 				    VFS_I(ip2)->i_mapping);
3772e2aaee9cSDarrick J. Wong 	return 0;
3773e2aaee9cSDarrick J. Wong }
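/*
 * The remap and reflink setup paths (xfs_reflink_remap_prep(), presumably)
 * are the intended callers: take both inodes' i_rwsem and mapping
 * invalidate_lock here, do the remap work, then drop everything again with
 * xfs_iunlock2_io_mmap() below.
 */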
3774e2aaee9cSDarrick J. Wong 
3775e2aaee9cSDarrick J. Wong /* Unlock both inodes to allow IO and mmap activity. */
3776e2aaee9cSDarrick J. Wong void
3777e2aaee9cSDarrick J. Wong xfs_iunlock2_io_mmap(
3778e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip1,
3779e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip2)
3780e2aaee9cSDarrick J. Wong {
3781d2c292d8SJan Kara 	filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
3782d2c292d8SJan Kara 				      VFS_I(ip2)->i_mapping);
3783e2aaee9cSDarrick J. Wong 	inode_unlock(VFS_I(ip2));
3784d2c292d8SJan Kara 	if (ip1 != ip2)
3785e2aaee9cSDarrick J. Wong 		inode_unlock(VFS_I(ip1));
3786e2aaee9cSDarrick J. Wong }
3787