xref: /openbmc/linux/fs/xfs/xfs_inode.c (revision c14329d39f2daa8132e1bbe5cc531da387bcf44a)
10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
33e57ecf6SOlaf Weber  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
47b718769SNathan Scott  * All Rights Reserved.
51da177e4SLinus Torvalds  */
6f0e28280SJeff Layton #include <linux/iversion.h>
740ebd81dSRobert P. J. Day 
81da177e4SLinus Torvalds #include "xfs.h"
9a844f451SNathan Scott #include "xfs_fs.h"
1070a9883cSDave Chinner #include "xfs_shared.h"
11239880efSDave Chinner #include "xfs_format.h"
12239880efSDave Chinner #include "xfs_log_format.h"
13239880efSDave Chinner #include "xfs_trans_resv.h"
141da177e4SLinus Torvalds #include "xfs_mount.h"
153ab78df2SDarrick J. Wong #include "xfs_defer.h"
16a4fbe6abSDave Chinner #include "xfs_inode.h"
17c24b5dfaSDave Chinner #include "xfs_dir2.h"
18c24b5dfaSDave Chinner #include "xfs_attr.h"
19239880efSDave Chinner #include "xfs_trans_space.h"
20239880efSDave Chinner #include "xfs_trans.h"
211da177e4SLinus Torvalds #include "xfs_buf_item.h"
22a844f451SNathan Scott #include "xfs_inode_item.h"
23784eb7d8SDave Chinner #include "xfs_iunlink_item.h"
24a844f451SNathan Scott #include "xfs_ialloc.h"
25a844f451SNathan Scott #include "xfs_bmap.h"
2668988114SDave Chinner #include "xfs_bmap_util.h"
27e9e899a2SDarrick J. Wong #include "xfs_errortag.h"
281da177e4SLinus Torvalds #include "xfs_error.h"
291da177e4SLinus Torvalds #include "xfs_quota.h"
302a82b8beSDavid Chinner #include "xfs_filestream.h"
310b1b213fSChristoph Hellwig #include "xfs_trace.h"
3233479e05SDave Chinner #include "xfs_icache.h"
33c24b5dfaSDave Chinner #include "xfs_symlink.h"
34239880efSDave Chinner #include "xfs_trans_priv.h"
35239880efSDave Chinner #include "xfs_log.h"
36a4fbe6abSDave Chinner #include "xfs_bmap_btree.h"
37aa8968f2SDarrick J. Wong #include "xfs_reflink.h"
389bbafc71SDave Chinner #include "xfs_ag.h"
3901728b44SDave Chinner #include "xfs_log_priv.h"
401da177e4SLinus Torvalds 
41182696fbSDarrick J. Wong struct kmem_cache *xfs_inode_cache;
421da177e4SLinus Torvalds 
431da177e4SLinus Torvalds /*
448f04c47aSChristoph Hellwig  * Used in xfs_itruncate_extents().  This is the maximum number of extents
451da177e4SLinus Torvalds  * freed from a file in a single transaction.
461da177e4SLinus Torvalds  */
471da177e4SLinus Torvalds #define	XFS_ITRUNC_MAX_EXTENTS	2
481da177e4SLinus Torvalds 
4954d7b5c1SDave Chinner STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
50f40aadb2SDave Chinner STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
51f40aadb2SDave Chinner 	struct xfs_inode *);
52ab297431SZhi Yong Wu 
532a0ec1d9SDave Chinner /*
542a0ec1d9SDave Chinner  * helper function to extract extent size hint from inode
552a0ec1d9SDave Chinner  */
562a0ec1d9SDave Chinner xfs_extlen_t
572a0ec1d9SDave Chinner xfs_get_extsz_hint(
582a0ec1d9SDave Chinner 	struct xfs_inode	*ip)
592a0ec1d9SDave Chinner {
60bdb2ed2dSChristoph Hellwig 	/*
61bdb2ed2dSChristoph Hellwig 	 * No point in aligning allocations if we need to COW to actually
62bdb2ed2dSChristoph Hellwig 	 * write to them.
63bdb2ed2dSChristoph Hellwig 	 */
64bdb2ed2dSChristoph Hellwig 	if (xfs_is_always_cow_inode(ip))
65bdb2ed2dSChristoph Hellwig 		return 0;
66db07349dSChristoph Hellwig 	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
67031474c2SChristoph Hellwig 		return ip->i_extsize;
682a0ec1d9SDave Chinner 	if (XFS_IS_REALTIME_INODE(ip))
692a0ec1d9SDave Chinner 		return ip->i_mount->m_sb.sb_rextsize;
702a0ec1d9SDave Chinner 	return 0;
712a0ec1d9SDave Chinner }
722a0ec1d9SDave Chinner 
73fa96acadSDave Chinner /*
74f7ca3522SDarrick J. Wong  * Helper function to extract CoW extent size hint from inode.
75f7ca3522SDarrick J. Wong  * Between the extent size hint and the CoW extent size hint, we
76e153aa79SDarrick J. Wong  * return the greater of the two.  If the value is zero (automatic),
77e153aa79SDarrick J. Wong  * use the default size.
78f7ca3522SDarrick J. Wong  */
79f7ca3522SDarrick J. Wong xfs_extlen_t
80f7ca3522SDarrick J. Wong xfs_get_cowextsz_hint(
81f7ca3522SDarrick J. Wong 	struct xfs_inode	*ip)
82f7ca3522SDarrick J. Wong {
83f7ca3522SDarrick J. Wong 	xfs_extlen_t		a, b;
84f7ca3522SDarrick J. Wong 
85f7ca3522SDarrick J. Wong 	a = 0;
863e09ab8fSChristoph Hellwig 	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
87b33ce57dSChristoph Hellwig 		a = ip->i_cowextsize;
88f7ca3522SDarrick J. Wong 	b = xfs_get_extsz_hint(ip);
89f7ca3522SDarrick J. Wong 
90e153aa79SDarrick J. Wong 	a = max(a, b);
91e153aa79SDarrick J. Wong 	if (a == 0)
92e153aa79SDarrick J. Wong 		return XFS_DEFAULT_COWEXTSZ_HINT;
93f7ca3522SDarrick J. Wong 	return a;
94f7ca3522SDarrick J. Wong }
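
/*
 * Worked example of the hint selection above, with illustrative values
 * only: a file with no extent size hint (b = 0) and i_cowextsize = 32
 * blocks gets a CoW hint of 32 blocks; if both hints are zero, the
 * function falls back to XFS_DEFAULT_COWEXTSZ_HINT.
 */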
95f7ca3522SDarrick J. Wong 
96f7ca3522SDarrick J. Wong /*
97efa70be1SChristoph Hellwig  * These two are wrapper routines around the xfs_ilock() routine used to
98efa70be1SChristoph Hellwig  * centralize some grungy code.  They are used in places that wish to lock the
99efa70be1SChristoph Hellwig  * inode solely for reading the extents.  The reason these places can't just
100efa70be1SChristoph Hellwig  * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
101efa70be1SChristoph Hellwig  * bringing in of the extents from disk for a file in b-tree format.  If the
102efa70be1SChristoph Hellwig  * inode is in b-tree format, then we need to lock the inode exclusively until
103efa70be1SChristoph Hellwig  * the extents are read in.  Locking it exclusively all the time would limit
104efa70be1SChristoph Hellwig  * our parallelism unnecessarily, though.  What we do instead is check to see
105efa70be1SChristoph Hellwig  * if the extents have been read in yet, and only lock the inode exclusively
106efa70be1SChristoph Hellwig  * if they have not.
107fa96acadSDave Chinner  *
108efa70be1SChristoph Hellwig  * The functions return a value which should be given to the corresponding
10901f4f327SChristoph Hellwig  * xfs_iunlock() call.
110fa96acadSDave Chinner  */
111fa96acadSDave Chinner uint
112309ecac8SChristoph Hellwig xfs_ilock_data_map_shared(
113309ecac8SChristoph Hellwig 	struct xfs_inode	*ip)
114fa96acadSDave Chinner {
115309ecac8SChristoph Hellwig 	uint			lock_mode = XFS_ILOCK_SHARED;
116fa96acadSDave Chinner 
117b2197a36SChristoph Hellwig 	if (xfs_need_iread_extents(&ip->i_df))
118fa96acadSDave Chinner 		lock_mode = XFS_ILOCK_EXCL;
119fa96acadSDave Chinner 	xfs_ilock(ip, lock_mode);
120fa96acadSDave Chinner 	return lock_mode;
121fa96acadSDave Chinner }
122fa96acadSDave Chinner 
123efa70be1SChristoph Hellwig uint
124efa70be1SChristoph Hellwig xfs_ilock_attr_map_shared(
125efa70be1SChristoph Hellwig 	struct xfs_inode	*ip)
126fa96acadSDave Chinner {
127efa70be1SChristoph Hellwig 	uint			lock_mode = XFS_ILOCK_SHARED;
128efa70be1SChristoph Hellwig 
129932b42c6SDarrick J. Wong 	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
130efa70be1SChristoph Hellwig 		lock_mode = XFS_ILOCK_EXCL;
131efa70be1SChristoph Hellwig 	xfs_ilock(ip, lock_mode);
132efa70be1SChristoph Hellwig 	return lock_mode;
133fa96acadSDave Chinner }
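
/*
 * Illustrative sketch of the calling convention for the two helpers
 * above; "nextents" is a hypothetical caller-side local variable:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	nextents = ip->i_df.if_nextents;
 *	xfs_iunlock(ip, lock_mode);
 */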
134fa96acadSDave Chinner 
135fa96acadSDave Chinner /*
136ca76a761SKaixu Xia  * You can't set both SHARED and EXCL for the same lock,
137ca76a761SKaixu Xia  * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
138ca76a761SKaixu Xia  * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
139ca76a761SKaixu Xia  * to set in lock_flags.
140ca76a761SKaixu Xia  */
141ca76a761SKaixu Xia static inline void
142ca76a761SKaixu Xia xfs_lock_flags_assert(
143ca76a761SKaixu Xia 	uint		lock_flags)
144ca76a761SKaixu Xia {
145ca76a761SKaixu Xia 	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
146ca76a761SKaixu Xia 		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
147ca76a761SKaixu Xia 	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
148ca76a761SKaixu Xia 		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
149ca76a761SKaixu Xia 	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
150ca76a761SKaixu Xia 		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
151ca76a761SKaixu Xia 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
152ca76a761SKaixu Xia 	ASSERT(lock_flags != 0);
153ca76a761SKaixu Xia }
154ca76a761SKaixu Xia 
155ca76a761SKaixu Xia /*
15665523218SChristoph Hellwig  * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
1572433480aSJan Kara  * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
15865523218SChristoph Hellwig  * various combinations of the locks to be obtained.
159fa96acadSDave Chinner  *
160653c60b6SDave Chinner  * The 3 locks should always be ordered so that the IO lock is obtained first,
161653c60b6SDave Chinner  * the mmap lock second and the ilock last in order to prevent deadlock.
162fa96acadSDave Chinner  *
163653c60b6SDave Chinner  * Basic locking order:
164653c60b6SDave Chinner  *
1652433480aSJan Kara  * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
166653c60b6SDave Chinner  *
167c1e8d7c6SMichel Lespinasse  * mmap_lock locking order:
168653c60b6SDave Chinner  *
169c1e8d7c6SMichel Lespinasse  * i_rwsem -> page lock -> mmap_lock
1702433480aSJan Kara  * mmap_lock -> invalidate_lock -> page_lock
171653c60b6SDave Chinner  *
172c1e8d7c6SMichel Lespinasse  * The difference in mmap_lock locking order means that we cannot hold the
1732433480aSJan Kara  * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
1742433480aSJan Kara  * can fault in pages during copy in/out (for buffered IO) or require the
1752433480aSJan Kara  * mmap_lock in get_user_pages() to map the user pages into the kernel address
1762433480aSJan Kara  * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
1772433480aSJan Kara  * fault because page faults already hold the mmap_lock.
178653c60b6SDave Chinner  *
179653c60b6SDave Chinner  * Hence to serialise fully against both syscall and mmap based IO, we need to
1802433480aSJan Kara  * take both the i_rwsem and the invalidate_lock. These locks should *only* be
1812433480aSJan Kara  * both taken in places where we need to invalidate the page cache in a race
182653c60b6SDave Chinner  * free manner (e.g. truncate, hole punch and other extent manipulation
183653c60b6SDave Chinner  * functions).
184fa96acadSDave Chinner  */
185fa96acadSDave Chinner void
186fa96acadSDave Chinner xfs_ilock(
187fa96acadSDave Chinner 	xfs_inode_t		*ip,
188fa96acadSDave Chinner 	uint			lock_flags)
189fa96acadSDave Chinner {
190fa96acadSDave Chinner 	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
191fa96acadSDave Chinner 
192ca76a761SKaixu Xia 	xfs_lock_flags_assert(lock_flags);
193fa96acadSDave Chinner 
19465523218SChristoph Hellwig 	if (lock_flags & XFS_IOLOCK_EXCL) {
19565523218SChristoph Hellwig 		down_write_nested(&VFS_I(ip)->i_rwsem,
19665523218SChristoph Hellwig 				  XFS_IOLOCK_DEP(lock_flags));
19765523218SChristoph Hellwig 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
19865523218SChristoph Hellwig 		down_read_nested(&VFS_I(ip)->i_rwsem,
19965523218SChristoph Hellwig 				 XFS_IOLOCK_DEP(lock_flags));
20065523218SChristoph Hellwig 	}
201fa96acadSDave Chinner 
2022433480aSJan Kara 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
2032433480aSJan Kara 		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
2042433480aSJan Kara 				  XFS_MMAPLOCK_DEP(lock_flags));
2052433480aSJan Kara 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
2062433480aSJan Kara 		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
2072433480aSJan Kara 				 XFS_MMAPLOCK_DEP(lock_flags));
2082433480aSJan Kara 	}
209653c60b6SDave Chinner 
210fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
211fa96acadSDave Chinner 		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
212fa96acadSDave Chinner 	else if (lock_flags & XFS_ILOCK_SHARED)
213fa96acadSDave Chinner 		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
214fa96acadSDave Chinner }
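
/*
 * Illustrative sketch of the locking order described above: a caller
 * manipulating extents takes the IO and mmap locks first, the ilock
 * later (typically once a transaction has been allocated), and drops
 * them with the same flags:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	...
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */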
215fa96acadSDave Chinner 
216fa96acadSDave Chinner /*
217fa96acadSDave Chinner  * This is just like xfs_ilock(), except that the caller
218fa96acadSDave Chinner  * is guaranteed not to sleep.  It returns 1 if it gets
219fa96acadSDave Chinner  * the requested locks and 0 otherwise.  If the IO lock is
220fa96acadSDave Chinner  * obtained but the inode lock cannot be, then the IO lock
221fa96acadSDave Chinner  * is dropped before returning.
222fa96acadSDave Chinner  *
223fa96acadSDave Chinner  * ip -- the inode being locked
224fa96acadSDave Chinner  * lock_flags -- this parameter indicates the inode's locks
225fa96acadSDave Chinner  *       to be locked.  See the comment for xfs_ilock() for a list
226fa96acadSDave Chinner  *	 of valid values.
227fa96acadSDave Chinner  */
228fa96acadSDave Chinner int
229fa96acadSDave Chinner xfs_ilock_nowait(
230fa96acadSDave Chinner 	xfs_inode_t		*ip,
231fa96acadSDave Chinner 	uint			lock_flags)
232fa96acadSDave Chinner {
233fa96acadSDave Chinner 	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
234fa96acadSDave Chinner 
235ca76a761SKaixu Xia 	xfs_lock_flags_assert(lock_flags);
236fa96acadSDave Chinner 
237fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL) {
23865523218SChristoph Hellwig 		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
239fa96acadSDave Chinner 			goto out;
240fa96acadSDave Chinner 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
24165523218SChristoph Hellwig 		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
242fa96acadSDave Chinner 			goto out;
243fa96acadSDave Chinner 	}
244653c60b6SDave Chinner 
245653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
2462433480aSJan Kara 		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
247653c60b6SDave Chinner 			goto out_undo_iolock;
248653c60b6SDave Chinner 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
2492433480aSJan Kara 		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
250653c60b6SDave Chinner 			goto out_undo_iolock;
251653c60b6SDave Chinner 	}
252653c60b6SDave Chinner 
253fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL) {
254fa96acadSDave Chinner 		if (!mrtryupdate(&ip->i_lock))
255653c60b6SDave Chinner 			goto out_undo_mmaplock;
256fa96acadSDave Chinner 	} else if (lock_flags & XFS_ILOCK_SHARED) {
257fa96acadSDave Chinner 		if (!mrtryaccess(&ip->i_lock))
258653c60b6SDave Chinner 			goto out_undo_mmaplock;
259fa96acadSDave Chinner 	}
260fa96acadSDave Chinner 	return 1;
261fa96acadSDave Chinner 
262653c60b6SDave Chinner out_undo_mmaplock:
263653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
2642433480aSJan Kara 		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
265653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
2662433480aSJan Kara 		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
267fa96acadSDave Chinner out_undo_iolock:
268fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
26965523218SChristoph Hellwig 		up_write(&VFS_I(ip)->i_rwsem);
270fa96acadSDave Chinner 	else if (lock_flags & XFS_IOLOCK_SHARED)
27165523218SChristoph Hellwig 		up_read(&VFS_I(ip)->i_rwsem);
272fa96acadSDave Chinner out:
273fa96acadSDave Chinner 	return 0;
274fa96acadSDave Chinner }
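
/*
 * Illustrative sketch of the trylock pattern for the routine above; a
 * caller that must not sleep backs off on contention (the -EAGAIN
 * return is the hypothetical caller's choice, not part of this API):
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 *		return -EAGAIN;
 *	...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */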
275fa96acadSDave Chinner 
276fa96acadSDave Chinner /*
277fa96acadSDave Chinner  * xfs_iunlock() is used to drop the inode locks acquired with
278fa96acadSDave Chinner  * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
279fa96acadSDave Chinner  * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
280fa96acadSDave Chinner  * that we know which locks to drop.
281fa96acadSDave Chinner  *
282fa96acadSDave Chinner  * ip -- the inode being unlocked
283fa96acadSDave Chinner  * lock_flags -- this parameter indicates the inode's locks
284fa96acadSDave Chinner  *       to be unlocked.  See the comment for xfs_ilock() for a list
285fa96acadSDave Chinner  *	 of valid values for this parameter.
286fa96acadSDave Chinner  *
287fa96acadSDave Chinner  */
288fa96acadSDave Chinner void
289fa96acadSDave Chinner xfs_iunlock(
290fa96acadSDave Chinner 	xfs_inode_t		*ip,
291fa96acadSDave Chinner 	uint			lock_flags)
292fa96acadSDave Chinner {
293ca76a761SKaixu Xia 	xfs_lock_flags_assert(lock_flags);
294fa96acadSDave Chinner 
295fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
29665523218SChristoph Hellwig 		up_write(&VFS_I(ip)->i_rwsem);
297fa96acadSDave Chinner 	else if (lock_flags & XFS_IOLOCK_SHARED)
29865523218SChristoph Hellwig 		up_read(&VFS_I(ip)->i_rwsem);
299fa96acadSDave Chinner 
300653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
3012433480aSJan Kara 		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
302653c60b6SDave Chinner 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
3032433480aSJan Kara 		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
304653c60b6SDave Chinner 
305fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
306fa96acadSDave Chinner 		mrunlock_excl(&ip->i_lock);
307fa96acadSDave Chinner 	else if (lock_flags & XFS_ILOCK_SHARED)
308fa96acadSDave Chinner 		mrunlock_shared(&ip->i_lock);
309fa96acadSDave Chinner 
310fa96acadSDave Chinner 	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
311fa96acadSDave Chinner }
312fa96acadSDave Chinner 
313fa96acadSDave Chinner /*
314fa96acadSDave Chinner  * give up write locks.  the i/o lock cannot be held nested
315fa96acadSDave Chinner  * if it is being demoted.
316fa96acadSDave Chinner  */
317fa96acadSDave Chinner void
318fa96acadSDave Chinner xfs_ilock_demote(
319fa96acadSDave Chinner 	xfs_inode_t		*ip,
320fa96acadSDave Chinner 	uint			lock_flags)
321fa96acadSDave Chinner {
322653c60b6SDave Chinner 	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
323653c60b6SDave Chinner 	ASSERT((lock_flags &
324653c60b6SDave Chinner 		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
325fa96acadSDave Chinner 
326fa96acadSDave Chinner 	if (lock_flags & XFS_ILOCK_EXCL)
327fa96acadSDave Chinner 		mrdemote(&ip->i_lock);
328653c60b6SDave Chinner 	if (lock_flags & XFS_MMAPLOCK_EXCL)
3292433480aSJan Kara 		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
330fa96acadSDave Chinner 	if (lock_flags & XFS_IOLOCK_EXCL)
33165523218SChristoph Hellwig 		downgrade_write(&VFS_I(ip)->i_rwsem);
332fa96acadSDave Chinner 
333fa96acadSDave Chinner 	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
334fa96acadSDave Chinner }
335fa96acadSDave Chinner 
336742ae1e3SDave Chinner #if defined(DEBUG) || defined(XFS_WARN)
337e31cbde7SPavel Reichl static inline bool
338e31cbde7SPavel Reichl __xfs_rwsem_islocked(
339e31cbde7SPavel Reichl 	struct rw_semaphore	*rwsem,
340e31cbde7SPavel Reichl 	bool			shared)
341e31cbde7SPavel Reichl {
342e31cbde7SPavel Reichl 	if (!debug_locks)
343e31cbde7SPavel Reichl 		return rwsem_is_locked(rwsem);
344e31cbde7SPavel Reichl 
345e31cbde7SPavel Reichl 	if (!shared)
346e31cbde7SPavel Reichl 		return lockdep_is_held_type(rwsem, 0);
347e31cbde7SPavel Reichl 
348e31cbde7SPavel Reichl 	/*
349e31cbde7SPavel Reichl 	 * We are checking that the lock is held at least in shared
350e31cbde7SPavel Reichl 	 * mode but don't care that it might be held exclusively
351e31cbde7SPavel Reichl 	 * (i.e. shared | excl). Hence we check if the lock is held
352e31cbde7SPavel Reichl 	 * in any mode rather than an explicit shared mode.
353e31cbde7SPavel Reichl 	 */
354e31cbde7SPavel Reichl 	return lockdep_is_held_type(rwsem, -1);
355e31cbde7SPavel Reichl }
356e31cbde7SPavel Reichl 
357e31cbde7SPavel Reichl bool
358fa96acadSDave Chinner xfs_isilocked(
359e31cbde7SPavel Reichl 	struct xfs_inode	*ip,
360fa96acadSDave Chinner 	uint			lock_flags)
361fa96acadSDave Chinner {
362fa96acadSDave Chinner 	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
363fa96acadSDave Chinner 		if (!(lock_flags & XFS_ILOCK_SHARED))
364fa96acadSDave Chinner 			return !!ip->i_lock.mr_writer;
365fa96acadSDave Chinner 		return rwsem_is_locked(&ip->i_lock.mr_lock);
366fa96acadSDave Chinner 	}
367fa96acadSDave Chinner 
368653c60b6SDave Chinner 	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
36982af8806SKaixu Xia 		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
37082af8806SKaixu Xia 				(lock_flags & XFS_MMAPLOCK_SHARED));
371653c60b6SDave Chinner 	}
372653c60b6SDave Chinner 
373fa96acadSDave Chinner 	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
374e31cbde7SPavel Reichl 		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
375e31cbde7SPavel Reichl 				(lock_flags & XFS_IOLOCK_SHARED));
376fa96acadSDave Chinner 	}
377fa96acadSDave Chinner 
378fa96acadSDave Chinner 	ASSERT(0);
379e31cbde7SPavel Reichl 	return false;
380fa96acadSDave Chinner }
381fa96acadSDave Chinner #endif
382fa96acadSDave Chinner 
383b6a9947eSDave Chinner /*
384b6a9947eSDave Chinner  * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
385b6a9947eSDave Chinner  * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
386b6a9947eSDave Chinner  * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
387b6a9947eSDave Chinner  * errors and warnings.
388b6a9947eSDave Chinner  */
389b6a9947eSDave Chinner #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
3903403ccc0SDave Chinner static bool
3913403ccc0SDave Chinner xfs_lockdep_subclass_ok(
3923403ccc0SDave Chinner 	int subclass)
3933403ccc0SDave Chinner {
3943403ccc0SDave Chinner 	return subclass < MAX_LOCKDEP_SUBCLASSES;
3953403ccc0SDave Chinner }
3963403ccc0SDave Chinner #else
3973403ccc0SDave Chinner #define xfs_lockdep_subclass_ok(subclass)	(true)
3983403ccc0SDave Chinner #endif
3993403ccc0SDave Chinner 
400c24b5dfaSDave Chinner /*
401653c60b6SDave Chinner  * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
4020952c818SDave Chinner  * value. This can be called for any type of inode lock combination, including
4030952c818SDave Chinner  * parent locking. Care must be taken to ensure we don't overrun the subclass
4040952c818SDave Chinner  * storage fields in the class mask we build.
405c24b5dfaSDave Chinner  */
406a1033753SDave Chinner static inline uint
407a1033753SDave Chinner xfs_lock_inumorder(
408a1033753SDave Chinner 	uint	lock_mode,
409a1033753SDave Chinner 	uint	subclass)
410c24b5dfaSDave Chinner {
411a1033753SDave Chinner 	uint	class = 0;
4120952c818SDave Chinner 
4130952c818SDave Chinner 	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
4140952c818SDave Chinner 			      XFS_ILOCK_RTSUM)));
4153403ccc0SDave Chinner 	ASSERT(xfs_lockdep_subclass_ok(subclass));
4160952c818SDave Chinner 
417653c60b6SDave Chinner 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
4180952c818SDave Chinner 		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
4190952c818SDave Chinner 		class += subclass << XFS_IOLOCK_SHIFT;
420653c60b6SDave Chinner 	}
421653c60b6SDave Chinner 
422653c60b6SDave Chinner 	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
4230952c818SDave Chinner 		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
4240952c818SDave Chinner 		class += subclass << XFS_MMAPLOCK_SHIFT;
425653c60b6SDave Chinner 	}
426653c60b6SDave Chinner 
4270952c818SDave Chinner 	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
4280952c818SDave Chinner 		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
4290952c818SDave Chinner 		class += subclass << XFS_ILOCK_SHIFT;
4300952c818SDave Chinner 	}
431c24b5dfaSDave Chinner 
4320952c818SDave Chinner 	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
433c24b5dfaSDave Chinner }
434c24b5dfaSDave Chinner 
435c24b5dfaSDave Chinner /*
43695afcf5cSDave Chinner  * The following routine will lock n inodes in exclusive mode.  We assume the
43795afcf5cSDave Chinner  * caller calls us with the inodes in i_ino order.
438c24b5dfaSDave Chinner  *
43995afcf5cSDave Chinner  * We need to detect deadlock where an inode that we lock is in the AIL and we
44095afcf5cSDave Chinner  * start waiting for another inode that is locked by a thread in a long running
44195afcf5cSDave Chinner  * transaction (such as truncate). This can result in deadlock since the long
44295afcf5cSDave Chinner  * running trans might need to wait for the inode we just locked in order to
44395afcf5cSDave Chinner  * push the tail and free space in the log.
4440952c818SDave Chinner  *
4450952c818SDave Chinner  * xfs_lock_inodes() can only be used to lock one type of lock at a time -
4460952c818SDave Chinner  * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
4470952c818SDave Chinner  * lock more than one at a time, lockdep will report false positives saying we
4480952c818SDave Chinner  * have violated locking orders.
449c24b5dfaSDave Chinner  */
4500d5a75e9SEric Sandeen static void
451c24b5dfaSDave Chinner xfs_lock_inodes(
452efe2330fSChristoph Hellwig 	struct xfs_inode	**ips,
453c24b5dfaSDave Chinner 	int			inodes,
454c24b5dfaSDave Chinner 	uint			lock_mode)
455c24b5dfaSDave Chinner {
456a1033753SDave Chinner 	int			attempts = 0;
457a1033753SDave Chinner 	uint			i;
458a1033753SDave Chinner 	int			j;
459a1033753SDave Chinner 	bool			try_lock;
460efe2330fSChristoph Hellwig 	struct xfs_log_item	*lp;
461c24b5dfaSDave Chinner 
4620952c818SDave Chinner 	/*
4630952c818SDave Chinner 	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
4640952c818SDave Chinner 	 * support an arbitrary depth of locking here, but absolute limits on
465b63da6c8SRandy Dunlap 	 * inodes depend on the type of locking and the limits placed by
4660952c818SDave Chinner 	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
4670952c818SDave Chinner 	 * the asserts.
4680952c818SDave Chinner 	 */
46995afcf5cSDave Chinner 	ASSERT(ips && inodes >= 2 && inodes <= 5);
4700952c818SDave Chinner 	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
4710952c818SDave Chinner 			    XFS_ILOCK_EXCL));
4720952c818SDave Chinner 	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
4730952c818SDave Chinner 			      XFS_ILOCK_SHARED)));
4740952c818SDave Chinner 	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
4750952c818SDave Chinner 		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
4760952c818SDave Chinner 	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
4770952c818SDave Chinner 		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
4780952c818SDave Chinner 
4790952c818SDave Chinner 	if (lock_mode & XFS_IOLOCK_EXCL) {
4800952c818SDave Chinner 		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
4810952c818SDave Chinner 	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
4820952c818SDave Chinner 		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
483c24b5dfaSDave Chinner 
484c24b5dfaSDave Chinner again:
485a1033753SDave Chinner 	try_lock = false;
486a1033753SDave Chinner 	i = 0;
487c24b5dfaSDave Chinner 	for (; i < inodes; i++) {
488c24b5dfaSDave Chinner 		ASSERT(ips[i]);
489c24b5dfaSDave Chinner 
490c24b5dfaSDave Chinner 		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
491c24b5dfaSDave Chinner 			continue;
492c24b5dfaSDave Chinner 
493c24b5dfaSDave Chinner 		/*
49495afcf5cSDave Chinner 		 * If try_lock is not set yet, make sure all locked inodes are
49595afcf5cSDave Chinner 		 * not in the AIL.  If any are, set try_lock to be used later.
496c24b5dfaSDave Chinner 		 */
497c24b5dfaSDave Chinner 		if (!try_lock) {
498c24b5dfaSDave Chinner 			for (j = (i - 1); j >= 0 && !try_lock; j--) {
499b3b14aacSChristoph Hellwig 				lp = &ips[j]->i_itemp->ili_item;
50022525c17SDave Chinner 				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
501a1033753SDave Chinner 					try_lock = true;
502c24b5dfaSDave Chinner 			}
503c24b5dfaSDave Chinner 		}
504c24b5dfaSDave Chinner 
505c24b5dfaSDave Chinner 		/*
506c24b5dfaSDave Chinner 		 * If any of the previous locks we have locked is in the AIL,
507c24b5dfaSDave Chinner 		 * we must TRY to get the second and subsequent locks. If
508c24b5dfaSDave Chinner 		 * we can't get any, we must release all we have
509c24b5dfaSDave Chinner 		 * and try again.
510c24b5dfaSDave Chinner 		 */
51195afcf5cSDave Chinner 		if (!try_lock) {
51295afcf5cSDave Chinner 			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
51395afcf5cSDave Chinner 			continue;
51495afcf5cSDave Chinner 		}
515c24b5dfaSDave Chinner 
51695afcf5cSDave Chinner 		/* try_lock means we have an inode locked that is in the AIL. */
517c24b5dfaSDave Chinner 		ASSERT(i != 0);
51895afcf5cSDave Chinner 		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
51995afcf5cSDave Chinner 			continue;
52095afcf5cSDave Chinner 
52195afcf5cSDave Chinner 		/*
52295afcf5cSDave Chinner 		 * Unlock all previous guys and try again.  xfs_iunlock will try
52395afcf5cSDave Chinner 		 * to push the tail if the inode is in the AIL.
52495afcf5cSDave Chinner 		 */
525c24b5dfaSDave Chinner 		attempts++;
526c24b5dfaSDave Chinner 		for (j = i - 1; j >= 0; j--) {
527c24b5dfaSDave Chinner 			/*
52895afcf5cSDave Chinner 			 * Check to see if we've already unlocked this one.  Not
52995afcf5cSDave Chinner 			 * the first one going back, and the inode ptr is the
53095afcf5cSDave Chinner 			 * same.
531c24b5dfaSDave Chinner 			 */
53295afcf5cSDave Chinner 			if (j != (i - 1) && ips[j] == ips[j + 1])
533c24b5dfaSDave Chinner 				continue;
534c24b5dfaSDave Chinner 
535c24b5dfaSDave Chinner 			xfs_iunlock(ips[j], lock_mode);
536c24b5dfaSDave Chinner 		}
537c24b5dfaSDave Chinner 
538c24b5dfaSDave Chinner 		if ((attempts % 5) == 0) {
539c24b5dfaSDave Chinner 			delay(1); /* Don't just spin the CPU */
540c24b5dfaSDave Chinner 		}
541c24b5dfaSDave Chinner 		goto again;
542c24b5dfaSDave Chinner 	}
543c24b5dfaSDave Chinner }
544c24b5dfaSDave Chinner 
545c24b5dfaSDave Chinner /*
546d2c292d8SJan Kara  * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
547d2c292d8SJan Kara  * mmaplock must be double-locked separately since we use i_rwsem and
548d2c292d8SJan Kara  * invalidate_lock for that. We now support taking one lock EXCL and the
549d2c292d8SJan Kara  * other SHARED.
550c24b5dfaSDave Chinner  */
551c24b5dfaSDave Chinner void
552c24b5dfaSDave Chinner xfs_lock_two_inodes(
5537c2d238aSDarrick J. Wong 	struct xfs_inode	*ip0,
5547c2d238aSDarrick J. Wong 	uint			ip0_mode,
5557c2d238aSDarrick J. Wong 	struct xfs_inode	*ip1,
5567c2d238aSDarrick J. Wong 	uint			ip1_mode)
557c24b5dfaSDave Chinner {
558c24b5dfaSDave Chinner 	int			attempts = 0;
559efe2330fSChristoph Hellwig 	struct xfs_log_item	*lp;
560c24b5dfaSDave Chinner 
5617c2d238aSDarrick J. Wong 	ASSERT(hweight32(ip0_mode) == 1);
5627c2d238aSDarrick J. Wong 	ASSERT(hweight32(ip1_mode) == 1);
5637c2d238aSDarrick J. Wong 	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
5647c2d238aSDarrick J. Wong 	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
565d2c292d8SJan Kara 	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
566d2c292d8SJan Kara 	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
567c24b5dfaSDave Chinner 	ASSERT(ip0->i_ino != ip1->i_ino);
568c24b5dfaSDave Chinner 
569c24b5dfaSDave Chinner 	if (ip0->i_ino > ip1->i_ino) {
5702a09b575SChangcheng Deng 		swap(ip0, ip1);
5712a09b575SChangcheng Deng 		swap(ip0_mode, ip1_mode);
572c24b5dfaSDave Chinner 	}
573c24b5dfaSDave Chinner 
574c24b5dfaSDave Chinner  again:
5757c2d238aSDarrick J. Wong 	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
576c24b5dfaSDave Chinner 
577c24b5dfaSDave Chinner 	/*
578c24b5dfaSDave Chinner 	 * If the first lock we have locked is in the AIL, we must TRY to get
579c24b5dfaSDave Chinner 	 * the second lock. If we can't get it, we must release the first one
580c24b5dfaSDave Chinner 	 * and try again.
581c24b5dfaSDave Chinner 	 */
582b3b14aacSChristoph Hellwig 	lp = &ip0->i_itemp->ili_item;
58322525c17SDave Chinner 	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
5847c2d238aSDarrick J. Wong 		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
5857c2d238aSDarrick J. Wong 			xfs_iunlock(ip0, ip0_mode);
586c24b5dfaSDave Chinner 			if ((++attempts % 5) == 0)
587c24b5dfaSDave Chinner 				delay(1); /* Don't just spin the CPU */
588c24b5dfaSDave Chinner 			goto again;
589c24b5dfaSDave Chinner 		}
590c24b5dfaSDave Chinner 	} else {
5917c2d238aSDarrick J. Wong 		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
592c24b5dfaSDave Chinner 	}
593c24b5dfaSDave Chinner }
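
/*
 * Illustrative sketch of locking two inodes with the routine above:
 * both ilocks are taken exclusively, and the routine orders the inodes
 * by inode number internally, so the caller may pass them in any order:
 *
 *	xfs_lock_two_inodes(ip1, XFS_ILOCK_EXCL, ip2, XFS_ILOCK_EXCL);
 *	...
 *	xfs_iunlock(ip1, XFS_ILOCK_EXCL);
 *	xfs_iunlock(ip2, XFS_ILOCK_EXCL);
 */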
594c24b5dfaSDave Chinner 
5951da177e4SLinus Torvalds uint
5961da177e4SLinus Torvalds xfs_ip2xflags(
59758f88ca2SDave Chinner 	struct xfs_inode	*ip)
5981da177e4SLinus Torvalds {
5994422501dSChristoph Hellwig 	uint			flags = 0;
6001da177e4SLinus Torvalds 
6014422501dSChristoph Hellwig 	if (ip->i_diflags & XFS_DIFLAG_ANY) {
6024422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
6034422501dSChristoph Hellwig 			flags |= FS_XFLAG_REALTIME;
6044422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
6054422501dSChristoph Hellwig 			flags |= FS_XFLAG_PREALLOC;
6064422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
6074422501dSChristoph Hellwig 			flags |= FS_XFLAG_IMMUTABLE;
6084422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_APPEND)
6094422501dSChristoph Hellwig 			flags |= FS_XFLAG_APPEND;
6104422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_SYNC)
6114422501dSChristoph Hellwig 			flags |= FS_XFLAG_SYNC;
6124422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
6134422501dSChristoph Hellwig 			flags |= FS_XFLAG_NOATIME;
6144422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
6154422501dSChristoph Hellwig 			flags |= FS_XFLAG_NODUMP;
6164422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
6174422501dSChristoph Hellwig 			flags |= FS_XFLAG_RTINHERIT;
6184422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
6194422501dSChristoph Hellwig 			flags |= FS_XFLAG_PROJINHERIT;
6204422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
6214422501dSChristoph Hellwig 			flags |= FS_XFLAG_NOSYMLINKS;
6224422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
6234422501dSChristoph Hellwig 			flags |= FS_XFLAG_EXTSIZE;
6244422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
6254422501dSChristoph Hellwig 			flags |= FS_XFLAG_EXTSZINHERIT;
6264422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
6274422501dSChristoph Hellwig 			flags |= FS_XFLAG_NODEFRAG;
6284422501dSChristoph Hellwig 		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
6294422501dSChristoph Hellwig 			flags |= FS_XFLAG_FILESTREAM;
6304422501dSChristoph Hellwig 	}
6314422501dSChristoph Hellwig 
6324422501dSChristoph Hellwig 	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
6334422501dSChristoph Hellwig 		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
6344422501dSChristoph Hellwig 			flags |= FS_XFLAG_DAX;
6354422501dSChristoph Hellwig 		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
6364422501dSChristoph Hellwig 			flags |= FS_XFLAG_COWEXTSIZE;
6374422501dSChristoph Hellwig 	}
6384422501dSChristoph Hellwig 
639932b42c6SDarrick J. Wong 	if (xfs_inode_has_attr_fork(ip))
6404422501dSChristoph Hellwig 		flags |= FS_XFLAG_HASATTR;
6414422501dSChristoph Hellwig 	return flags;
6421da177e4SLinus Torvalds }
6431da177e4SLinus Torvalds 
6441da177e4SLinus Torvalds /*
645c24b5dfaSDave Chinner  * Looks up an inode from "name". If ci_name is not NULL, then a CI match
646c24b5dfaSDave Chinner  * is allowed, otherwise it has to be an exact match. If a CI match is found,
647c24b5dfaSDave Chinner  * ci_name->name will point to the actual name (caller must free) or
648c24b5dfaSDave Chinner  * will be set to NULL if an exact match is found.
649c24b5dfaSDave Chinner  */
650c24b5dfaSDave Chinner int
651c24b5dfaSDave Chinner xfs_lookup(
652996b2329SDarrick J. Wong 	struct xfs_inode	*dp,
653996b2329SDarrick J. Wong 	const struct xfs_name	*name,
654996b2329SDarrick J. Wong 	struct xfs_inode	**ipp,
655c24b5dfaSDave Chinner 	struct xfs_name		*ci_name)
656c24b5dfaSDave Chinner {
657c24b5dfaSDave Chinner 	xfs_ino_t		inum;
658c24b5dfaSDave Chinner 	int			error;
659c24b5dfaSDave Chinner 
660c24b5dfaSDave Chinner 	trace_xfs_lookup(dp, name);
661c24b5dfaSDave Chinner 
66275c8c50fSDave Chinner 	if (xfs_is_shutdown(dp->i_mount))
6632451337dSDave Chinner 		return -EIO;
664c24b5dfaSDave Chinner 
665c24b5dfaSDave Chinner 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
666c24b5dfaSDave Chinner 	if (error)
667dbad7c99SDave Chinner 		goto out_unlock;
668c24b5dfaSDave Chinner 
669c24b5dfaSDave Chinner 	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
670c24b5dfaSDave Chinner 	if (error)
671c24b5dfaSDave Chinner 		goto out_free_name;
672c24b5dfaSDave Chinner 
673c24b5dfaSDave Chinner 	return 0;
674c24b5dfaSDave Chinner 
675c24b5dfaSDave Chinner out_free_name:
676c24b5dfaSDave Chinner 	if (ci_name)
677c24b5dfaSDave Chinner 		kmem_free(ci_name->name);
678dbad7c99SDave Chinner out_unlock:
679c24b5dfaSDave Chinner 	*ipp = NULL;
680c24b5dfaSDave Chinner 	return error;
681c24b5dfaSDave Chinner }
682c24b5dfaSDave Chinner 
6838a569d71SDarrick J. Wong /* Propagate di_flags from a parent inode to a child inode. */
6848a569d71SDarrick J. Wong static void
6858a569d71SDarrick J. Wong xfs_inode_inherit_flags(
6868a569d71SDarrick J. Wong 	struct xfs_inode	*ip,
6878a569d71SDarrick J. Wong 	const struct xfs_inode	*pip)
6888a569d71SDarrick J. Wong {
6898a569d71SDarrick J. Wong 	unsigned int		di_flags = 0;
690603f000bSDarrick J. Wong 	xfs_failaddr_t		failaddr;
6918a569d71SDarrick J. Wong 	umode_t			mode = VFS_I(ip)->i_mode;
6928a569d71SDarrick J. Wong 
6938a569d71SDarrick J. Wong 	if (S_ISDIR(mode)) {
694db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
6958a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_RTINHERIT;
696db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
6978a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
698031474c2SChristoph Hellwig 			ip->i_extsize = pip->i_extsize;
6998a569d71SDarrick J. Wong 		}
700db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
7018a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_PROJINHERIT;
7028a569d71SDarrick J. Wong 	} else if (S_ISREG(mode)) {
703db07349dSChristoph Hellwig 		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
70438c26bfdSDave Chinner 		    xfs_has_realtime(ip->i_mount))
7058a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_REALTIME;
706db07349dSChristoph Hellwig 		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
7078a569d71SDarrick J. Wong 			di_flags |= XFS_DIFLAG_EXTSIZE;
708031474c2SChristoph Hellwig 			ip->i_extsize = pip->i_extsize;
7098a569d71SDarrick J. Wong 		}
7108a569d71SDarrick J. Wong 	}
711db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
7128a569d71SDarrick J. Wong 	    xfs_inherit_noatime)
7138a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NOATIME;
714db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
7158a569d71SDarrick J. Wong 	    xfs_inherit_nodump)
7168a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NODUMP;
717db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
7188a569d71SDarrick J. Wong 	    xfs_inherit_sync)
7198a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_SYNC;
720db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
7218a569d71SDarrick J. Wong 	    xfs_inherit_nosymlinks)
7228a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NOSYMLINKS;
723db07349dSChristoph Hellwig 	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
7248a569d71SDarrick J. Wong 	    xfs_inherit_nodefrag)
7258a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_NODEFRAG;
726db07349dSChristoph Hellwig 	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
7278a569d71SDarrick J. Wong 		di_flags |= XFS_DIFLAG_FILESTREAM;
7288a569d71SDarrick J. Wong 
729db07349dSChristoph Hellwig 	ip->i_diflags |= di_flags;
730603f000bSDarrick J. Wong 
731603f000bSDarrick J. Wong 	/*
732603f000bSDarrick J. Wong 	 * Inode verifiers on older kernels only check that the extent size
733603f000bSDarrick J. Wong 	 * hint is an integer multiple of the rt extent size on realtime files.
734603f000bSDarrick J. Wong 	 * They did not check the hint alignment on a directory with both
735603f000bSDarrick J. Wong 	 * rtinherit and extszinherit flags set.  If the misaligned hint is
736603f000bSDarrick J. Wong 	 * propagated from a directory into a new realtime file, new file
737603f000bSDarrick J. Wong 	 * allocations will fail due to math errors in the rt allocator and/or
738603f000bSDarrick J. Wong 	 * trip the verifiers.  Validate the hint settings in the new file so
739603f000bSDarrick J. Wong 	 * that we don't let broken hints propagate.
740603f000bSDarrick J. Wong 	 */
741603f000bSDarrick J. Wong 	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
742603f000bSDarrick J. Wong 			VFS_I(ip)->i_mode, ip->i_diflags);
743603f000bSDarrick J. Wong 	if (failaddr) {
744603f000bSDarrick J. Wong 		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
745603f000bSDarrick J. Wong 				   XFS_DIFLAG_EXTSZINHERIT);
746603f000bSDarrick J. Wong 		ip->i_extsize = 0;
747603f000bSDarrick J. Wong 	}
7488a569d71SDarrick J. Wong }
7498a569d71SDarrick J. Wong 
7508a569d71SDarrick J. Wong /* Propagate di_flags2 from a parent inode to a child inode. */
7518a569d71SDarrick J. Wong static void
7528a569d71SDarrick J. Wong xfs_inode_inherit_flags2(
7538a569d71SDarrick J. Wong 	struct xfs_inode	*ip,
7548a569d71SDarrick J. Wong 	const struct xfs_inode	*pip)
7558a569d71SDarrick J. Wong {
756603f000bSDarrick J. Wong 	xfs_failaddr_t		failaddr;
757603f000bSDarrick J. Wong 
7583e09ab8fSChristoph Hellwig 	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
7593e09ab8fSChristoph Hellwig 		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
760b33ce57dSChristoph Hellwig 		ip->i_cowextsize = pip->i_cowextsize;
7618a569d71SDarrick J. Wong 	}
7623e09ab8fSChristoph Hellwig 	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
7633e09ab8fSChristoph Hellwig 		ip->i_diflags2 |= XFS_DIFLAG2_DAX;
764603f000bSDarrick J. Wong 
765603f000bSDarrick J. Wong 	/* Don't let invalid cowextsize hints propagate. */
766603f000bSDarrick J. Wong 	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
767603f000bSDarrick J. Wong 			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
768603f000bSDarrick J. Wong 	if (failaddr) {
769603f000bSDarrick J. Wong 		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
770603f000bSDarrick J. Wong 		ip->i_cowextsize = 0;
771603f000bSDarrick J. Wong 	}
7728a569d71SDarrick J. Wong }
7738a569d71SDarrick J. Wong 
774c24b5dfaSDave Chinner /*
7751abcf261SDave Chinner  * Initialise a newly allocated inode and return the in-core inode to the
7761abcf261SDave Chinner  * caller locked exclusively.
7771da177e4SLinus Torvalds  */
778b652afd9SDave Chinner int
7791abcf261SDave Chinner xfs_init_new_inode(
780f2d40141SChristian Brauner 	struct mnt_idmap	*idmap,
7811abcf261SDave Chinner 	struct xfs_trans	*tp,
7821abcf261SDave Chinner 	struct xfs_inode	*pip,
7831abcf261SDave Chinner 	xfs_ino_t		ino,
784576b1d67SAl Viro 	umode_t			mode,
78531b084aeSNathan Scott 	xfs_nlink_t		nlink,
78666f36464SChristoph Hellwig 	dev_t			rdev,
7876743099cSArkadiusz Mi?kiewicz 	prid_t			prid,
788e6a688c3SDave Chinner 	bool			init_xattrs,
7891abcf261SDave Chinner 	struct xfs_inode	**ipp)
7901da177e4SLinus Torvalds {
79101ea173eSChristoph Hellwig 	struct inode		*dir = pip ? VFS_I(pip) : NULL;
79293848a99SChristoph Hellwig 	struct xfs_mount	*mp = tp->t_mountp;
7931abcf261SDave Chinner 	struct xfs_inode	*ip;
7941abcf261SDave Chinner 	unsigned int		flags;
7951da177e4SLinus Torvalds 	int			error;
79695582b00SDeepa Dinamani 	struct timespec64	tv;
7973987848cSDave Chinner 	struct inode		*inode;
7981da177e4SLinus Torvalds 
7991da177e4SLinus Torvalds 	/*
8008b26984dSDave Chinner 	 * Protect against obviously corrupt allocation btree records. Later
8018b26984dSDave Chinner 	 * xfs_iget checks will catch re-allocation of other active in-memory
8028b26984dSDave Chinner 	 * and on-disk inodes. If we don't catch reallocating the parent inode
8038b26984dSDave Chinner 	 * here we will deadlock in xfs_iget() so we have to do these checks
8048b26984dSDave Chinner 	 * first.
8058b26984dSDave Chinner 	 */
8068b26984dSDave Chinner 	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
8078b26984dSDave Chinner 		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
8088b26984dSDave Chinner 		return -EFSCORRUPTED;
8098b26984dSDave Chinner 	}
8108b26984dSDave Chinner 
8118b26984dSDave Chinner 	/*
8121abcf261SDave Chinner 	 * Get the in-core inode with the lock held exclusively to prevent
8131abcf261SDave Chinner 	 * others from looking at until we're done.
8141da177e4SLinus Torvalds 	 */
8151abcf261SDave Chinner 	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
816bf904248SDavid Chinner 	if (error)
8171da177e4SLinus Torvalds 		return error;
8181abcf261SDave Chinner 
8191da177e4SLinus Torvalds 	ASSERT(ip != NULL);
8203987848cSDave Chinner 	inode = VFS_I(ip);
82154d7b5c1SDave Chinner 	set_nlink(inode, nlink);
82266f36464SChristoph Hellwig 	inode->i_rdev = rdev;
823ceaf603cSChristoph Hellwig 	ip->i_projid = prid;
8241da177e4SLinus Torvalds 
8250560f31aSDave Chinner 	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
826*c14329d3SChristian Brauner 		inode_fsuid_set(inode, idmap);
82701ea173eSChristoph Hellwig 		inode->i_gid = dir->i_gid;
82801ea173eSChristoph Hellwig 		inode->i_mode = mode;
8293d8f2821SChristoph Hellwig 	} else {
830f2d40141SChristian Brauner 		inode_init_owner(idmap, inode, dir, mode);
8311da177e4SLinus Torvalds 	}
8321da177e4SLinus Torvalds 
8331da177e4SLinus Torvalds 	/*
8341da177e4SLinus Torvalds 	 * If the group ID of the new file does not match the effective group
8351da177e4SLinus Torvalds 	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
8361da177e4SLinus Torvalds 	 * (and only if the irix_sgid_inherit compatibility variable is set).
8371da177e4SLinus Torvalds 	 */
83842b7cc11SChristian Brauner 	if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
839e67fe633SChristian Brauner 	    !vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
840c19b3b05SDave Chinner 		inode->i_mode &= ~S_ISGID;
8411da177e4SLinus Torvalds 
84213d2c10bSChristoph Hellwig 	ip->i_disk_size = 0;
843daf83964SChristoph Hellwig 	ip->i_df.if_nextents = 0;
8446e73a545SChristoph Hellwig 	ASSERT(ip->i_nblocks == 0);
845dff35fd4SChristoph Hellwig 
846c2050a45SDeepa Dinamani 	tv = current_time(inode);
8473987848cSDave Chinner 	inode->i_mtime = tv;
8483987848cSDave Chinner 	inode->i_atime = tv;
8493987848cSDave Chinner 	inode->i_ctime = tv;
850dff35fd4SChristoph Hellwig 
851031474c2SChristoph Hellwig 	ip->i_extsize = 0;
852db07349dSChristoph Hellwig 	ip->i_diflags = 0;
85393848a99SChristoph Hellwig 
85438c26bfdSDave Chinner 	if (xfs_has_v3inodes(mp)) {
855f0e28280SJeff Layton 		inode_set_iversion(inode, 1);
856b33ce57dSChristoph Hellwig 		ip->i_cowextsize = 0;
857e98d5e88SChristoph Hellwig 		ip->i_crtime = tv;
85893848a99SChristoph Hellwig 	}
85993848a99SChristoph Hellwig 
8601da177e4SLinus Torvalds 	flags = XFS_ILOG_CORE;
8611da177e4SLinus Torvalds 	switch (mode & S_IFMT) {
8621da177e4SLinus Torvalds 	case S_IFIFO:
8631da177e4SLinus Torvalds 	case S_IFCHR:
8641da177e4SLinus Torvalds 	case S_IFBLK:
8651da177e4SLinus Torvalds 	case S_IFSOCK:
866f7e67b20SChristoph Hellwig 		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
8671da177e4SLinus Torvalds 		flags |= XFS_ILOG_DEV;
8681da177e4SLinus Torvalds 		break;
8691da177e4SLinus Torvalds 	case S_IFREG:
8701da177e4SLinus Torvalds 	case S_IFDIR:
871db07349dSChristoph Hellwig 		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
8728a569d71SDarrick J. Wong 			xfs_inode_inherit_flags(ip, pip);
8733e09ab8fSChristoph Hellwig 		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
8748a569d71SDarrick J. Wong 			xfs_inode_inherit_flags2(ip, pip);
87553004ee7SGustavo A. R. Silva 		fallthrough;
8761da177e4SLinus Torvalds 	case S_IFLNK:
877f7e67b20SChristoph Hellwig 		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
878fcacbc3fSChristoph Hellwig 		ip->i_df.if_bytes = 0;
8796bdcf26aSChristoph Hellwig 		ip->i_df.if_u1.if_root = NULL;
8801da177e4SLinus Torvalds 		break;
8811da177e4SLinus Torvalds 	default:
8821da177e4SLinus Torvalds 		ASSERT(0);
8831da177e4SLinus Torvalds 	}
8841da177e4SLinus Torvalds 
8851da177e4SLinus Torvalds 	/*
886e6a688c3SDave Chinner 	 * If we need to create attributes immediately after allocating the
887e6a688c3SDave Chinner 	 * inode, initialise an empty attribute fork right now. We use the
888e6a688c3SDave Chinner 	 * default fork offset for attributes here as we don't know exactly what
889e6a688c3SDave Chinner 	 * size or how many attributes we might be adding. We can do this
890e6a688c3SDave Chinner 	 * safely here because we know the data fork is completely empty and
891e6a688c3SDave Chinner 	 * this saves us from needing to run a separate transaction to set the
892e6a688c3SDave Chinner 	 * fork offset in the immediate future.
893e6a688c3SDave Chinner 	 */
89438c26bfdSDave Chinner 	if (init_xattrs && xfs_has_attr(mp)) {
8957821ea30SChristoph Hellwig 		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
8962ed5b09bSDarrick J. Wong 		xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
897e6a688c3SDave Chinner 	}
898e6a688c3SDave Chinner 
899e6a688c3SDave Chinner 	/*
9001da177e4SLinus Torvalds 	 * Log the new values stuffed into the inode.
9011da177e4SLinus Torvalds 	 */
902ddc3415aSChristoph Hellwig 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
9031da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, flags);
9041da177e4SLinus Torvalds 
90558c90473SDave Chinner 	/* now that we have an i_mode we can setup the inode structure */
90641be8bedSChristoph Hellwig 	xfs_setup_inode(ip);
9071da177e4SLinus Torvalds 
9081da177e4SLinus Torvalds 	*ipp = ip;
9091da177e4SLinus Torvalds 	return 0;
9101da177e4SLinus Torvalds }
9111da177e4SLinus Torvalds 
912e546cb79SDave Chinner /*
91354d7b5c1SDave Chinner  * Decrement the link count on an inode & log the change.  If this causes the
91454d7b5c1SDave Chinner  * link count to go to zero, move the inode to the AGI unlinked list so that it can
91554d7b5c1SDave Chinner  * be freed when the last active reference goes away via xfs_inactive().
916e546cb79SDave Chinner  */
9170d5a75e9SEric Sandeen static int			/* error */
918e546cb79SDave Chinner xfs_droplink(
919e546cb79SDave Chinner 	xfs_trans_t *tp,
920e546cb79SDave Chinner 	xfs_inode_t *ip)
921e546cb79SDave Chinner {
922e546cb79SDave Chinner 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
923e546cb79SDave Chinner 
924e546cb79SDave Chinner 	drop_nlink(VFS_I(ip));
925e546cb79SDave Chinner 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
926e546cb79SDave Chinner 
92754d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink)
92854d7b5c1SDave Chinner 		return 0;
92954d7b5c1SDave Chinner 
93054d7b5c1SDave Chinner 	return xfs_iunlink(tp, ip);
931e546cb79SDave Chinner }
932e546cb79SDave Chinner 
933e546cb79SDave Chinner /*
934e546cb79SDave Chinner  * Increment the link count on an inode & log the change.
935e546cb79SDave Chinner  */
93691083269SEric Sandeen static void
937e546cb79SDave Chinner xfs_bumplink(
938e546cb79SDave Chinner 	xfs_trans_t *tp,
939e546cb79SDave Chinner 	xfs_inode_t *ip)
940e546cb79SDave Chinner {
941e546cb79SDave Chinner 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
942e546cb79SDave Chinner 
943e546cb79SDave Chinner 	inc_nlink(VFS_I(ip));
944e546cb79SDave Chinner 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
945e546cb79SDave Chinner }
946e546cb79SDave Chinner 
947c24b5dfaSDave Chinner int
948c24b5dfaSDave Chinner xfs_create(
949f2d40141SChristian Brauner 	struct mnt_idmap	*idmap,
950c24b5dfaSDave Chinner 	xfs_inode_t		*dp,
951c24b5dfaSDave Chinner 	struct xfs_name		*name,
952c24b5dfaSDave Chinner 	umode_t			mode,
95366f36464SChristoph Hellwig 	dev_t			rdev,
954e6a688c3SDave Chinner 	bool			init_xattrs,
955c24b5dfaSDave Chinner 	xfs_inode_t		**ipp)
956c24b5dfaSDave Chinner {
957c24b5dfaSDave Chinner 	int			is_dir = S_ISDIR(mode);
958c24b5dfaSDave Chinner 	struct xfs_mount	*mp = dp->i_mount;
959c24b5dfaSDave Chinner 	struct xfs_inode	*ip = NULL;
960c24b5dfaSDave Chinner 	struct xfs_trans	*tp = NULL;
961c24b5dfaSDave Chinner 	int			error;
962c24b5dfaSDave Chinner 	bool                    unlock_dp_on_error = false;
963c24b5dfaSDave Chinner 	prid_t			prid;
964c24b5dfaSDave Chinner 	struct xfs_dquot	*udqp = NULL;
965c24b5dfaSDave Chinner 	struct xfs_dquot	*gdqp = NULL;
966c24b5dfaSDave Chinner 	struct xfs_dquot	*pdqp = NULL;
967062647a8SBrian Foster 	struct xfs_trans_res	*tres;
968c24b5dfaSDave Chinner 	uint			resblks;
969b652afd9SDave Chinner 	xfs_ino_t		ino;
970c24b5dfaSDave Chinner 
971c24b5dfaSDave Chinner 	trace_xfs_create(dp, name);
972c24b5dfaSDave Chinner 
97375c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
9742451337dSDave Chinner 		return -EIO;
975c24b5dfaSDave Chinner 
976163467d3SZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
977c24b5dfaSDave Chinner 
978c24b5dfaSDave Chinner 	/*
979c24b5dfaSDave Chinner 	 * Make sure that we have allocated dquot(s) on disk.
980c24b5dfaSDave Chinner 	 */
981*c14329d3SChristian Brauner 	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
982*c14329d3SChristian Brauner 			mapped_fsgid(idmap, &init_user_ns), prid,
983c24b5dfaSDave Chinner 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
984c24b5dfaSDave Chinner 			&udqp, &gdqp, &pdqp);
985c24b5dfaSDave Chinner 	if (error)
986c24b5dfaSDave Chinner 		return error;
987c24b5dfaSDave Chinner 
988c24b5dfaSDave Chinner 	if (is_dir) {
989c24b5dfaSDave Chinner 		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
990062647a8SBrian Foster 		tres = &M_RES(mp)->tr_mkdir;
991c24b5dfaSDave Chinner 	} else {
992c24b5dfaSDave Chinner 		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
993062647a8SBrian Foster 		tres = &M_RES(mp)->tr_create;
994c24b5dfaSDave Chinner 	}
995c24b5dfaSDave Chinner 
996c24b5dfaSDave Chinner 	/*
997c24b5dfaSDave Chinner 	 * Initially assume that the file does not exist and
998c24b5dfaSDave Chinner 	 * reserve the resources for that case.  If that is not
999c24b5dfaSDave Chinner 	 * the case we'll drop the one we have and get a more
1000c24b5dfaSDave Chinner 	 * appropriate transaction later.
1001c24b5dfaSDave Chinner 	 */
1002f2f7b9ffSDarrick J. Wong 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1003f2f7b9ffSDarrick J. Wong 			&tp);
10042451337dSDave Chinner 	if (error == -ENOSPC) {
1005c24b5dfaSDave Chinner 		/* flush outstanding delalloc blocks and retry */
1006c24b5dfaSDave Chinner 		xfs_flush_inodes(mp);
1007f2f7b9ffSDarrick J. Wong 		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1008f2f7b9ffSDarrick J. Wong 				resblks, &tp);
1009c24b5dfaSDave Chinner 	}
10104906e215SChristoph Hellwig 	if (error)
1011f2f7b9ffSDarrick J. Wong 		goto out_release_dquots;
1012c24b5dfaSDave Chinner 
101365523218SChristoph Hellwig 	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1014c24b5dfaSDave Chinner 	unlock_dp_on_error = true;
1015c24b5dfaSDave Chinner 
1016c24b5dfaSDave Chinner 	/*
1017c24b5dfaSDave Chinner 	 * A newly created regular or special file just has one directory
1018c24b5dfaSDave Chinner 	 * entry pointing to it, but a directory also has the "." entry
1019c24b5dfaSDave Chinner 	 * pointing to itself.
1020c24b5dfaSDave Chinner 	 */
1021b652afd9SDave Chinner 	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1022b652afd9SDave Chinner 	if (!error)
1023f2d40141SChristian Brauner 		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1024b652afd9SDave Chinner 				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
1025d6077aa3SJan Kara 	if (error)
1026c24b5dfaSDave Chinner 		goto out_trans_cancel;
1027c24b5dfaSDave Chinner 
1028c24b5dfaSDave Chinner 	/*
1029c24b5dfaSDave Chinner 	 * Now we join the directory inode to the transaction.  We do not do it
1030b652afd9SDave Chinner 	 * earlier because xfs_dialloc might commit the previous transaction
1031c24b5dfaSDave Chinner 	 * (and release all the locks).  An error from here on will result in
1032c24b5dfaSDave Chinner 	 * the transaction cancel unlocking dp so don't do it explicitly in the
1033c24b5dfaSDave Chinner 	 * error path.
1034c24b5dfaSDave Chinner 	 */
103565523218SChristoph Hellwig 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1036c24b5dfaSDave Chinner 	unlock_dp_on_error = false;
1037c24b5dfaSDave Chinner 
1038381eee69SBrian Foster 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
103963337b63SKaixu Xia 					resblks - XFS_IALLOC_SPACE_RES(mp));
1040c24b5dfaSDave Chinner 	if (error) {
10412451337dSDave Chinner 		ASSERT(error != -ENOSPC);
10424906e215SChristoph Hellwig 		goto out_trans_cancel;
1043c24b5dfaSDave Chinner 	}
1044c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1045c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1046c24b5dfaSDave Chinner 
1047c24b5dfaSDave Chinner 	if (is_dir) {
1048c24b5dfaSDave Chinner 		error = xfs_dir_init(tp, ip, dp);
1049c24b5dfaSDave Chinner 		if (error)
1050c8eac49eSBrian Foster 			goto out_trans_cancel;
1051c24b5dfaSDave Chinner 
105291083269SEric Sandeen 		xfs_bumplink(tp, dp);
1053c24b5dfaSDave Chinner 	}
1054c24b5dfaSDave Chinner 
1055c24b5dfaSDave Chinner 	/*
1056c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1057c24b5dfaSDave Chinner 	 * create transaction goes to disk before returning to
1058c24b5dfaSDave Chinner 	 * the user.
1059c24b5dfaSDave Chinner 	 */
10600560f31aSDave Chinner 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1061c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1062c24b5dfaSDave Chinner 
1063c24b5dfaSDave Chinner 	/*
1064c24b5dfaSDave Chinner 	 * Attach the dquot(s) to the inodes and modify them incore.
1065c24b5dfaSDave Chinner 	 * The ids of the inode couldn't have changed since the new
1066c24b5dfaSDave Chinner 	 * inode has been locked ever since it was created.
1067c24b5dfaSDave Chinner 	 */
1068c24b5dfaSDave Chinner 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1069c24b5dfaSDave Chinner 
107070393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1071c24b5dfaSDave Chinner 	if (error)
1072c24b5dfaSDave Chinner 		goto out_release_inode;
1073c24b5dfaSDave Chinner 
1074c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1075c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1076c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1077c24b5dfaSDave Chinner 
1078c24b5dfaSDave Chinner 	*ipp = ip;
1079c24b5dfaSDave Chinner 	return 0;
1080c24b5dfaSDave Chinner 
1081c24b5dfaSDave Chinner  out_trans_cancel:
10824906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1083c24b5dfaSDave Chinner  out_release_inode:
1084c24b5dfaSDave Chinner 	/*
108558c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
108658c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
108758c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
1088c24b5dfaSDave Chinner 	 */
108958c90473SDave Chinner 	if (ip) {
109058c90473SDave Chinner 		xfs_finish_inode_setup(ip);
109144a8736bSDarrick J. Wong 		xfs_irele(ip);
109258c90473SDave Chinner 	}
1093f2f7b9ffSDarrick J. Wong  out_release_dquots:
1094c24b5dfaSDave Chinner 	xfs_qm_dqrele(udqp);
1095c24b5dfaSDave Chinner 	xfs_qm_dqrele(gdqp);
1096c24b5dfaSDave Chinner 	xfs_qm_dqrele(pdqp);
1097c24b5dfaSDave Chinner 
1098c24b5dfaSDave Chinner 	if (unlock_dp_on_error)
109965523218SChristoph Hellwig 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1100c24b5dfaSDave Chinner 	return error;
1101c24b5dfaSDave Chinner }
1102c24b5dfaSDave Chinner 
1103c24b5dfaSDave Chinner int
110499b6436bSZhi Yong Wu xfs_create_tmpfile(
1105f2d40141SChristian Brauner 	struct mnt_idmap	*idmap,
110699b6436bSZhi Yong Wu 	struct xfs_inode	*dp,
1107330033d6SBrian Foster 	umode_t			mode,
1108330033d6SBrian Foster 	struct xfs_inode	**ipp)
110999b6436bSZhi Yong Wu {
111099b6436bSZhi Yong Wu 	struct xfs_mount	*mp = dp->i_mount;
111199b6436bSZhi Yong Wu 	struct xfs_inode	*ip = NULL;
111299b6436bSZhi Yong Wu 	struct xfs_trans	*tp = NULL;
111399b6436bSZhi Yong Wu 	int			error;
111499b6436bSZhi Yong Wu 	prid_t                  prid;
111599b6436bSZhi Yong Wu 	struct xfs_dquot	*udqp = NULL;
111699b6436bSZhi Yong Wu 	struct xfs_dquot	*gdqp = NULL;
111799b6436bSZhi Yong Wu 	struct xfs_dquot	*pdqp = NULL;
111899b6436bSZhi Yong Wu 	struct xfs_trans_res	*tres;
111999b6436bSZhi Yong Wu 	uint			resblks;
1120b652afd9SDave Chinner 	xfs_ino_t		ino;
112199b6436bSZhi Yong Wu 
112275c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
11232451337dSDave Chinner 		return -EIO;
112499b6436bSZhi Yong Wu 
112599b6436bSZhi Yong Wu 	prid = xfs_get_initial_prid(dp);
112699b6436bSZhi Yong Wu 
112799b6436bSZhi Yong Wu 	/*
112899b6436bSZhi Yong Wu 	 * Make sure that we have allocated dquot(s) on disk.
112999b6436bSZhi Yong Wu 	 */
1130*c14329d3SChristian Brauner 	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
1131*c14329d3SChristian Brauner 			mapped_fsgid(idmap, &init_user_ns), prid,
113299b6436bSZhi Yong Wu 			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
113399b6436bSZhi Yong Wu 			&udqp, &gdqp, &pdqp);
113499b6436bSZhi Yong Wu 	if (error)
113599b6436bSZhi Yong Wu 		return error;
113699b6436bSZhi Yong Wu 
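	/*
	 * A tmpfile gets no directory entry, so the transaction only needs to
	 * reserve space for the inode allocation itself.
	 */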
113799b6436bSZhi Yong Wu 	resblks = XFS_IALLOC_SPACE_RES(mp);
113899b6436bSZhi Yong Wu 	tres = &M_RES(mp)->tr_create_tmpfile;
1139253f4911SChristoph Hellwig 
1140f2f7b9ffSDarrick J. Wong 	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1141f2f7b9ffSDarrick J. Wong 			&tp);
11424906e215SChristoph Hellwig 	if (error)
1143f2f7b9ffSDarrick J. Wong 		goto out_release_dquots;
114499b6436bSZhi Yong Wu 
1145b652afd9SDave Chinner 	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1146b652afd9SDave Chinner 	if (!error)
1147f2d40141SChristian Brauner 		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
1148b652afd9SDave Chinner 				0, 0, prid, false, &ip);
1149d6077aa3SJan Kara 	if (error)
115099b6436bSZhi Yong Wu 		goto out_trans_cancel;
115199b6436bSZhi Yong Wu 
11520560f31aSDave Chinner 	if (xfs_has_wsync(mp))
115399b6436bSZhi Yong Wu 		xfs_trans_set_sync(tp);
115499b6436bSZhi Yong Wu 
115599b6436bSZhi Yong Wu 	/*
115699b6436bSZhi Yong Wu 	 * Attach the dquot(s) to the inodes and modify them incore.
115799b6436bSZhi Yong Wu 	 * The ids of the inode couldn't have changed since the new
115899b6436bSZhi Yong Wu 	 * inode has been locked ever since it was created.
115999b6436bSZhi Yong Wu 	 */
116099b6436bSZhi Yong Wu 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
116199b6436bSZhi Yong Wu 
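	/*
	 * The tmpfile starts out with a zero link count, so put it on the AGI
	 * unlinked list right away.  It stays there until it is either linked
	 * into the namespace or finally released and freed.
	 */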
116299b6436bSZhi Yong Wu 	error = xfs_iunlink(tp, ip);
116399b6436bSZhi Yong Wu 	if (error)
11644906e215SChristoph Hellwig 		goto out_trans_cancel;
116599b6436bSZhi Yong Wu 
116670393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
116799b6436bSZhi Yong Wu 	if (error)
116899b6436bSZhi Yong Wu 		goto out_release_inode;
116999b6436bSZhi Yong Wu 
117099b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
117199b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
117299b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
117399b6436bSZhi Yong Wu 
1174330033d6SBrian Foster 	*ipp = ip;
117599b6436bSZhi Yong Wu 	return 0;
117699b6436bSZhi Yong Wu 
117799b6436bSZhi Yong Wu  out_trans_cancel:
11784906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
117999b6436bSZhi Yong Wu  out_release_inode:
118099b6436bSZhi Yong Wu 	/*
118158c90473SDave Chinner 	 * Wait until after the current transaction is aborted to finish the
118258c90473SDave Chinner 	 * setup of the inode and release the inode.  This prevents recursive
118358c90473SDave Chinner 	 * transactions and deadlocks from xfs_inactive.
118499b6436bSZhi Yong Wu 	 */
118558c90473SDave Chinner 	if (ip) {
118658c90473SDave Chinner 		xfs_finish_inode_setup(ip);
118744a8736bSDarrick J. Wong 		xfs_irele(ip);
118858c90473SDave Chinner 	}
1189f2f7b9ffSDarrick J. Wong  out_release_dquots:
119099b6436bSZhi Yong Wu 	xfs_qm_dqrele(udqp);
119199b6436bSZhi Yong Wu 	xfs_qm_dqrele(gdqp);
119299b6436bSZhi Yong Wu 	xfs_qm_dqrele(pdqp);
119399b6436bSZhi Yong Wu 
119499b6436bSZhi Yong Wu 	return error;
119599b6436bSZhi Yong Wu }
119699b6436bSZhi Yong Wu 
119799b6436bSZhi Yong Wu int
1198c24b5dfaSDave Chinner xfs_link(
1199c24b5dfaSDave Chinner 	xfs_inode_t		*tdp,
1200c24b5dfaSDave Chinner 	xfs_inode_t		*sip,
1201c24b5dfaSDave Chinner 	struct xfs_name		*target_name)
1202c24b5dfaSDave Chinner {
1203c24b5dfaSDave Chinner 	xfs_mount_t		*mp = tdp->i_mount;
1204c24b5dfaSDave Chinner 	xfs_trans_t		*tp;
1205871b9316SDarrick J. Wong 	int			error, nospace_error = 0;
1206c24b5dfaSDave Chinner 	int			resblks;
1207c24b5dfaSDave Chinner 
1208c24b5dfaSDave Chinner 	trace_xfs_link(tdp, target_name);
1209c24b5dfaSDave Chinner 
1210c19b3b05SDave Chinner 	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1211c24b5dfaSDave Chinner 
121275c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
12132451337dSDave Chinner 		return -EIO;
1214c24b5dfaSDave Chinner 
1215c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(sip);
1216c24b5dfaSDave Chinner 	if (error)
1217c24b5dfaSDave Chinner 		goto std_return;
1218c24b5dfaSDave Chinner 
1219c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(tdp);
1220c24b5dfaSDave Chinner 	if (error)
1221c24b5dfaSDave Chinner 		goto std_return;
1222c24b5dfaSDave Chinner 
1223c24b5dfaSDave Chinner 	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1224871b9316SDarrick J. Wong 	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
1225871b9316SDarrick J. Wong 			&tp, &nospace_error);
12264906e215SChristoph Hellwig 	if (error)
1227253f4911SChristoph Hellwig 		goto std_return;
1228c24b5dfaSDave Chinner 
1229c24b5dfaSDave Chinner 	/*
1230c24b5dfaSDave Chinner 	 * If we are using project inheritance, we only allow hard link
1231c24b5dfaSDave Chinner 	 * creation in our tree when the project IDs are the same; else
1232c24b5dfaSDave Chinner 	 * the tree quota mechanism could be circumvented.
1233c24b5dfaSDave Chinner 	 */
1234db07349dSChristoph Hellwig 	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1235ceaf603cSChristoph Hellwig 		     tdp->i_projid != sip->i_projid)) {
12362451337dSDave Chinner 		error = -EXDEV;
1237c24b5dfaSDave Chinner 		goto error_return;
1238c24b5dfaSDave Chinner 	}
1239c24b5dfaSDave Chinner 
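	/*
	 * If the reservation came back without any blocks (the ENOSPC
	 * fallback in xfs_trans_alloc_dir), check that the new entry fits in
	 * the existing directory blocks before going any further.
	 */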
124094f3cad5SEric Sandeen 	if (!resblks) {
124194f3cad5SEric Sandeen 		error = xfs_dir_canenter(tp, tdp, target_name);
1242c24b5dfaSDave Chinner 		if (error)
1243c24b5dfaSDave Chinner 			goto error_return;
124494f3cad5SEric Sandeen 	}
1245c24b5dfaSDave Chinner 
124654d7b5c1SDave Chinner 	/*
124754d7b5c1SDave Chinner 	 * Handle initial link state of O_TMPFILE inode
124854d7b5c1SDave Chinner 	 */
124954d7b5c1SDave Chinner 	if (VFS_I(sip)->i_nlink == 0) {
1250f40aadb2SDave Chinner 		struct xfs_perag	*pag;
1251f40aadb2SDave Chinner 
1252f40aadb2SDave Chinner 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1253f40aadb2SDave Chinner 		error = xfs_iunlink_remove(tp, pag, sip);
1254f40aadb2SDave Chinner 		xfs_perag_put(pag);
1255ab297431SZhi Yong Wu 		if (error)
12564906e215SChristoph Hellwig 			goto error_return;
1257ab297431SZhi Yong Wu 	}
1258ab297431SZhi Yong Wu 
1259c24b5dfaSDave Chinner 	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1260381eee69SBrian Foster 				   resblks);
1261c24b5dfaSDave Chinner 	if (error)
12624906e215SChristoph Hellwig 		goto error_return;
1263c24b5dfaSDave Chinner 	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1264c24b5dfaSDave Chinner 	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1265c24b5dfaSDave Chinner 
126691083269SEric Sandeen 	xfs_bumplink(tp, sip);
1267c24b5dfaSDave Chinner 
1268c24b5dfaSDave Chinner 	/*
1269c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
1270c24b5dfaSDave Chinner 	 * link transaction goes to disk before returning to
1271c24b5dfaSDave Chinner 	 * the user.
1272c24b5dfaSDave Chinner 	 */
12730560f31aSDave Chinner 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1274c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
1275c24b5dfaSDave Chinner 
127670393313SChristoph Hellwig 	return xfs_trans_commit(tp);
1277c24b5dfaSDave Chinner 
1278c24b5dfaSDave Chinner  error_return:
12794906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1280c24b5dfaSDave Chinner  std_return:
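	/*
	 * If the operation ultimately failed for lack of space, report the
	 * error that xfs_trans_alloc_dir saved when the initial block
	 * reservation failed, as that better reflects the real cause.
	 */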
1281871b9316SDarrick J. Wong 	if (error == -ENOSPC && nospace_error)
1282871b9316SDarrick J. Wong 		error = nospace_error;
1283c24b5dfaSDave Chinner 	return error;
1284c24b5dfaSDave Chinner }
1285c24b5dfaSDave Chinner 
1286363e59baSDarrick J. Wong /* Clear the reflink flag and the cowblocks tag if possible. */
1287363e59baSDarrick J. Wong static void
1288363e59baSDarrick J. Wong xfs_itruncate_clear_reflink_flags(
1289363e59baSDarrick J. Wong 	struct xfs_inode	*ip)
1290363e59baSDarrick J. Wong {
1291363e59baSDarrick J. Wong 	struct xfs_ifork	*dfork;
1292363e59baSDarrick J. Wong 	struct xfs_ifork	*cfork;
1293363e59baSDarrick J. Wong 
1294363e59baSDarrick J. Wong 	if (!xfs_is_reflink_inode(ip))
1295363e59baSDarrick J. Wong 		return;
1296732436efSDarrick J. Wong 	dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1297732436efSDarrick J. Wong 	cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
1298363e59baSDarrick J. Wong 	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
12993e09ab8fSChristoph Hellwig 		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1300363e59baSDarrick J. Wong 	if (cfork->if_bytes == 0)
1301363e59baSDarrick J. Wong 		xfs_inode_clear_cowblocks_tag(ip);
1302363e59baSDarrick J. Wong }
1303363e59baSDarrick J. Wong 
13041da177e4SLinus Torvalds /*
13058f04c47aSChristoph Hellwig  * Free up the underlying blocks past new_size.  The new size must be smaller
13068f04c47aSChristoph Hellwig  * than the current size.  This routine can be used both for the attribute and
13078f04c47aSChristoph Hellwig  * data fork, and does not modify the inode size, which is left to the caller.
13081da177e4SLinus Torvalds  *
1309f6485057SDavid Chinner  * The transaction passed to this routine must have made a permanent log
1310f6485057SDavid Chinner  * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1311f6485057SDavid Chinner  * given transaction and start new ones, so make sure everything involved in
1312f6485057SDavid Chinner  * the transaction is tidy before calling here.  Some transaction will be
1313f6485057SDavid Chinner  * returned to the caller to be committed.  The incoming transaction must
1314f6485057SDavid Chinner  * already include the inode, and both inode locks must be held exclusively.
1315f6485057SDavid Chinner  * The inode must also be "held" within the transaction.  On return the inode
1316f6485057SDavid Chinner  * will be "held" within the returned transaction.  This routine does NOT
1317f6485057SDavid Chinner  * require any disk space to be reserved for it within the transaction.
13181da177e4SLinus Torvalds  *
1319f6485057SDavid Chinner  * If we get an error, we must return with the inode locked and linked into the
1320f6485057SDavid Chinner  * current transaction. This keeps things simple for the higher level code,
1321f6485057SDavid Chinner  * because it always knows that the inode is locked and held in the transaction
1322f6485057SDavid Chinner  * that returns to it whether errors occur or not.  We don't mark the inode
1323f6485057SDavid Chinner  * dirty on error so that transactions can be easily aborted if possible.
13241da177e4SLinus Torvalds  */
13251da177e4SLinus Torvalds int
13264e529339SBrian Foster xfs_itruncate_extents_flags(
13278f04c47aSChristoph Hellwig 	struct xfs_trans	**tpp,
13288f04c47aSChristoph Hellwig 	struct xfs_inode	*ip,
13298f04c47aSChristoph Hellwig 	int			whichfork,
133013b86fc3SBrian Foster 	xfs_fsize_t		new_size,
13314e529339SBrian Foster 	int			flags)
13321da177e4SLinus Torvalds {
13338f04c47aSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
13348f04c47aSChristoph Hellwig 	struct xfs_trans	*tp = *tpp;
13351da177e4SLinus Torvalds 	xfs_fileoff_t		first_unmap_block;
13368f04c47aSChristoph Hellwig 	xfs_filblks_t		unmap_len;
13378f04c47aSChristoph Hellwig 	int			error = 0;
13381da177e4SLinus Torvalds 
13390b56185bSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
13400b56185bSChristoph Hellwig 	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
13410b56185bSChristoph Hellwig 	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1342ce7ae151SChristoph Hellwig 	ASSERT(new_size <= XFS_ISIZE(ip));
13438f04c47aSChristoph Hellwig 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
13441da177e4SLinus Torvalds 	ASSERT(ip->i_itemp != NULL);
1345898621d5SChristoph Hellwig 	ASSERT(ip->i_itemp->ili_lock_flags == 0);
13461da177e4SLinus Torvalds 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
13471da177e4SLinus Torvalds 
1348673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_start(ip, new_size);
1349673e8e59SChristoph Hellwig 
13504e529339SBrian Foster 	flags |= xfs_bmapi_aflag(whichfork);
135113b86fc3SBrian Foster 
13521da177e4SLinus Torvalds 	/*
13531da177e4SLinus Torvalds 	 * Since it is possible for space to become allocated beyond
13541da177e4SLinus Torvalds 	 * the end of the file (in a crash where the space is allocated
13551da177e4SLinus Torvalds 	 * but the inode size is not yet updated), simply remove any
13561da177e4SLinus Torvalds 	 * blocks which show up between the new EOF and the maximum
13574bbb04abSDarrick J. Wong 	 * possible file size.
13584bbb04abSDarrick J. Wong 	 *
13594bbb04abSDarrick J. Wong 	 * We have to free all the blocks to the bmbt maximum offset, even if
13604bbb04abSDarrick J. Wong 	 * the page cache can't scale that far.
13611da177e4SLinus Torvalds 	 */
13628f04c47aSChristoph Hellwig 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
136333005fd0SDarrick J. Wong 	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
13644bbb04abSDarrick J. Wong 		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
13658f04c47aSChristoph Hellwig 		return 0;
13664bbb04abSDarrick J. Wong 	}
13678f04c47aSChristoph Hellwig 
13684bbb04abSDarrick J. Wong 	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
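	/*
	 * Unmap a few extents at a time (XFS_ITRUNC_MAX_EXTENTS per pass),
	 * finishing the deferred extent frees and rolling the transaction
	 * between passes so a large truncate never overruns the log
	 * reservation.
	 */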
13694bbb04abSDarrick J. Wong 	while (unmap_len > 0) {
137002dff7bfSBrian Foster 		ASSERT(tp->t_firstblock == NULLFSBLOCK);
13714bbb04abSDarrick J. Wong 		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
13724bbb04abSDarrick J. Wong 				flags, XFS_ITRUNC_MAX_EXTENTS);
13738f04c47aSChristoph Hellwig 		if (error)
1374d5a2e289SBrian Foster 			goto out;
13751da177e4SLinus Torvalds 
13766dd379c7SBrian Foster 		/* free the just unmapped extents */
13779e28a242SBrian Foster 		error = xfs_defer_finish(&tp);
13788f04c47aSChristoph Hellwig 		if (error)
13799b1f4e98SBrian Foster 			goto out;
13801da177e4SLinus Torvalds 	}
13818f04c47aSChristoph Hellwig 
13824919d42aSDarrick J. Wong 	if (whichfork == XFS_DATA_FORK) {
1383aa8968f2SDarrick J. Wong 		/* Remove all pending CoW reservations. */
13844919d42aSDarrick J. Wong 		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
13854bbb04abSDarrick J. Wong 				first_unmap_block, XFS_MAX_FILEOFF, true);
1386aa8968f2SDarrick J. Wong 		if (error)
1387aa8968f2SDarrick J. Wong 			goto out;
1388aa8968f2SDarrick J. Wong 
1389363e59baSDarrick J. Wong 		xfs_itruncate_clear_reflink_flags(ip);
13904919d42aSDarrick J. Wong 	}
1391aa8968f2SDarrick J. Wong 
1392673e8e59SChristoph Hellwig 	/*
1393673e8e59SChristoph Hellwig 	 * Always re-log the inode so that our permanent transaction can keep
1394673e8e59SChristoph Hellwig 	 * on rolling it forward in the log.
1395673e8e59SChristoph Hellwig 	 */
1396673e8e59SChristoph Hellwig 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1397673e8e59SChristoph Hellwig 
1398673e8e59SChristoph Hellwig 	trace_xfs_itruncate_extents_end(ip, new_size);
1399673e8e59SChristoph Hellwig 
14008f04c47aSChristoph Hellwig out:
14018f04c47aSChristoph Hellwig 	*tpp = tp;
14028f04c47aSChristoph Hellwig 	return error;
14038f04c47aSChristoph Hellwig }
14048f04c47aSChristoph Hellwig 
1405c24b5dfaSDave Chinner int
1406c24b5dfaSDave Chinner xfs_release(
1407c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1408c24b5dfaSDave Chinner {
1409c24b5dfaSDave Chinner 	xfs_mount_t	*mp = ip->i_mount;
14107d88329eSDarrick J. Wong 	int		error = 0;
1411c24b5dfaSDave Chinner 
1412c19b3b05SDave Chinner 	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1413c24b5dfaSDave Chinner 		return 0;
1414c24b5dfaSDave Chinner 
1415c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
14162e973b2cSDave Chinner 	if (xfs_is_readonly(mp))
1417c24b5dfaSDave Chinner 		return 0;
1418c24b5dfaSDave Chinner 
141975c8c50fSDave Chinner 	if (!xfs_is_shutdown(mp)) {
1420c24b5dfaSDave Chinner 		int truncated;
1421c24b5dfaSDave Chinner 
1422c24b5dfaSDave Chinner 		/*
1423c24b5dfaSDave Chinner 		 * If we previously truncated this file and removed old data
1424c24b5dfaSDave Chinner 		 * in the process, we want to initiate "early" writeout on
1425c24b5dfaSDave Chinner 		 * the last close.  This is an attempt to combat the notorious
1426c24b5dfaSDave Chinner 		 * NULL files problem which is particularly noticeable from a
1427c24b5dfaSDave Chinner 		 * truncate down, buffered (re-)write (delalloc), followed by
1428c24b5dfaSDave Chinner 		 * a crash.  What we are effectively doing here is
1429c24b5dfaSDave Chinner 		 * significantly reducing the time window where we'd otherwise
1430c24b5dfaSDave Chinner 		 * be exposed to that problem.
1431c24b5dfaSDave Chinner 		 */
1432c24b5dfaSDave Chinner 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1433c24b5dfaSDave Chinner 		if (truncated) {
1434c24b5dfaSDave Chinner 			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1435eac152b4SDave Chinner 			if (ip->i_delayed_blks > 0) {
14362451337dSDave Chinner 				error = filemap_flush(VFS_I(ip)->i_mapping);
1437c24b5dfaSDave Chinner 				if (error)
1438c24b5dfaSDave Chinner 					return error;
1439c24b5dfaSDave Chinner 			}
1440c24b5dfaSDave Chinner 		}
1441c24b5dfaSDave Chinner 	}
1442c24b5dfaSDave Chinner 
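	/*
	 * Unlinked inodes are cleaned up by xfs_inactive() when the last
	 * reference goes away, so there is nothing more to do on release.
	 */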
144354d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink == 0)
1444c24b5dfaSDave Chinner 		return 0;
1445c24b5dfaSDave Chinner 
14467d88329eSDarrick J. Wong 	/*
14477d88329eSDarrick J. Wong 	 * If we can't get the iolock just skip truncating the blocks past EOF
14487d88329eSDarrick J. Wong 	 * because we could deadlock with the mmap_lock otherwise. We'll get
14497d88329eSDarrick J. Wong 	 * another chance to drop them once the last reference to the inode is
14507d88329eSDarrick J. Wong 	 * dropped, so we'll never leak blocks permanently.
14517d88329eSDarrick J. Wong 	 */
14527d88329eSDarrick J. Wong 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
14537d88329eSDarrick J. Wong 		return 0;
1454c24b5dfaSDave Chinner 
14557d88329eSDarrick J. Wong 	if (xfs_can_free_eofblocks(ip, false)) {
1456c24b5dfaSDave Chinner 		/*
1457a36b9261SBrian Foster 		 * If the inode is being opened, written and closed
1458a36b9261SBrian Foster 		 * frequently and we have delayed allocation blocks outstanding
1459a36b9261SBrian Foster 		 * (e.g. streaming writes from the NFS server), truncating the
1460a36b9261SBrian Foster 		 * blocks past EOF will cause fragmentation to occur.
1461a36b9261SBrian Foster 		 *
1462a36b9261SBrian Foster 		 * In this case don't do the truncation, but we have to be
1463a36b9261SBrian Foster 		 * careful how we detect this case. Blocks beyond EOF show up as
1464a36b9261SBrian Foster 		 * i_delayed_blks even when the inode is clean, so we need to
1465a36b9261SBrian Foster 		 * truncate them away first before checking for a dirty release.
1466a36b9261SBrian Foster 		 * Hence on the first dirty close we will still remove the
1467a36b9261SBrian Foster 		 * speculative allocation, but after that we will leave it in
1468a36b9261SBrian Foster 		 * place.
1469a36b9261SBrian Foster 		 */
1470a36b9261SBrian Foster 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
14717d88329eSDarrick J. Wong 			goto out_unlock;
14727d88329eSDarrick J. Wong 
1473a36b9261SBrian Foster 		error = xfs_free_eofblocks(ip);
1474a36b9261SBrian Foster 		if (error)
14757d88329eSDarrick J. Wong 			goto out_unlock;
1476c24b5dfaSDave Chinner 
1477c24b5dfaSDave Chinner 		/* delalloc blocks after truncation means it really is dirty */
1478c24b5dfaSDave Chinner 		if (ip->i_delayed_blks)
1479c24b5dfaSDave Chinner 			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1480c24b5dfaSDave Chinner 	}
14817d88329eSDarrick J. Wong 
14827d88329eSDarrick J. Wong out_unlock:
14837d88329eSDarrick J. Wong 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
14847d88329eSDarrick J. Wong 	return error;
1485c24b5dfaSDave Chinner }
1486c24b5dfaSDave Chinner 
1487c24b5dfaSDave Chinner /*
1488f7be2d7fSBrian Foster  * xfs_inactive_truncate
1489f7be2d7fSBrian Foster  *
1490f7be2d7fSBrian Foster  * Called to perform a truncate when an inode becomes unlinked.
1491f7be2d7fSBrian Foster  */
1492f7be2d7fSBrian Foster STATIC int
1493f7be2d7fSBrian Foster xfs_inactive_truncate(
1494f7be2d7fSBrian Foster 	struct xfs_inode *ip)
1495f7be2d7fSBrian Foster {
1496f7be2d7fSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
1497f7be2d7fSBrian Foster 	struct xfs_trans	*tp;
1498f7be2d7fSBrian Foster 	int			error;
1499f7be2d7fSBrian Foster 
1500253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1501f7be2d7fSBrian Foster 	if (error) {
150275c8c50fSDave Chinner 		ASSERT(xfs_is_shutdown(mp));
1503f7be2d7fSBrian Foster 		return error;
1504f7be2d7fSBrian Foster 	}
1505f7be2d7fSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1506f7be2d7fSBrian Foster 	xfs_trans_ijoin(tp, ip, 0);
1507f7be2d7fSBrian Foster 
1508f7be2d7fSBrian Foster 	/*
1509f7be2d7fSBrian Foster 	 * Log the inode size first to prevent stale data exposure in the event
1510f7be2d7fSBrian Foster 	 * of a system crash before the truncate completes. See the related
151169bca807SJan Kara 	 * comment in xfs_vn_setattr_size() for details.
1512f7be2d7fSBrian Foster 	 */
151313d2c10bSChristoph Hellwig 	ip->i_disk_size = 0;
1514f7be2d7fSBrian Foster 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1515f7be2d7fSBrian Foster 
1516f7be2d7fSBrian Foster 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1517f7be2d7fSBrian Foster 	if (error)
1518f7be2d7fSBrian Foster 		goto error_trans_cancel;
1519f7be2d7fSBrian Foster 
1520daf83964SChristoph Hellwig 	ASSERT(ip->i_df.if_nextents == 0);
1521f7be2d7fSBrian Foster 
152270393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1523f7be2d7fSBrian Foster 	if (error)
1524f7be2d7fSBrian Foster 		goto error_unlock;
1525f7be2d7fSBrian Foster 
1526f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1527f7be2d7fSBrian Foster 	return 0;
1528f7be2d7fSBrian Foster 
1529f7be2d7fSBrian Foster error_trans_cancel:
15304906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
1531f7be2d7fSBrian Foster error_unlock:
1532f7be2d7fSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1533f7be2d7fSBrian Foster 	return error;
1534f7be2d7fSBrian Foster }
1535f7be2d7fSBrian Foster 
1536f7be2d7fSBrian Foster /*
153788877d2bSBrian Foster  * xfs_inactive_ifree()
153888877d2bSBrian Foster  *
153988877d2bSBrian Foster  * Perform the inode free when an inode is unlinked.
154088877d2bSBrian Foster  */
154188877d2bSBrian Foster STATIC int
154288877d2bSBrian Foster xfs_inactive_ifree(
154388877d2bSBrian Foster 	struct xfs_inode *ip)
154488877d2bSBrian Foster {
154588877d2bSBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
154688877d2bSBrian Foster 	struct xfs_trans	*tp;
154788877d2bSBrian Foster 	int			error;
154888877d2bSBrian Foster 
15499d43b180SBrian Foster 	/*
155076d771b4SChristoph Hellwig 	 * We try to use a per-AG reservation for any block needed by the finobt
155176d771b4SChristoph Hellwig 	 * tree, but as the finobt feature predates the per-AG reservation
155276d771b4SChristoph Hellwig 	 * support a degraded file system might not have enough space for the
155376d771b4SChristoph Hellwig 	 * reservation at mount time.  In that case try to dip into the reserved
155476d771b4SChristoph Hellwig 	 * pool and pray.
15559d43b180SBrian Foster 	 *
15569d43b180SBrian Foster 	 * Send a warning if the reservation does happen to fail, as the inode
15579d43b180SBrian Foster 	 * now remains allocated and sits on the unlinked list until the fs is
15589d43b180SBrian Foster 	 * repaired.
15599d43b180SBrian Foster 	 */
1560e1f6ca11SDarrick J. Wong 	if (unlikely(mp->m_finobt_nores)) {
1561253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
156276d771b4SChristoph Hellwig 				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
156376d771b4SChristoph Hellwig 				&tp);
156476d771b4SChristoph Hellwig 	} else {
156576d771b4SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
156676d771b4SChristoph Hellwig 	}
156788877d2bSBrian Foster 	if (error) {
15682451337dSDave Chinner 		if (error == -ENOSPC) {
15699d43b180SBrian Foster 			xfs_warn_ratelimited(mp,
15709d43b180SBrian Foster 			"Failed to remove inode(s) from unlinked list. "
15719d43b180SBrian Foster 			"Please free space, unmount and run xfs_repair.");
15729d43b180SBrian Foster 		} else {
157375c8c50fSDave Chinner 			ASSERT(xfs_is_shutdown(mp));
15749d43b180SBrian Foster 		}
157588877d2bSBrian Foster 		return error;
157688877d2bSBrian Foster 	}
157788877d2bSBrian Foster 
157896355d5aSDave Chinner 	/*
157996355d5aSDave Chinner 	 * We do not hold the inode locked across the entire rolling transaction
158096355d5aSDave Chinner 	 * here. We only need to hold it for the first transaction that
158196355d5aSDave Chinner 	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
158296355d5aSDave Chinner 	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
158396355d5aSDave Chinner 	 * here breaks the relationship between cluster buffer invalidation and
158496355d5aSDave Chinner 	 * stale inode invalidation on cluster buffer item journal commit
158596355d5aSDave Chinner 	 * completion, and can result in leaving dirty stale inodes hanging
158696355d5aSDave Chinner 	 * around in memory.
158796355d5aSDave Chinner 	 *
158896355d5aSDave Chinner 	 * We have no need for serialising this inode operation against other
158996355d5aSDave Chinner 	 * operations - we freed the inode and hence reallocation is required
159096355d5aSDave Chinner 	 * and that will serialise on reallocating the space the deferops need
159196355d5aSDave Chinner 	 * to free. Hence we can unlock the inode on the first commit of
159296355d5aSDave Chinner 	 * the transaction rather than roll it right through the deferops. This
159396355d5aSDave Chinner 	 * avoids relogging the XFS_ISTALE inode.
159496355d5aSDave Chinner 	 *
159596355d5aSDave Chinner 	 * We check that xfs_ifree() hasn't grown an internal transaction roll
159696355d5aSDave Chinner 	 * by asserting that the inode is still locked when it returns.
159796355d5aSDave Chinner 	 */
159888877d2bSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
159996355d5aSDave Chinner 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
160088877d2bSBrian Foster 
16010e0417f3SBrian Foster 	error = xfs_ifree(tp, ip);
160296355d5aSDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
160388877d2bSBrian Foster 	if (error) {
160488877d2bSBrian Foster 		/*
160588877d2bSBrian Foster 		 * If we fail to free the inode, shut down.  The cancel
160688877d2bSBrian Foster 		 * might do that, but we need to make sure.  Otherwise the
160788877d2bSBrian Foster 		 * inode might be lost for a long time or forever.
160888877d2bSBrian Foster 		 */
160975c8c50fSDave Chinner 		if (!xfs_is_shutdown(mp)) {
161088877d2bSBrian Foster 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
161188877d2bSBrian Foster 				__func__, error);
161288877d2bSBrian Foster 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
161388877d2bSBrian Foster 		}
16144906e215SChristoph Hellwig 		xfs_trans_cancel(tp);
161588877d2bSBrian Foster 		return error;
161688877d2bSBrian Foster 	}
161788877d2bSBrian Foster 
161888877d2bSBrian Foster 	/*
161988877d2bSBrian Foster 	 * Credit the quota account(s). The inode is gone.
162088877d2bSBrian Foster 	 */
162188877d2bSBrian Foster 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
162288877d2bSBrian Foster 
162388877d2bSBrian Foster 	/*
1624d4a97a04SBrian Foster 	 * Just ignore errors at this point.  There is nothing we can do except
1625d4a97a04SBrian Foster 	 * to try to keep going. Make sure it's not a silent error.
162688877d2bSBrian Foster 	 */
162770393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
162888877d2bSBrian Foster 	if (error)
162988877d2bSBrian Foster 		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
163088877d2bSBrian Foster 			__func__, error);
163188877d2bSBrian Foster 
163288877d2bSBrian Foster 	return 0;
163388877d2bSBrian Foster }
163488877d2bSBrian Foster 
163588877d2bSBrian Foster /*
163662af7d54SDarrick J. Wong  * Returns true if we need to update the on-disk metadata before we can free
163762af7d54SDarrick J. Wong  * the memory used by this inode.  Updates include freeing post-eof
163862af7d54SDarrick J. Wong  * preallocations; freeing COW staging extents; and marking the inode free in
163962af7d54SDarrick J. Wong  * the inobt if it is on the unlinked list.
164062af7d54SDarrick J. Wong  */
164162af7d54SDarrick J. Wong bool
164262af7d54SDarrick J. Wong xfs_inode_needs_inactive(
164362af7d54SDarrick J. Wong 	struct xfs_inode	*ip)
164462af7d54SDarrick J. Wong {
164562af7d54SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
1646732436efSDarrick J. Wong 	struct xfs_ifork	*cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
164762af7d54SDarrick J. Wong 
164862af7d54SDarrick J. Wong 	/*
164962af7d54SDarrick J. Wong 	 * If the inode is already free, then there can be nothing
165062af7d54SDarrick J. Wong 	 * to clean up here.
165162af7d54SDarrick J. Wong 	 */
165262af7d54SDarrick J. Wong 	if (VFS_I(ip)->i_mode == 0)
165362af7d54SDarrick J. Wong 		return false;
165462af7d54SDarrick J. Wong 
165562af7d54SDarrick J. Wong 	/* If this is a read-only mount, don't do this (would generate I/O) */
16562e973b2cSDave Chinner 	if (xfs_is_readonly(mp))
165762af7d54SDarrick J. Wong 		return false;
165862af7d54SDarrick J. Wong 
165962af7d54SDarrick J. Wong 	/* If the log isn't running, push inodes straight to reclaim. */
166075c8c50fSDave Chinner 	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
166162af7d54SDarrick J. Wong 		return false;
166262af7d54SDarrick J. Wong 
166362af7d54SDarrick J. Wong 	/* Metadata inodes require explicit resource cleanup. */
166462af7d54SDarrick J. Wong 	if (xfs_is_metadata_inode(ip))
166562af7d54SDarrick J. Wong 		return false;
166662af7d54SDarrick J. Wong 
166762af7d54SDarrick J. Wong 	/* Want to clean out the cow blocks if there are any. */
166862af7d54SDarrick J. Wong 	if (cow_ifp && cow_ifp->if_bytes > 0)
166962af7d54SDarrick J. Wong 		return true;
167062af7d54SDarrick J. Wong 
167162af7d54SDarrick J. Wong 	/* Unlinked files must be freed. */
167262af7d54SDarrick J. Wong 	if (VFS_I(ip)->i_nlink == 0)
167362af7d54SDarrick J. Wong 		return true;
167462af7d54SDarrick J. Wong 
167562af7d54SDarrick J. Wong 	/*
167662af7d54SDarrick J. Wong 	 * This file isn't being freed, so check if there are post-eof blocks
167762af7d54SDarrick J. Wong 	 * to free.  @force is true because we are evicting an inode from the
167862af7d54SDarrick J. Wong 	 * cache.  Post-eof blocks must be freed, lest we end up with broken
167962af7d54SDarrick J. Wong 	 * free space accounting.
168062af7d54SDarrick J. Wong 	 *
168162af7d54SDarrick J. Wong 	 * Note: don't bother with iolock here since lockdep complains about
168262af7d54SDarrick J. Wong 	 * acquiring it in reclaim context. We have the only reference to the
168362af7d54SDarrick J. Wong 	 * inode at this point anyways.
168462af7d54SDarrick J. Wong 	 */
168562af7d54SDarrick J. Wong 	return xfs_can_free_eofblocks(ip, true);
168662af7d54SDarrick J. Wong }
168762af7d54SDarrick J. Wong 
168862af7d54SDarrick J. Wong /*
1689c24b5dfaSDave Chinner  * xfs_inactive
1690c24b5dfaSDave Chinner  *
1691c24b5dfaSDave Chinner  * This is called when the reference count for the vnode
1692c24b5dfaSDave Chinner  * goes to zero.  If the file has been unlinked, then it must
1693c24b5dfaSDave Chinner  * now be truncated.  Also, we clear all of the read-ahead state
1694c24b5dfaSDave Chinner  * kept for the inode here since the file is now closed.
1695c24b5dfaSDave Chinner  */
169674564fb4SBrian Foster void
1697c24b5dfaSDave Chinner xfs_inactive(
1698c24b5dfaSDave Chinner 	xfs_inode_t	*ip)
1699c24b5dfaSDave Chinner {
17003d3c8b52SJie Liu 	struct xfs_mount	*mp;
1701c24b5dfaSDave Chinner 	int			error;
1702c24b5dfaSDave Chinner 	int			truncate = 0;
1703c24b5dfaSDave Chinner 
1704c24b5dfaSDave Chinner 	/*
1705c24b5dfaSDave Chinner 	 * If the inode is already free, then there can be nothing
1706c24b5dfaSDave Chinner 	 * to clean up here.
1707c24b5dfaSDave Chinner 	 */
1708c19b3b05SDave Chinner 	if (VFS_I(ip)->i_mode == 0) {
1709c24b5dfaSDave Chinner 		ASSERT(ip->i_df.if_broot_bytes == 0);
17103ea06d73SDarrick J. Wong 		goto out;
1711c24b5dfaSDave Chinner 	}
1712c24b5dfaSDave Chinner 
1713c24b5dfaSDave Chinner 	mp = ip->i_mount;
171417c12bcdSDarrick J. Wong 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1715c24b5dfaSDave Chinner 
1716c24b5dfaSDave Chinner 	/* If this is a read-only mount, don't do this (would generate I/O) */
17172e973b2cSDave Chinner 	if (xfs_is_readonly(mp))
17183ea06d73SDarrick J. Wong 		goto out;
1719c24b5dfaSDave Chinner 
1720383e32b0SDarrick J. Wong 	/* Metadata inodes require explicit resource cleanup. */
1721383e32b0SDarrick J. Wong 	if (xfs_is_metadata_inode(ip))
17223ea06d73SDarrick J. Wong 		goto out;
1723383e32b0SDarrick J. Wong 
17246231848cSDarrick J. Wong 	/* Try to clean out the cow blocks if there are any. */
172551d62690SChristoph Hellwig 	if (xfs_inode_has_cow_data(ip))
17266231848cSDarrick J. Wong 		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
17276231848cSDarrick J. Wong 
172854d7b5c1SDave Chinner 	if (VFS_I(ip)->i_nlink != 0) {
1729c24b5dfaSDave Chinner 		/*
1730c24b5dfaSDave Chinner 		 * force is true because we are evicting an inode from the
1731c24b5dfaSDave Chinner 		 * cache. Post-eof blocks must be freed, lest we end up with
1732c24b5dfaSDave Chinner 		 * broken free space accounting.
17333b4683c2SBrian Foster 		 *
17343b4683c2SBrian Foster 		 * Note: don't bother with iolock here since lockdep complains
17353b4683c2SBrian Foster 		 * about acquiring it in reclaim context. We have the only
17363b4683c2SBrian Foster 		 * reference to the inode at this point anyways.
1737c24b5dfaSDave Chinner 		 */
17383b4683c2SBrian Foster 		if (xfs_can_free_eofblocks(ip, true))
1739a36b9261SBrian Foster 			xfs_free_eofblocks(ip);
174074564fb4SBrian Foster 
17413ea06d73SDarrick J. Wong 		goto out;
1742c24b5dfaSDave Chinner 	}
1743c24b5dfaSDave Chinner 
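	/*
	 * A regular file that still has an on-disk or in-core size, mapped
	 * extents or outstanding delalloc blocks needs its data freed before
	 * the inode itself can be freed.
	 */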
1744c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode) &&
174513d2c10bSChristoph Hellwig 	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1746daf83964SChristoph Hellwig 	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1747c24b5dfaSDave Chinner 		truncate = 1;
1748c24b5dfaSDave Chinner 
1749c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
1750c24b5dfaSDave Chinner 	if (error)
17513ea06d73SDarrick J. Wong 		goto out;
1752c24b5dfaSDave Chinner 
1753c19b3b05SDave Chinner 	if (S_ISLNK(VFS_I(ip)->i_mode))
175436b21ddeSBrian Foster 		error = xfs_inactive_symlink(ip);
1755f7be2d7fSBrian Foster 	else if (truncate)
1756f7be2d7fSBrian Foster 		error = xfs_inactive_truncate(ip);
175736b21ddeSBrian Foster 	if (error)
17583ea06d73SDarrick J. Wong 		goto out;
1759c24b5dfaSDave Chinner 
1760c24b5dfaSDave Chinner 	/*
1761c24b5dfaSDave Chinner 	 * If there are attributes associated with the file then blow them away
1762c24b5dfaSDave Chinner 	 * now.  The code calls a routine that recursively deconstructs the
17636dfe5a04SDave Chinner 	 * attribute fork. It also blows away the in-core attribute fork.
1764c24b5dfaSDave Chinner 	 */
1765932b42c6SDarrick J. Wong 	if (xfs_inode_has_attr_fork(ip)) {
1766c24b5dfaSDave Chinner 		error = xfs_attr_inactive(ip);
1767c24b5dfaSDave Chinner 		if (error)
17683ea06d73SDarrick J. Wong 			goto out;
1769c24b5dfaSDave Chinner 	}
1770c24b5dfaSDave Chinner 
17717821ea30SChristoph Hellwig 	ASSERT(ip->i_forkoff == 0);
1772c24b5dfaSDave Chinner 
1773c24b5dfaSDave Chinner 	/*
1774c24b5dfaSDave Chinner 	 * Free the inode.
1775c24b5dfaSDave Chinner 	 */
17763ea06d73SDarrick J. Wong 	xfs_inactive_ifree(ip);
1777c24b5dfaSDave Chinner 
17783ea06d73SDarrick J. Wong out:
1779c24b5dfaSDave Chinner 	/*
17803ea06d73SDarrick J. Wong 	 * We're done making metadata updates for this inode, so we can release
17813ea06d73SDarrick J. Wong 	 * the attached dquots.
1782c24b5dfaSDave Chinner 	 */
1783c24b5dfaSDave Chinner 	xfs_qm_dqdetach(ip);
1784c24b5dfaSDave Chinner }
1785c24b5dfaSDave Chinner 
17861da177e4SLinus Torvalds /*
17879b247179SDarrick J. Wong  * In-Core Unlinked List Lookups
17889b247179SDarrick J. Wong  * =============================
17899b247179SDarrick J. Wong  *
17909b247179SDarrick J. Wong  * Every inode is supposed to be reachable from some other piece of metadata
17919b247179SDarrick J. Wong  * with the exception of the root directory.  Inodes with a connection to a
17929b247179SDarrick J. Wong  * file descriptor but not linked from anywhere in the on-disk directory tree
17939b247179SDarrick J. Wong  * are collectively known as unlinked inodes, though the filesystem itself
17949b247179SDarrick J. Wong  * maintains links to these inodes so that on-disk metadata are consistent.
17959b247179SDarrick J. Wong  *
17969b247179SDarrick J. Wong  * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
17979b247179SDarrick J. Wong  * header contains a number of buckets that point to an inode, and each inode
17989b247179SDarrick J. Wong  * record has a pointer to the next inode in the hash chain.  This
17999b247179SDarrick J. Wong  * singly-linked list causes scaling problems in the iunlink remove function
18009b247179SDarrick J. Wong  * because we must walk that list to find the inode that points to the inode
18019b247179SDarrick J. Wong  * being removed from the unlinked hash bucket list.
18029b247179SDarrick J. Wong  *
18032fd26cc0SDave Chinner  * Hence we keep an in-memory double linked list to link each inode on an
18042fd26cc0SDave Chinner  * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer
18052fd26cc0SDave Chinner  * based lists would require having 64 list heads in the perag, one for each
18062fd26cc0SDave Chinner  * list. This is expensive in terms of memory (think millions of AGs) and cache
18072fd26cc0SDave Chinner  * misses on lookups. Instead, use the fact that inodes on the unlinked list
18082fd26cc0SDave Chinner  * must be referenced at the VFS level to keep them on the list and hence we
18092fd26cc0SDave Chinner  * have an existence guarantee for inodes on the unlinked list.
18109b247179SDarrick J. Wong  *
18112fd26cc0SDave Chinner  * Given we have an existence guarantee, we can use lockless inode cache lookups
18122fd26cc0SDave Chinner  * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
18132fd26cc0SDave Chinner  * for the double linked unlinked list, and we don't need any extra locking to
18142fd26cc0SDave Chinner  * keep the list safe as all manipulations are done under the AGI buffer lock.
18152fd26cc0SDave Chinner  * Keeping the list up to date does not require memory allocation, just finding
18162fd26cc0SDave Chinner  * the XFS inode and updating the next/prev unlinked list aginos.
18179b247179SDarrick J. Wong  */
18189b247179SDarrick J. Wong 
18199b247179SDarrick J. Wong /*
1820a83d5a8bSDave Chinner  * Find an inode on the unlinked list. This does not take references to the
1821a83d5a8bSDave Chinner  * inode as we have existence guarantees by holding the AGI buffer lock and that
1822a83d5a8bSDave Chinner  * only unlinked, referenced inodes can be on the unlinked inode list.  If we
1823a83d5a8bSDave Chinner  * don't find the inode in cache, then let the caller handle the situation.
18249b247179SDarrick J. Wong  */
1825a83d5a8bSDave Chinner static struct xfs_inode *
1826a83d5a8bSDave Chinner xfs_iunlink_lookup(
18279b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18289b247179SDarrick J. Wong 	xfs_agino_t		agino)
18299b247179SDarrick J. Wong {
1830a83d5a8bSDave Chinner 	struct xfs_inode	*ip;
18319b247179SDarrick J. Wong 
1832a83d5a8bSDave Chinner 	rcu_read_lock();
1833a83d5a8bSDave Chinner 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
18349b247179SDarrick J. Wong 
18359b247179SDarrick J. Wong 	/*
1836a83d5a8bSDave Chinner 	 * Finding no inode in memory, or one in RCU freeing limbo, should not
1837a83d5a8bSDave Chinner 	 * happen.  Warn about this and let the caller handle the failure.
18389b247179SDarrick J. Wong 	 */
1839a83d5a8bSDave Chinner 	if (WARN_ON_ONCE(!ip || !ip->i_ino)) {
1840a83d5a8bSDave Chinner 		rcu_read_unlock();
1841a83d5a8bSDave Chinner 		return NULL;
1842a83d5a8bSDave Chinner 	}
1843a83d5a8bSDave Chinner 	ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1844a83d5a8bSDave Chinner 	rcu_read_unlock();
1845a83d5a8bSDave Chinner 	return ip;
1846a83d5a8bSDave Chinner }
1847a83d5a8bSDave Chinner 
18482fd26cc0SDave Chinner /* Update the prev pointer of the next agino. */
18499b247179SDarrick J. Wong static int
18502fd26cc0SDave Chinner xfs_iunlink_update_backref(
18519b247179SDarrick J. Wong 	struct xfs_perag	*pag,
18529b247179SDarrick J. Wong 	xfs_agino_t		prev_agino,
18532fd26cc0SDave Chinner 	xfs_agino_t		next_agino)
18549b247179SDarrick J. Wong {
18552fd26cc0SDave Chinner 	struct xfs_inode	*ip;
18569b247179SDarrick J. Wong 
18572fd26cc0SDave Chinner 	/* No update necessary if we are at the end of the list. */
18582fd26cc0SDave Chinner 	if (next_agino == NULLAGINO)
18599b247179SDarrick J. Wong 		return 0;
18609b247179SDarrick J. Wong 
18612fd26cc0SDave Chinner 	ip = xfs_iunlink_lookup(pag, next_agino);
18622fd26cc0SDave Chinner 	if (!ip)
18632fd26cc0SDave Chinner 		return -EFSCORRUPTED;
18642fd26cc0SDave Chinner 	ip->i_prev_unlinked = prev_agino;
18659b247179SDarrick J. Wong 	return 0;
18669b247179SDarrick J. Wong }
18679b247179SDarrick J. Wong 
18689b247179SDarrick J. Wong /*
18699a4a5118SDarrick J. Wong  * Point the AGI unlinked bucket at an inode and log the results.  The caller
18709a4a5118SDarrick J. Wong  * is responsible for validating the old value.
18719a4a5118SDarrick J. Wong  */
18729a4a5118SDarrick J. Wong STATIC int
18739a4a5118SDarrick J. Wong xfs_iunlink_update_bucket(
18749a4a5118SDarrick J. Wong 	struct xfs_trans	*tp,
1875f40aadb2SDave Chinner 	struct xfs_perag	*pag,
18769a4a5118SDarrick J. Wong 	struct xfs_buf		*agibp,
18779a4a5118SDarrick J. Wong 	unsigned int		bucket_index,
18789a4a5118SDarrick J. Wong 	xfs_agino_t		new_agino)
18799a4a5118SDarrick J. Wong {
1880370c782bSChristoph Hellwig 	struct xfs_agi		*agi = agibp->b_addr;
18819a4a5118SDarrick J. Wong 	xfs_agino_t		old_value;
18829a4a5118SDarrick J. Wong 	int			offset;
18839a4a5118SDarrick J. Wong 
18842d6ca832SDave Chinner 	ASSERT(xfs_verify_agino_or_null(pag, new_agino));
18859a4a5118SDarrick J. Wong 
18869a4a5118SDarrick J. Wong 	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1887f40aadb2SDave Chinner 	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
18889a4a5118SDarrick J. Wong 			old_value, new_agino);
18899a4a5118SDarrick J. Wong 
18909a4a5118SDarrick J. Wong 	/*
18919a4a5118SDarrick J. Wong 	 * We should never find the head of the list already set to the value
18929a4a5118SDarrick J. Wong 	 * passed in because either we're adding or removing ourselves from the
18939a4a5118SDarrick J. Wong 	 * head of the list.
18949a4a5118SDarrick J. Wong 	 */
1895a5155b87SDarrick J. Wong 	if (old_value == new_agino) {
18968d57c216SDarrick J. Wong 		xfs_buf_mark_corrupt(agibp);
18979a4a5118SDarrick J. Wong 		return -EFSCORRUPTED;
1898a5155b87SDarrick J. Wong 	}
18999a4a5118SDarrick J. Wong 
19009a4a5118SDarrick J. Wong 	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
19019a4a5118SDarrick J. Wong 	offset = offsetof(struct xfs_agi, agi_unlinked) +
19029a4a5118SDarrick J. Wong 			(sizeof(xfs_agino_t) * bucket_index);
19039a4a5118SDarrick J. Wong 	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
19049a4a5118SDarrick J. Wong 	return 0;
19059a4a5118SDarrick J. Wong }
19069a4a5118SDarrick J. Wong 
1907a4454cd6SDave Chinner static int
1908a4454cd6SDave Chinner xfs_iunlink_insert_inode(
1909f2fc16a3SDarrick J. Wong 	struct xfs_trans	*tp,
1910f40aadb2SDave Chinner 	struct xfs_perag	*pag,
1911a4454cd6SDave Chinner 	struct xfs_buf		*agibp,
1912a4454cd6SDave Chinner 	struct xfs_inode	*ip)
1913f2fc16a3SDarrick J. Wong {
1914f2fc16a3SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
1915a4454cd6SDave Chinner 	struct xfs_agi		*agi = agibp->b_addr;
1916a4454cd6SDave Chinner 	xfs_agino_t		next_agino;
1917a4454cd6SDave Chinner 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1918a4454cd6SDave Chinner 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1919f2fc16a3SDarrick J. Wong 	int			error;
1920f2fc16a3SDarrick J. Wong 
1921a4454cd6SDave Chinner 	/*
1922a4454cd6SDave Chinner 	 * Get the index into the agi hash table for the list this inode will
1923a4454cd6SDave Chinner 	 * go on.  Make sure the pointer isn't garbage and that this inode
1924a4454cd6SDave Chinner 	 * isn't already on the list.
1925a4454cd6SDave Chinner 	 */
1926a4454cd6SDave Chinner 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1927a4454cd6SDave Chinner 	if (next_agino == agino ||
1928a4454cd6SDave Chinner 	    !xfs_verify_agino_or_null(pag, next_agino)) {
1929a4454cd6SDave Chinner 		xfs_buf_mark_corrupt(agibp);
1930a4454cd6SDave Chinner 		return -EFSCORRUPTED;
1931f2fc16a3SDarrick J. Wong 	}
1932f2fc16a3SDarrick J. Wong 
1933f2fc16a3SDarrick J. Wong 	/*
19342fd26cc0SDave Chinner 	 * Update the prev pointer in the next inode to point back to this
19352fd26cc0SDave Chinner 	 * inode.
1936f2fc16a3SDarrick J. Wong 	 */
19372fd26cc0SDave Chinner 	error = xfs_iunlink_update_backref(pag, agino, next_agino);
19382fd26cc0SDave Chinner 	if (error)
19392fd26cc0SDave Chinner 		return error;
19402fd26cc0SDave Chinner 
1941a5155b87SDarrick J. Wong 	if (next_agino != NULLAGINO) {
1942a4454cd6SDave Chinner 		/*
1943a4454cd6SDave Chinner 		 * There is already another inode in the bucket, so point this
1944a4454cd6SDave Chinner 		 * inode to the current head of the list.
1945a4454cd6SDave Chinner 		 */
1946062efdb0SDave Chinner 		error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
1947a4454cd6SDave Chinner 		if (error)
1948a4454cd6SDave Chinner 			return error;
19494fcc94d6SDave Chinner 		ip->i_next_unlinked = next_agino;
1950f2fc16a3SDarrick J. Wong 	}
1951f2fc16a3SDarrick J. Wong 
1952a4454cd6SDave Chinner 	/* Point the head of the list to point to this inode. */
1953a4454cd6SDave Chinner 	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
1954f2fc16a3SDarrick J. Wong }
1955f2fc16a3SDarrick J. Wong 
19569a4a5118SDarrick J. Wong /*
1957c4a6bf7fSDarrick J. Wong  * This is called when the inode's link count has gone to 0 or we are creating
1958c4a6bf7fSDarrick J. Wong  * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
195954d7b5c1SDave Chinner  *
196054d7b5c1SDave Chinner  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
196154d7b5c1SDave Chinner  * list when the inode is freed.
19621da177e4SLinus Torvalds  */
196354d7b5c1SDave Chinner STATIC int
19641da177e4SLinus Torvalds xfs_iunlink(
196554d7b5c1SDave Chinner 	struct xfs_trans	*tp,
196654d7b5c1SDave Chinner 	struct xfs_inode	*ip)
19671da177e4SLinus Torvalds {
19685837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
1969f40aadb2SDave Chinner 	struct xfs_perag	*pag;
19705837f625SDarrick J. Wong 	struct xfs_buf		*agibp;
19711da177e4SLinus Torvalds 	int			error;
19721da177e4SLinus Torvalds 
1973c4a6bf7fSDarrick J. Wong 	ASSERT(VFS_I(ip)->i_nlink == 0);
1974c19b3b05SDave Chinner 	ASSERT(VFS_I(ip)->i_mode != 0);
19754664c66cSDarrick J. Wong 	trace_xfs_iunlink(ip);
19761da177e4SLinus Torvalds 
1977f40aadb2SDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1978f40aadb2SDave Chinner 
19795837f625SDarrick J. Wong 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
198061021debSDave Chinner 	error = xfs_read_agi(pag, tp, &agibp);
1981859d7182SVlad Apostolov 	if (error)
1982f40aadb2SDave Chinner 		goto out;
19835e1be0fbSChristoph Hellwig 
1984a4454cd6SDave Chinner 	error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
1985f40aadb2SDave Chinner out:
1986f40aadb2SDave Chinner 	xfs_perag_put(pag);
1987f40aadb2SDave Chinner 	return error;
19881da177e4SLinus Torvalds }
19891da177e4SLinus Torvalds 
1990a4454cd6SDave Chinner static int
1991a4454cd6SDave Chinner xfs_iunlink_remove_inode(
199223ffa52cSDarrick J. Wong 	struct xfs_trans	*tp,
1993f40aadb2SDave Chinner 	struct xfs_perag	*pag,
1994a4454cd6SDave Chinner 	struct xfs_buf		*agibp,
19955837f625SDarrick J. Wong 	struct xfs_inode	*ip)
19961da177e4SLinus Torvalds {
19975837f625SDarrick J. Wong 	struct xfs_mount	*mp = tp->t_mountp;
1998a4454cd6SDave Chinner 	struct xfs_agi		*agi = agibp->b_addr;
19995837f625SDarrick J. Wong 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2000b1d2a068SDarrick J. Wong 	xfs_agino_t		head_agino;
20015837f625SDarrick J. Wong 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
20021da177e4SLinus Torvalds 	int			error;
20031da177e4SLinus Torvalds 
20044664c66cSDarrick J. Wong 	trace_xfs_iunlink_remove(ip);
20054664c66cSDarrick J. Wong 
20061da177e4SLinus Torvalds 	/*
200786bfd375SDarrick J. Wong 	 * Get the index into the agi hash table for the list this inode is
200886bfd375SDarrick J. Wong 	 * on.  Make sure the head pointer isn't garbage.
20091da177e4SLinus Torvalds 	 */
2010b1d2a068SDarrick J. Wong 	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
20112d6ca832SDave Chinner 	if (!xfs_verify_agino(pag, head_agino)) {
2012d2e73665SDarrick J. Wong 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2013d2e73665SDarrick J. Wong 				agi, sizeof(*agi));
2014d2e73665SDarrick J. Wong 		return -EFSCORRUPTED;
2015d2e73665SDarrick J. Wong 	}
20161da177e4SLinus Torvalds 
20171da177e4SLinus Torvalds 	/*
2018b1d2a068SDarrick J. Wong 	 * Clear our inode's on-disk next_unlinked pointer (log it as
2019b1d2a068SDarrick J. Wong 	 * NULLAGINO) so that whatever was previous to us in the list can be
2020b1d2a068SDarrick J. Wong 	 * updated to point to whatever was next in the list.
20211da177e4SLinus Torvalds 	 */
2022062efdb0SDave Chinner 	error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
2023f2fc16a3SDarrick J. Wong 	if (error)
20241da177e4SLinus Torvalds 		return error;
20259a4a5118SDarrick J. Wong 
20269b247179SDarrick J. Wong 	/*
20272fd26cc0SDave Chinner 	 * Update the prev pointer in the next inode to point back to previous
20282fd26cc0SDave Chinner 	 * inode in the chain.
20299b247179SDarrick J. Wong 	 */
20302fd26cc0SDave Chinner 	error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
20312fd26cc0SDave Chinner 			ip->i_next_unlinked);
20329b247179SDarrick J. Wong 	if (error)
203392a00544SGao Xiang 		return error;
20349b247179SDarrick J. Wong 
203592a00544SGao Xiang 	if (head_agino != agino) {
2036a83d5a8bSDave Chinner 		struct xfs_inode	*prev_ip;
2037f2fc16a3SDarrick J. Wong 
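		/*
		 * We are somewhere in the middle of the bucket list, so make
		 * the previous inode's forward pointer skip over us.
		 */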
20382fd26cc0SDave Chinner 		prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
20392fd26cc0SDave Chinner 		if (!prev_ip)
20402fd26cc0SDave Chinner 			return -EFSCORRUPTED;
2041475ee413SChristoph Hellwig 
2042062efdb0SDave Chinner 		error = xfs_iunlink_log_inode(tp, prev_ip, pag,
20435301f870SDave Chinner 				ip->i_next_unlinked);
2044a83d5a8bSDave Chinner 		prev_ip->i_next_unlinked = ip->i_next_unlinked;
20452fd26cc0SDave Chinner 	} else {
20462fd26cc0SDave Chinner 		/* Point the head of the list to the next unlinked inode. */
20472fd26cc0SDave Chinner 		error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
20482fd26cc0SDave Chinner 				ip->i_next_unlinked);
20491da177e4SLinus Torvalds 	}
20509b247179SDarrick J. Wong 
2051a83d5a8bSDave Chinner 	ip->i_next_unlinked = NULLAGINO;
20522fd26cc0SDave Chinner 	ip->i_prev_unlinked = NULLAGINO;
20532fd26cc0SDave Chinner 	return error;
20541da177e4SLinus Torvalds }
20551da177e4SLinus Torvalds 
20565b3eed75SDave Chinner /*
2057a4454cd6SDave Chinner  * Pull the on-disk inode from the AGI unlinked list.
2058a4454cd6SDave Chinner  */
2059a4454cd6SDave Chinner STATIC int
2060a4454cd6SDave Chinner xfs_iunlink_remove(
2061a4454cd6SDave Chinner 	struct xfs_trans	*tp,
2062a4454cd6SDave Chinner 	struct xfs_perag	*pag,
2063a4454cd6SDave Chinner 	struct xfs_inode	*ip)
2064a4454cd6SDave Chinner {
2065a4454cd6SDave Chinner 	struct xfs_buf		*agibp;
2066a4454cd6SDave Chinner 	int			error;
2067a4454cd6SDave Chinner 
2068a4454cd6SDave Chinner 	trace_xfs_iunlink_remove(ip);
2069a4454cd6SDave Chinner 
2070a4454cd6SDave Chinner 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2071a4454cd6SDave Chinner 	error = xfs_read_agi(pag, tp, &agibp);
20721da177e4SLinus Torvalds 	if (error)
20731baaed8fSDave Chinner 		return error;
20741da177e4SLinus Torvalds 
2075a4454cd6SDave Chinner 	return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
20761da177e4SLinus Torvalds }
20771da177e4SLinus Torvalds 
20781da177e4SLinus Torvalds /*
207971e3e356SDave Chinner  * Look up the inode number specified and if it is not already marked XFS_ISTALE
208071e3e356SDave Chinner  * mark it stale. We should only find clean inodes in this lookup that aren't
208171e3e356SDave Chinner  * already stale.
20825806165aSDave Chinner  */
208371e3e356SDave Chinner static void
208471e3e356SDave Chinner xfs_ifree_mark_inode_stale(
2085f40aadb2SDave Chinner 	struct xfs_perag	*pag,
20865806165aSDave Chinner 	struct xfs_inode	*free_ip,
2087d9fdd0adSBrian Foster 	xfs_ino_t		inum)
20885806165aSDave Chinner {
2089f40aadb2SDave Chinner 	struct xfs_mount	*mp = pag->pag_mount;
209071e3e356SDave Chinner 	struct xfs_inode_log_item *iip;
20915806165aSDave Chinner 	struct xfs_inode	*ip;
20925806165aSDave Chinner 
20935806165aSDave Chinner retry:
20945806165aSDave Chinner 	rcu_read_lock();
20955806165aSDave Chinner 	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
20965806165aSDave Chinner 
20975806165aSDave Chinner 	/* Inode not in memory, nothing to do */
209871e3e356SDave Chinner 	if (!ip) {
209971e3e356SDave Chinner 		rcu_read_unlock();
210071e3e356SDave Chinner 		return;
210171e3e356SDave Chinner 	}
21025806165aSDave Chinner 
21035806165aSDave Chinner 	/*
21045806165aSDave Chinner 	 * Because this is an RCU-protected lookup, we could find a recently
21055806165aSDave Chinner 	 * freed or even reallocated inode during the lookup. We need to check
21065806165aSDave Chinner 	 * under the i_flags_lock for a valid inode here. Skip it if it is not
21075806165aSDave Chinner 	 * valid, the wrong inode, or stale.
21085806165aSDave Chinner 	 */
21095806165aSDave Chinner 	spin_lock(&ip->i_flags_lock);
2110718ecc50SDave Chinner 	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2111718ecc50SDave Chinner 		goto out_iflags_unlock;
21125806165aSDave Chinner 
21135806165aSDave Chinner 	/*
21145806165aSDave Chinner 	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
21155806165aSDave Chinner 	 * other inodes that we did not find in the list attached to the buffer
21165806165aSDave Chinner 	 * and are not already marked stale. If we can't lock it, back off and
21175806165aSDave Chinner 	 * retry.
21185806165aSDave Chinner 	 */
21195806165aSDave Chinner 	if (ip != free_ip) {
21205806165aSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
212171e3e356SDave Chinner 			spin_unlock(&ip->i_flags_lock);
21225806165aSDave Chinner 			rcu_read_unlock();
21235806165aSDave Chinner 			delay(1);
21245806165aSDave Chinner 			goto retry;
21255806165aSDave Chinner 		}
21265806165aSDave Chinner 	}
212771e3e356SDave Chinner 	ip->i_flags |= XFS_ISTALE;
21285806165aSDave Chinner 
212971e3e356SDave Chinner 	/*
2130718ecc50SDave Chinner 	 * If the inode is flushing, it is already attached to the buffer.  All
213171e3e356SDave Chinner 	 * we need to do here is mark the inode stale so buffer IO completion
213271e3e356SDave Chinner 	 * will remove it from the AIL.
213371e3e356SDave Chinner 	 */
213471e3e356SDave Chinner 	iip = ip->i_itemp;
2135718ecc50SDave Chinner 	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
213671e3e356SDave Chinner 		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
213771e3e356SDave Chinner 		ASSERT(iip->ili_last_fields);
213871e3e356SDave Chinner 		goto out_iunlock;
213971e3e356SDave Chinner 	}
21405806165aSDave Chinner 
21415806165aSDave Chinner 	/*
214248d55e2aSDave Chinner 	 * Inodes not attached to the buffer can be released immediately.
214348d55e2aSDave Chinner 	 * Everything else has to go through xfs_iflush_abort() on journal
214448d55e2aSDave Chinner 	 * commit as the flock synchronises removal of the inode from the
214548d55e2aSDave Chinner 	 * cluster buffer against inode reclaim.
21465806165aSDave Chinner 	 */
2147718ecc50SDave Chinner 	if (!iip || list_empty(&iip->ili_item.li_bio_list))
214871e3e356SDave Chinner 		goto out_iunlock;
2149718ecc50SDave Chinner 
2150718ecc50SDave Chinner 	__xfs_iflags_set(ip, XFS_IFLUSHING);
2151718ecc50SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2152718ecc50SDave Chinner 	rcu_read_unlock();
21535806165aSDave Chinner 
215471e3e356SDave Chinner 	/* We have a dirty inode in memory that has not yet been flushed. */
215571e3e356SDave Chinner 	spin_lock(&iip->ili_lock);
215671e3e356SDave Chinner 	iip->ili_last_fields = iip->ili_fields;
215771e3e356SDave Chinner 	iip->ili_fields = 0;
215871e3e356SDave Chinner 	iip->ili_fsync_fields = 0;
215971e3e356SDave Chinner 	spin_unlock(&iip->ili_lock);
216071e3e356SDave Chinner 	ASSERT(iip->ili_last_fields);
216171e3e356SDave Chinner 
2162718ecc50SDave Chinner 	if (ip != free_ip)
2163718ecc50SDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2164718ecc50SDave Chinner 	return;
2165718ecc50SDave Chinner 
216671e3e356SDave Chinner out_iunlock:
216771e3e356SDave Chinner 	if (ip != free_ip)
216871e3e356SDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2169718ecc50SDave Chinner out_iflags_unlock:
2170718ecc50SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2171718ecc50SDave Chinner 	rcu_read_unlock();
21725806165aSDave Chinner }
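/*
 * To summarise the paths above: an inode already under flush is simply
 * marked XFS_ISTALE, as it is attached to the cluster buffer already; a
 * clean inode with nothing attached to the buffer is marked stale and
 * released immediately; and a dirty inode that is not yet flushing has
 * XFS_IFLUSHING set and its ili_fields moved to ili_last_fields so that
 * buffer I/O completion can remove it from the AIL.
 */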
21735806165aSDave Chinner 
21745806165aSDave Chinner /*
21751da177e4SLinus Torvalds  * A big issue when freeing the inode cluster is that we _cannot_ skip any
21761da177e4SLinus Torvalds  * inodes that are in memory - they all must be marked stale and attached to
21771da177e4SLinus Torvalds  * the cluster buffer.
21781da177e4SLinus Torvalds  */
2179f40aadb2SDave Chinner static int
21801da177e4SLinus Torvalds xfs_ifree_cluster(
218171e3e356SDave Chinner 	struct xfs_trans	*tp,
2182f40aadb2SDave Chinner 	struct xfs_perag	*pag,
2183f40aadb2SDave Chinner 	struct xfs_inode	*free_ip,
21841da177e4SLinus Torvalds 	struct xfs_icluster	*xic)
21851da177e4SLinus Torvalds {
218671e3e356SDave Chinner 	struct xfs_mount	*mp = free_ip->i_mount;
218771e3e356SDave Chinner 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
218871e3e356SDave Chinner 	struct xfs_buf		*bp;
218971e3e356SDave Chinner 	xfs_daddr_t		blkno;
219071e3e356SDave Chinner 	xfs_ino_t		inum = xic->first_ino;
21911da177e4SLinus Torvalds 	int			nbufs;
21921da177e4SLinus Torvalds 	int			i, j;
21931da177e4SLinus Torvalds 	int			ioffset;
2194ce92464cSDarrick J. Wong 	int			error;
21951da177e4SLinus Torvalds 
2196ef325959SDarrick J. Wong 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
21971da177e4SLinus Torvalds 
2198ef325959SDarrick J. Wong 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
21991da177e4SLinus Torvalds 		/*
22001da177e4SLinus Torvalds 		 * The allocation bitmap tells us which inodes of the chunk were
22011da177e4SLinus Torvalds 		 * physically allocated. Skip the cluster if an inode falls into
22021da177e4SLinus Torvalds 		 * a sparse region.
22031da177e4SLinus Torvalds 		 */
22041da177e4SLinus Torvalds 		ioffset = inum - xic->first_ino;
22051da177e4SLinus Torvalds 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2206ef325959SDarrick J. Wong 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
22071da177e4SLinus Torvalds 			continue;
22081da177e4SLinus Torvalds 		}
22091da177e4SLinus Torvalds 
22101da177e4SLinus Torvalds 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
22111da177e4SLinus Torvalds 					 XFS_INO_TO_AGBNO(mp, inum));
22121da177e4SLinus Torvalds 
22131da177e4SLinus Torvalds 		/*
22141da177e4SLinus Torvalds 		 * We obtain and lock the backing buffer first in the process
2215718ecc50SDave Chinner 		 * here to ensure dirty inodes attached to the buffer remain in
2216718ecc50SDave Chinner 		 * the flushing state while we mark them stale.
2217718ecc50SDave Chinner 		 *
22181da177e4SLinus Torvalds 		 * If we scan the in-memory inodes first, then buffer IO can
22191da177e4SLinus Torvalds 		 * complete before we get a lock on it, and hence we may fail
22201da177e4SLinus Torvalds 		 * to mark all the active inodes on the buffer stale.
22211da177e4SLinus Torvalds 		 */
2222ce92464cSDarrick J. Wong 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2223ef325959SDarrick J. Wong 				mp->m_bsize * igeo->blocks_per_cluster,
2224ce92464cSDarrick J. Wong 				XBF_UNMAPPED, &bp);
222571e3e356SDave Chinner 		if (error)
2226ce92464cSDarrick J. Wong 			return error;
22271da177e4SLinus Torvalds 
22281da177e4SLinus Torvalds 		/*
22291da177e4SLinus Torvalds 		 * This buffer may not have been correctly initialised as we
22301da177e4SLinus Torvalds 		 * didn't read it from disk. That's not important because we are
22311da177e4SLinus Torvalds 		 * only using it to mark the buffer as stale in the log, and to
22321da177e4SLinus Torvalds 		 * attach stale cached inodes on it. That means it will never be
22331da177e4SLinus Torvalds 		 * dispatched for IO. If it is, we want to know about it, and we
22341da177e4SLinus Torvalds 		 * want it to fail. We can achieve this by adding a write
22351da177e4SLinus Torvalds 		 * verifier to the buffer.
22361da177e4SLinus Torvalds 		 */
22371da177e4SLinus Torvalds 		bp->b_ops = &xfs_inode_buf_ops;
22381da177e4SLinus Torvalds 
22391da177e4SLinus Torvalds 		/*
224071e3e356SDave Chinner 		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
224171e3e356SDave Chinner 		 * too. This requires lookups, and will skip inodes that we've
224271e3e356SDave Chinner 		 * already marked XFS_ISTALE.
22431da177e4SLinus Torvalds 		 */
224471e3e356SDave Chinner 		for (i = 0; i < igeo->inodes_per_cluster; i++)
2245f40aadb2SDave Chinner 			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
22461da177e4SLinus Torvalds 
22471da177e4SLinus Torvalds 		xfs_trans_stale_inode_buf(tp, bp);
22481da177e4SLinus Torvalds 		xfs_trans_binval(tp, bp);
22491da177e4SLinus Torvalds 	}
22501da177e4SLinus Torvalds 	return 0;
22511da177e4SLinus Torvalds }
22521da177e4SLinus Torvalds 
22531da177e4SLinus Torvalds /*
22549a5280b3SDave Chinner  * This is called to return an inode to the inode free list.  The inode should
22559a5280b3SDave Chinner  * already be truncated to 0 length and have no pages associated with it.  This
22569a5280b3SDave Chinner  * routine also assumes that the inode is already a part of the transaction.
22571da177e4SLinus Torvalds  *
22589a5280b3SDave Chinner  * The on-disk copy of the inode will have been added to the list of unlinked
22599a5280b3SDave Chinner  * inodes in the AGI. We need to remove the inode from that list atomically with
22609a5280b3SDave Chinner  * respect to freeing it here.
22611da177e4SLinus Torvalds  */
22621da177e4SLinus Torvalds int
22631da177e4SLinus Torvalds xfs_ifree(
22641da177e4SLinus Torvalds 	struct xfs_trans	*tp,
22651da177e4SLinus Torvalds 	struct xfs_inode	*ip)
22661da177e4SLinus Torvalds {
2267f40aadb2SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
2268f40aadb2SDave Chinner 	struct xfs_perag	*pag;
22691da177e4SLinus Torvalds 	struct xfs_icluster	xic = { 0 };
22701319ebefSDave Chinner 	struct xfs_inode_log_item *iip = ip->i_itemp;
2271f40aadb2SDave Chinner 	int			error;
22721da177e4SLinus Torvalds 
22731da177e4SLinus Torvalds 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
22741da177e4SLinus Torvalds 	ASSERT(VFS_I(ip)->i_nlink == 0);
2275daf83964SChristoph Hellwig 	ASSERT(ip->i_df.if_nextents == 0);
227613d2c10bSChristoph Hellwig 	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
22776e73a545SChristoph Hellwig 	ASSERT(ip->i_nblocks == 0);
22781da177e4SLinus Torvalds 
2279f40aadb2SDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2280f40aadb2SDave Chinner 
22811da177e4SLinus Torvalds 	/*
22829a5280b3SDave Chinner 	 * Free the inode first so that we guarantee that the AGI lock is going
22839a5280b3SDave Chinner 	 * to be taken before we remove the inode from the unlinked list. This
22849a5280b3SDave Chinner 	 * makes the AGI lock -> unlinked list modification order the same as
22859a5280b3SDave Chinner 	 * used in O_TMPFILE creation.
22861da177e4SLinus Torvalds 	 */
2287f40aadb2SDave Chinner 	error = xfs_difree(tp, pag, ip->i_ino, &xic);
22881baaed8fSDave Chinner 	if (error)
22896f5097e3SBrian Foster 		goto out;
22909a5280b3SDave Chinner 
22919a5280b3SDave Chinner 	error = xfs_iunlink_remove(tp, pag, ip);
22929a5280b3SDave Chinner 	if (error)
2293f40aadb2SDave Chinner 		goto out;
22941baaed8fSDave Chinner 
2295b2c20045SChristoph Hellwig 	/*
2296b2c20045SChristoph Hellwig 	 * Free any local-format data sitting around before we reset the
2297b2c20045SChristoph Hellwig 	 * data fork to extents format.  Note that the attr fork data has
2298b2c20045SChristoph Hellwig 	 * already been freed by xfs_attr_inactive.
2299b2c20045SChristoph Hellwig 	 */
2300f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2301b2c20045SChristoph Hellwig 		kmem_free(ip->i_df.if_u1.if_data);
2302b2c20045SChristoph Hellwig 		ip->i_df.if_u1.if_data = NULL;
2303b2c20045SChristoph Hellwig 		ip->i_df.if_bytes = 0;
2304b2c20045SChristoph Hellwig 	}
230598c4f78dSDarrick J. Wong 
2306c19b3b05SDave Chinner 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2307db07349dSChristoph Hellwig 	ip->i_diflags = 0;
2308f40aadb2SDave Chinner 	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
23097821ea30SChristoph Hellwig 	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2310f7e67b20SChristoph Hellwig 	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
23119b3beb02SChristoph Hellwig 	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
23129b3beb02SChristoph Hellwig 		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2313dc1baa71SEric Sandeen 
2314dc1baa71SEric Sandeen 	/* Don't attempt to replay owner changes for a deleted inode */
23151319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
23161319ebefSDave Chinner 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
23171319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
2318dc1baa71SEric Sandeen 
23191da177e4SLinus Torvalds 	/*
23201da177e4SLinus Torvalds 	 * Bump the generation count so no one will be confused
23211da177e4SLinus Torvalds 	 * by reincarnations of this inode.
23221da177e4SLinus Torvalds 	 */
23239e9a2674SDave Chinner 	VFS_I(ip)->i_generation++;
23241da177e4SLinus Torvalds 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
23251da177e4SLinus Torvalds 
232609b56604SBrian Foster 	if (xic.deleted)
2327f40aadb2SDave Chinner 		error = xfs_ifree_cluster(tp, pag, ip, &xic);
2328f40aadb2SDave Chinner out:
2329f40aadb2SDave Chinner 	xfs_perag_put(pag);
23302a30f36dSChandra Seetharaman 	return error;
23311da177e4SLinus Torvalds }
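/*
 * The ordering in xfs_ifree() above matters: xfs_difree() runs before
 * xfs_iunlink_remove() so the AGI is locked before the unlinked list is
 * modified, matching the order used at O_TMPFILE creation, and only after
 * both succeed is the incore inode reset and, if the whole chunk was freed,
 * the cluster invalidated via xfs_ifree_cluster().
 */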
23321da177e4SLinus Torvalds 
23331da177e4SLinus Torvalds /*
233460ec6783SChristoph Hellwig  * This is called to unpin an inode.  The caller must have the inode locked
233560ec6783SChristoph Hellwig  * in at least shared mode so that the buffer cannot be subsequently pinned
233660ec6783SChristoph Hellwig  * once someone is waiting for it to be unpinned.
23371da177e4SLinus Torvalds  */
233860ec6783SChristoph Hellwig static void
2339f392e631SChristoph Hellwig xfs_iunpin(
234060ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
2341a3f74ffbSDavid Chinner {
2342579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2343a3f74ffbSDavid Chinner 
23444aaf15d1SDave Chinner 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
23454aaf15d1SDave Chinner 
2346a3f74ffbSDavid Chinner 	/* Give the log a push to start the unpinning I/O */
23475f9b4b0dSDave Chinner 	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2349a3f74ffbSDavid Chinner }
2350a3f74ffbSDavid Chinner 
2351f392e631SChristoph Hellwig static void
2352f392e631SChristoph Hellwig __xfs_iunpin_wait(
2353f392e631SChristoph Hellwig 	struct xfs_inode	*ip)
2354f392e631SChristoph Hellwig {
2355f392e631SChristoph Hellwig 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2356f392e631SChristoph Hellwig 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2357f392e631SChristoph Hellwig 
2358f392e631SChristoph Hellwig 	xfs_iunpin(ip);
2359f392e631SChristoph Hellwig 
2360f392e631SChristoph Hellwig 	do {
236121417136SIngo Molnar 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2362f392e631SChristoph Hellwig 		if (xfs_ipincount(ip))
2363f392e631SChristoph Hellwig 			io_schedule();
2364f392e631SChristoph Hellwig 	} while (xfs_ipincount(ip));
236521417136SIngo Molnar 	finish_wait(wq, &wait.wq_entry);
2366f392e631SChristoph Hellwig }
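/*
 * A rough picture of the wait above: xfs_iunpin() forces the log up to the
 * inode's ili_commit_seq to start the unpinning I/O, and the caller then
 * sleeps on the shared bit waitqueue keyed by __XFS_IPINNED_BIT until
 * xfs_ipincount() drops to zero (the unpin side is assumed to wake that
 * queue when the last pin is released).
 */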
2367f392e631SChristoph Hellwig 
2368777df5afSDave Chinner void
23691da177e4SLinus Torvalds xfs_iunpin_wait(
237060ec6783SChristoph Hellwig 	struct xfs_inode	*ip)
23711da177e4SLinus Torvalds {
2372f392e631SChristoph Hellwig 	if (xfs_ipincount(ip))
2373f392e631SChristoph Hellwig 		__xfs_iunpin_wait(ip);
23741da177e4SLinus Torvalds }
23751da177e4SLinus Torvalds 
237627320369SDave Chinner /*
237727320369SDave Chinner  * Removing an inode from the namespace involves removing the directory entry
237827320369SDave Chinner  * and dropping the link count on the inode. Removing the directory entry can
237927320369SDave Chinner  * result in locking an AGF (directory blocks were freed) and removing a link
238027320369SDave Chinner  * count can result in placing the inode on an unlinked list which results in
238127320369SDave Chinner  * locking an AGI.
238227320369SDave Chinner  *
238327320369SDave Chinner  * The big problem here is that we have an ordering constraint on AGF and AGI
238427320369SDave Chinner  * locking - inode allocation locks the AGI, then can allocate a new extent for
238527320369SDave Chinner  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
238627320369SDave Chinner  * removes the inode from the unlinked list, requiring that we lock the AGI
238727320369SDave Chinner  * first, and then freeing the inode can result in an inode chunk being freed
238827320369SDave Chinner  * and hence freeing disk space requiring that we lock an AGF.
238927320369SDave Chinner  *
239027320369SDave Chinner  * Hence the ordering that is imposed by other parts of the code is AGI before
239127320369SDave Chinner  * AGF. This means we cannot remove the directory entry before we drop the inode
239227320369SDave Chinner  * reference count and put it on the unlinked list as this results in a lock
239327320369SDave Chinner  * order of AGF then AGI, and this can deadlock against inode allocation and
239427320369SDave Chinner  * freeing. Therefore we must drop the link counts before we remove the
239527320369SDave Chinner  * directory entry.
239627320369SDave Chinner  *
239727320369SDave Chinner  * This is still safe from a transactional point of view - it is not until we
2398310a75a3SDarrick J. Wong  * get to xfs_defer_finish() that we have the possibility of multiple
239927320369SDave Chinner  * transactions in this operation. Hence as long as we remove the directory
240027320369SDave Chinner  * entry and drop the link count in the first transaction of the remove
240127320369SDave Chinner  * operation, there are no transactional constraints on the ordering here.
240227320369SDave Chinner  */
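/*
 * In short, for the remove path below: drop the link counts first (which
 * takes the AGI via the unlinked list) and only then remove the directory
 * entry (which may take an AGF when directory blocks are freed), preserving
 * the AGI -> AGF ordering used by inode allocation and freeing.
 */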
2403c24b5dfaSDave Chinner int
2404c24b5dfaSDave Chinner xfs_remove(
2405c24b5dfaSDave Chinner 	xfs_inode_t             *dp,
2406c24b5dfaSDave Chinner 	struct xfs_name		*name,
2407c24b5dfaSDave Chinner 	xfs_inode_t		*ip)
2408c24b5dfaSDave Chinner {
2409c24b5dfaSDave Chinner 	xfs_mount_t		*mp = dp->i_mount;
2410c24b5dfaSDave Chinner 	xfs_trans_t             *tp = NULL;
2411c19b3b05SDave Chinner 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2412871b9316SDarrick J. Wong 	int			dontcare;
2413c24b5dfaSDave Chinner 	int                     error = 0;
2414c24b5dfaSDave Chinner 	uint			resblks;
2415c24b5dfaSDave Chinner 
2416c24b5dfaSDave Chinner 	trace_xfs_remove(dp, name);
2417c24b5dfaSDave Chinner 
241875c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
24192451337dSDave Chinner 		return -EIO;
2420c24b5dfaSDave Chinner 
2421c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(dp);
2422c24b5dfaSDave Chinner 	if (error)
2423c24b5dfaSDave Chinner 		goto std_return;
2424c24b5dfaSDave Chinner 
2425c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
2426c24b5dfaSDave Chinner 	if (error)
2427c24b5dfaSDave Chinner 		goto std_return;
2428c24b5dfaSDave Chinner 
2429c24b5dfaSDave Chinner 	/*
2430871b9316SDarrick J. Wong 	 * We try to get the real space reservation first, allowing for
2431871b9316SDarrick J. Wong 	 * directory btree deletion(s) implying possible bmap insert(s).  If we
2432871b9316SDarrick J. Wong 	 * can't get the space reservation then we use 0 instead, and avoid the
2433871b9316SDarrick J. Wong 	 * bmap btree insert(s) in the directory code: if a bmap insert would
2434871b9316SDarrick J. Wong 	 * otherwise happen, the LAST block is trimmed from the directory instead.
2435871b9316SDarrick J. Wong 	 *
2436871b9316SDarrick J. Wong 	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2437871b9316SDarrick J. Wong 	 * the directory code can handle a reservationless update and we don't
2438871b9316SDarrick J. Wong 	 * want to prevent a user from trying to free space by deleting things.
2439c24b5dfaSDave Chinner 	 */
2440c24b5dfaSDave Chinner 	resblks = XFS_REMOVE_SPACE_RES(mp);
2441871b9316SDarrick J. Wong 	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2442871b9316SDarrick J. Wong 			&tp, &dontcare);
2443c24b5dfaSDave Chinner 	if (error) {
24442451337dSDave Chinner 		ASSERT(error != -ENOSPC);
2445253f4911SChristoph Hellwig 		goto std_return;
2446c24b5dfaSDave Chinner 	}
2447c24b5dfaSDave Chinner 
2448c24b5dfaSDave Chinner 	/*
2449c24b5dfaSDave Chinner 	 * If we're removing a directory perform some additional validation.
2450c24b5dfaSDave Chinner 	 */
2451c24b5dfaSDave Chinner 	if (is_dir) {
245254d7b5c1SDave Chinner 		ASSERT(VFS_I(ip)->i_nlink >= 2);
245354d7b5c1SDave Chinner 		if (VFS_I(ip)->i_nlink != 2) {
24542451337dSDave Chinner 			error = -ENOTEMPTY;
2455c24b5dfaSDave Chinner 			goto out_trans_cancel;
2456c24b5dfaSDave Chinner 		}
2457c24b5dfaSDave Chinner 		if (!xfs_dir_isempty(ip)) {
24582451337dSDave Chinner 			error = -ENOTEMPTY;
2459c24b5dfaSDave Chinner 			goto out_trans_cancel;
2460c24b5dfaSDave Chinner 		}
2461c24b5dfaSDave Chinner 
246227320369SDave Chinner 		/* Drop the link from ip's "..".  */
2463c24b5dfaSDave Chinner 		error = xfs_droplink(tp, dp);
2464c24b5dfaSDave Chinner 		if (error)
246527320369SDave Chinner 			goto out_trans_cancel;
2466c24b5dfaSDave Chinner 
246727320369SDave Chinner 		/* Drop the "." link from ip to self.  */
2468c24b5dfaSDave Chinner 		error = xfs_droplink(tp, ip);
2469c24b5dfaSDave Chinner 		if (error)
247027320369SDave Chinner 			goto out_trans_cancel;
24715838d035SDarrick J. Wong 
24725838d035SDarrick J. Wong 		/*
24735838d035SDarrick J. Wong 		 * Point the unlinked child directory's ".." entry to the root
24745838d035SDarrick J. Wong 		 * directory to eliminate back-references to inodes that may
24755838d035SDarrick J. Wong 		 * get freed before the child directory is closed.  If the fs
24765838d035SDarrick J. Wong 		 * gets shrunk, this can lead to dirent inode validation errors.
24775838d035SDarrick J. Wong 		 */
24785838d035SDarrick J. Wong 		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
24795838d035SDarrick J. Wong 			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
24805838d035SDarrick J. Wong 					tp->t_mountp->m_sb.sb_rootino, 0);
24815838d035SDarrick J. Wong 			if (error)
24822653d533SDarrick J. Wong 				goto out_trans_cancel;
24835838d035SDarrick J. Wong 		}
2484c24b5dfaSDave Chinner 	} else {
2485c24b5dfaSDave Chinner 		/*
2486c24b5dfaSDave Chinner 		 * When removing a non-directory we need to log the parent
2487c24b5dfaSDave Chinner 		 * inode here.  For a directory this is done implicitly
2488c24b5dfaSDave Chinner 		 * by the xfs_droplink call for the ".." entry.
2489c24b5dfaSDave Chinner 		 */
2490c24b5dfaSDave Chinner 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2491c24b5dfaSDave Chinner 	}
249227320369SDave Chinner 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2493c24b5dfaSDave Chinner 
249427320369SDave Chinner 	/* Drop the link from dp to ip. */
2495c24b5dfaSDave Chinner 	error = xfs_droplink(tp, ip);
2496c24b5dfaSDave Chinner 	if (error)
249727320369SDave Chinner 		goto out_trans_cancel;
2498c24b5dfaSDave Chinner 
2499381eee69SBrian Foster 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
250027320369SDave Chinner 	if (error) {
25012451337dSDave Chinner 		ASSERT(error != -ENOENT);
2502c8eac49eSBrian Foster 		goto out_trans_cancel;
250327320369SDave Chinner 	}
250427320369SDave Chinner 
2505c24b5dfaSDave Chinner 	/*
2506c24b5dfaSDave Chinner 	 * If this is a synchronous mount, make sure that the
2507c24b5dfaSDave Chinner 	 * remove transaction goes to disk before returning to
2508c24b5dfaSDave Chinner 	 * the user.
2509c24b5dfaSDave Chinner 	 */
25100560f31aSDave Chinner 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2511c24b5dfaSDave Chinner 		xfs_trans_set_sync(tp);
2512c24b5dfaSDave Chinner 
251370393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
2514c24b5dfaSDave Chinner 	if (error)
2515c24b5dfaSDave Chinner 		goto std_return;
2516c24b5dfaSDave Chinner 
25172cd2ef6aSChristoph Hellwig 	if (is_dir && xfs_inode_is_filestream(ip))
2518c24b5dfaSDave Chinner 		xfs_filestream_deassociate(ip);
2519c24b5dfaSDave Chinner 
2520c24b5dfaSDave Chinner 	return 0;
2521c24b5dfaSDave Chinner 
2522c24b5dfaSDave Chinner  out_trans_cancel:
25234906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
2524c24b5dfaSDave Chinner  std_return:
2525c24b5dfaSDave Chinner 	return error;
2526c24b5dfaSDave Chinner }
2527c24b5dfaSDave Chinner 
2528f6bba201SDave Chinner /*
2529f6bba201SDave Chinner  * Enter all inodes for a rename transaction into a sorted array.
2530f6bba201SDave Chinner  */
253195afcf5cSDave Chinner #define __XFS_SORT_INODES	5
2532f6bba201SDave Chinner STATIC void
2533f6bba201SDave Chinner xfs_sort_for_rename(
253495afcf5cSDave Chinner 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
253595afcf5cSDave Chinner 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
253695afcf5cSDave Chinner 	struct xfs_inode	*ip1,	/* in: inode of old entry */
253795afcf5cSDave Chinner 	struct xfs_inode	*ip2,	/* in: inode of new entry */
253895afcf5cSDave Chinner 	struct xfs_inode	*wip,	/* in: whiteout inode */
253995afcf5cSDave Chinner 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
254095afcf5cSDave Chinner 	int			*num_inodes)  /* in/out: inodes in array */
2541f6bba201SDave Chinner {
2542f6bba201SDave Chinner 	int			i, j;
2543f6bba201SDave Chinner 
254495afcf5cSDave Chinner 	ASSERT(*num_inodes == __XFS_SORT_INODES);
254595afcf5cSDave Chinner 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
254695afcf5cSDave Chinner 
2547f6bba201SDave Chinner 	/*
2548f6bba201SDave Chinner 	 * i_tab contains a list of pointers to inodes.  We initialize
2549f6bba201SDave Chinner 	 * the table here and we'll sort it.  We will then use it to
2550f6bba201SDave Chinner 	 * order the acquisition of the inode locks.
2551f6bba201SDave Chinner 	 *
2552f6bba201SDave Chinner 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2553f6bba201SDave Chinner 	 */
255495afcf5cSDave Chinner 	i = 0;
255595afcf5cSDave Chinner 	i_tab[i++] = dp1;
255695afcf5cSDave Chinner 	i_tab[i++] = dp2;
255795afcf5cSDave Chinner 	i_tab[i++] = ip1;
255895afcf5cSDave Chinner 	if (ip2)
255995afcf5cSDave Chinner 		i_tab[i++] = ip2;
256095afcf5cSDave Chinner 	if (wip)
256195afcf5cSDave Chinner 		i_tab[i++] = wip;
256295afcf5cSDave Chinner 	*num_inodes = i;
2563f6bba201SDave Chinner 
2564f6bba201SDave Chinner 	/*
2565f6bba201SDave Chinner 	 * Sort the elements via bubble sort.  (Remember, there are at
256695afcf5cSDave Chinner 	 * most 5 elements to sort, so this is adequate.)
2567f6bba201SDave Chinner 	 */
2568f6bba201SDave Chinner 	for (i = 0; i < *num_inodes; i++) {
2569f6bba201SDave Chinner 		for (j = 1; j < *num_inodes; j++) {
2570f6bba201SDave Chinner 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
257195afcf5cSDave Chinner 				struct xfs_inode *temp = i_tab[j];
2572f6bba201SDave Chinner 				i_tab[j] = i_tab[j-1];
2573f6bba201SDave Chinner 				i_tab[j-1] = temp;
2574f6bba201SDave Chinner 			}
2575f6bba201SDave Chinner 		}
2576f6bba201SDave Chinner 	}
2577f6bba201SDave Chinner }
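/*
 * For illustration (hypothetical inode numbers): with dp1->i_ino == 200,
 * dp2->i_ino == 100, ip1->i_ino == 300 and no ip2 or whiteout inode, the
 * table is filled as {200, 100, 300}, the bubble sort reorders it to
 * {100, 200, 300}, and *num_inodes ends up as 3.
 */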
2578f6bba201SDave Chinner 
2579310606b0SDave Chinner static int
2580310606b0SDave Chinner xfs_finish_rename(
2581c9cfdb38SBrian Foster 	struct xfs_trans	*tp)
2582310606b0SDave Chinner {
2583310606b0SDave Chinner 	/*
2584310606b0SDave Chinner 	 * If this is a synchronous mount, make sure that the rename transaction
2585310606b0SDave Chinner 	 * goes to disk before returning to the user.
2586310606b0SDave Chinner 	 */
25870560f31aSDave Chinner 	if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2588310606b0SDave Chinner 		xfs_trans_set_sync(tp);
2589310606b0SDave Chinner 
259070393313SChristoph Hellwig 	return xfs_trans_commit(tp);
2591310606b0SDave Chinner }
2592310606b0SDave Chinner 
2593f6bba201SDave Chinner /*
2594d31a1825SCarlos Maiolino  * xfs_cross_rename()
2595d31a1825SCarlos Maiolino  *
25960145225eSBhaskar Chowdhury  * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
2597d31a1825SCarlos Maiolino  */
2598d31a1825SCarlos Maiolino STATIC int
2599d31a1825SCarlos Maiolino xfs_cross_rename(
2600d31a1825SCarlos Maiolino 	struct xfs_trans	*tp,
2601d31a1825SCarlos Maiolino 	struct xfs_inode	*dp1,
2602d31a1825SCarlos Maiolino 	struct xfs_name		*name1,
2603d31a1825SCarlos Maiolino 	struct xfs_inode	*ip1,
2604d31a1825SCarlos Maiolino 	struct xfs_inode	*dp2,
2605d31a1825SCarlos Maiolino 	struct xfs_name		*name2,
2606d31a1825SCarlos Maiolino 	struct xfs_inode	*ip2,
2607d31a1825SCarlos Maiolino 	int			spaceres)
2608d31a1825SCarlos Maiolino {
2609d31a1825SCarlos Maiolino 	int		error = 0;
2610d31a1825SCarlos Maiolino 	int		ip1_flags = 0;
2611d31a1825SCarlos Maiolino 	int		ip2_flags = 0;
2612d31a1825SCarlos Maiolino 	int		dp2_flags = 0;
2613d31a1825SCarlos Maiolino 
2614d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in first parent */
2615381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2616d31a1825SCarlos Maiolino 	if (error)
2617eeacd321SDave Chinner 		goto out_trans_abort;
2618d31a1825SCarlos Maiolino 
2619d31a1825SCarlos Maiolino 	/* Swap inode number for dirent in second parent */
2620381eee69SBrian Foster 	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2621d31a1825SCarlos Maiolino 	if (error)
2622eeacd321SDave Chinner 		goto out_trans_abort;
2623d31a1825SCarlos Maiolino 
2624d31a1825SCarlos Maiolino 	/*
2625d31a1825SCarlos Maiolino 	 * If we're renaming one or more directories across different parents,
2626d31a1825SCarlos Maiolino 	 * update the respective ".." entries (and link counts) to match the new
2627d31a1825SCarlos Maiolino 	 * parents.
2628d31a1825SCarlos Maiolino 	 */
2629d31a1825SCarlos Maiolino 	if (dp1 != dp2) {
2630d31a1825SCarlos Maiolino 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2631d31a1825SCarlos Maiolino 
2632c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2633d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2634381eee69SBrian Foster 						dp1->i_ino, spaceres);
2635d31a1825SCarlos Maiolino 			if (error)
2636eeacd321SDave Chinner 				goto out_trans_abort;
2637d31a1825SCarlos Maiolino 
2638d31a1825SCarlos Maiolino 			/* transfer ip2 ".." reference to dp1 */
2639c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2640d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp2);
2641d31a1825SCarlos Maiolino 				if (error)
2642eeacd321SDave Chinner 					goto out_trans_abort;
264391083269SEric Sandeen 				xfs_bumplink(tp, dp1);
2644d31a1825SCarlos Maiolino 			}
2645d31a1825SCarlos Maiolino 
2646d31a1825SCarlos Maiolino 			/*
2647d31a1825SCarlos Maiolino 			 * Although ip1 isn't changed here, userspace needs
2648d31a1825SCarlos Maiolino 			 * to be warned about the change, so that applications
2649d31a1825SCarlos Maiolino 			 * relying on it (like backup ones) will properly
2650d31a1825SCarlos Maiolino 			 * notice the change.
2651d31a1825SCarlos Maiolino 			 */
2652d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_CHG;
2653d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2654d31a1825SCarlos Maiolino 		}
2655d31a1825SCarlos Maiolino 
2656c19b3b05SDave Chinner 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2657d31a1825SCarlos Maiolino 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2658381eee69SBrian Foster 						dp2->i_ino, spaceres);
2659d31a1825SCarlos Maiolino 			if (error)
2660eeacd321SDave Chinner 				goto out_trans_abort;
2661d31a1825SCarlos Maiolino 
2662d31a1825SCarlos Maiolino 			/* transfer ip1 ".." reference to dp2 */
2663c19b3b05SDave Chinner 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2664d31a1825SCarlos Maiolino 				error = xfs_droplink(tp, dp1);
2665d31a1825SCarlos Maiolino 				if (error)
2666eeacd321SDave Chinner 					goto out_trans_abort;
266791083269SEric Sandeen 				xfs_bumplink(tp, dp2);
2668d31a1825SCarlos Maiolino 			}
2669d31a1825SCarlos Maiolino 
2670d31a1825SCarlos Maiolino 			/*
2671d31a1825SCarlos Maiolino 			 * Although ip2 isn't changed here, userspace needs
2672d31a1825SCarlos Maiolino 			 * to be warned about the change, so that applications
2673d31a1825SCarlos Maiolino 			 * relying on it (like backup ones) will properly
2674d31a1825SCarlos Maiolino 			 * notice the change.
2675d31a1825SCarlos Maiolino 			 */
2676d31a1825SCarlos Maiolino 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2677d31a1825SCarlos Maiolino 			ip2_flags |= XFS_ICHGTIME_CHG;
2678d31a1825SCarlos Maiolino 		}
2679d31a1825SCarlos Maiolino 	}
2680d31a1825SCarlos Maiolino 
2681d31a1825SCarlos Maiolino 	if (ip1_flags) {
2682d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
2683d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2684d31a1825SCarlos Maiolino 	}
2685d31a1825SCarlos Maiolino 	if (ip2_flags) {
2686d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
2687d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2688d31a1825SCarlos Maiolino 	}
2689d31a1825SCarlos Maiolino 	if (dp2_flags) {
2690d31a1825SCarlos Maiolino 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
2691d31a1825SCarlos Maiolino 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2692d31a1825SCarlos Maiolino 	}
2693d31a1825SCarlos Maiolino 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2694d31a1825SCarlos Maiolino 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2695c9cfdb38SBrian Foster 	return xfs_finish_rename(tp);
2696eeacd321SDave Chinner 
2697eeacd321SDave Chinner out_trans_abort:
26984906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
2699d31a1825SCarlos Maiolino 	return error;
2700d31a1825SCarlos Maiolino }
2701d31a1825SCarlos Maiolino 
2702d31a1825SCarlos Maiolino /*
27037dcf5c3eSDave Chinner  * xfs_rename_alloc_whiteout()
27047dcf5c3eSDave Chinner  *
2705b63da6c8SRandy Dunlap  * Return a referenced, unlinked, unlocked inode that can be used as a
27067dcf5c3eSDave Chinner  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
27077dcf5c3eSDave Chinner  * crash between allocating the inode and linking it into the rename
27087dcf5c3eSDave Chinner  * transaction, recovery will free the inode and we won't leak it.
27097dcf5c3eSDave Chinner  */
27107dcf5c3eSDave Chinner static int
27117dcf5c3eSDave Chinner xfs_rename_alloc_whiteout(
2712f2d40141SChristian Brauner 	struct mnt_idmap	*idmap,
271370b589a3SEric Sandeen 	struct xfs_name		*src_name,
27147dcf5c3eSDave Chinner 	struct xfs_inode	*dp,
27157dcf5c3eSDave Chinner 	struct xfs_inode	**wip)
27167dcf5c3eSDave Chinner {
27177dcf5c3eSDave Chinner 	struct xfs_inode	*tmpfile;
271870b589a3SEric Sandeen 	struct qstr		name;
27197dcf5c3eSDave Chinner 	int			error;
27207dcf5c3eSDave Chinner 
2721f2d40141SChristian Brauner 	error = xfs_create_tmpfile(idmap, dp, S_IFCHR | WHITEOUT_MODE,
2722f736d93dSChristoph Hellwig 				   &tmpfile);
27237dcf5c3eSDave Chinner 	if (error)
27247dcf5c3eSDave Chinner 		return error;
27257dcf5c3eSDave Chinner 
272670b589a3SEric Sandeen 	name.name = src_name->name;
272770b589a3SEric Sandeen 	name.len = src_name->len;
272870b589a3SEric Sandeen 	error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
272970b589a3SEric Sandeen 	if (error) {
273070b589a3SEric Sandeen 		xfs_finish_inode_setup(tmpfile);
273170b589a3SEric Sandeen 		xfs_irele(tmpfile);
273270b589a3SEric Sandeen 		return error;
273370b589a3SEric Sandeen 	}
273470b589a3SEric Sandeen 
273522419ac9SBrian Foster 	/*
273622419ac9SBrian Foster 	 * Prepare the tmpfile inode as if it were created through the VFS.
2737c4a6bf7fSDarrick J. Wong 	 * Complete the inode setup and flag it as linkable.  nlink is already
2738c4a6bf7fSDarrick J. Wong 	 * zero, so we can skip the drop_nlink.
273922419ac9SBrian Foster 	 */
27402b3d1d41SChristoph Hellwig 	xfs_setup_iops(tmpfile);
27417dcf5c3eSDave Chinner 	xfs_finish_inode_setup(tmpfile);
27427dcf5c3eSDave Chinner 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
27437dcf5c3eSDave Chinner 
27447dcf5c3eSDave Chinner 	*wip = tmpfile;
27457dcf5c3eSDave Chinner 	return 0;
27467dcf5c3eSDave Chinner }
27477dcf5c3eSDave Chinner 
27487dcf5c3eSDave Chinner /*
2749f6bba201SDave Chinner  * xfs_rename
2750f6bba201SDave Chinner  */
2751f6bba201SDave Chinner int
2752f6bba201SDave Chinner xfs_rename(
2753f2d40141SChristian Brauner 	struct mnt_idmap	*idmap,
27547dcf5c3eSDave Chinner 	struct xfs_inode	*src_dp,
2755f6bba201SDave Chinner 	struct xfs_name		*src_name,
27567dcf5c3eSDave Chinner 	struct xfs_inode	*src_ip,
27577dcf5c3eSDave Chinner 	struct xfs_inode	*target_dp,
2758f6bba201SDave Chinner 	struct xfs_name		*target_name,
27597dcf5c3eSDave Chinner 	struct xfs_inode	*target_ip,
2760d31a1825SCarlos Maiolino 	unsigned int		flags)
2761f6bba201SDave Chinner {
27627dcf5c3eSDave Chinner 	struct xfs_mount	*mp = src_dp->i_mount;
27637dcf5c3eSDave Chinner 	struct xfs_trans	*tp;
27647dcf5c3eSDave Chinner 	struct xfs_inode	*wip = NULL;		/* whiteout inode */
27657dcf5c3eSDave Chinner 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
27666da1b4b1SDarrick J. Wong 	int			i;
276795afcf5cSDave Chinner 	int			num_inodes = __XFS_SORT_INODES;
27682b93681fSDave Chinner 	bool			new_parent = (src_dp != target_dp);
2769c19b3b05SDave Chinner 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2770f6bba201SDave Chinner 	int			spaceres;
277141667260SDarrick J. Wong 	bool			retried = false;
277241667260SDarrick J. Wong 	int			error, nospace_error = 0;
2773f6bba201SDave Chinner 
2774f6bba201SDave Chinner 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2775f6bba201SDave Chinner 
2776eeacd321SDave Chinner 	if ((flags & RENAME_EXCHANGE) && !target_ip)
2777eeacd321SDave Chinner 		return -EINVAL;
2778f6bba201SDave Chinner 
27797dcf5c3eSDave Chinner 	/*
27807dcf5c3eSDave Chinner 	 * If we are doing a whiteout operation, allocate the whiteout inode
27817dcf5c3eSDave Chinner 	 * we will be placing at the target and ensure the type is set
27827dcf5c3eSDave Chinner 	 * appropriately.
27837dcf5c3eSDave Chinner 	 */
27847dcf5c3eSDave Chinner 	if (flags & RENAME_WHITEOUT) {
2785f2d40141SChristian Brauner 		error = xfs_rename_alloc_whiteout(idmap, src_name,
278670b589a3SEric Sandeen 						  target_dp, &wip);
27877dcf5c3eSDave Chinner 		if (error)
27887dcf5c3eSDave Chinner 			return error;
2789f6bba201SDave Chinner 
27907dcf5c3eSDave Chinner 		/* setup target dirent info as whiteout */
27917dcf5c3eSDave Chinner 		src_name->type = XFS_DIR3_FT_CHRDEV;
27927dcf5c3eSDave Chinner 	}
27937dcf5c3eSDave Chinner 
27947dcf5c3eSDave Chinner 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2795f6bba201SDave Chinner 				inodes, &num_inodes);
2796f6bba201SDave Chinner 
279741667260SDarrick J. Wong retry:
279841667260SDarrick J. Wong 	nospace_error = 0;
2799f6bba201SDave Chinner 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2800253f4911SChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
28012451337dSDave Chinner 	if (error == -ENOSPC) {
280241667260SDarrick J. Wong 		nospace_error = error;
2803f6bba201SDave Chinner 		spaceres = 0;
2804253f4911SChristoph Hellwig 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2805253f4911SChristoph Hellwig 				&tp);
2806f6bba201SDave Chinner 	}
2807445883e8SDave Chinner 	if (error)
2808253f4911SChristoph Hellwig 		goto out_release_wip;
2809f6bba201SDave Chinner 
2810f6bba201SDave Chinner 	/*
2811f6bba201SDave Chinner 	 * Attach the dquots to the inodes
2812f6bba201SDave Chinner 	 */
2813f6bba201SDave Chinner 	error = xfs_qm_vop_rename_dqattach(inodes);
2814445883e8SDave Chinner 	if (error)
2815445883e8SDave Chinner 		goto out_trans_cancel;
2816f6bba201SDave Chinner 
2817f6bba201SDave Chinner 	/*
2818f6bba201SDave Chinner 	 * Lock all the participating inodes. Depending upon whether
2819f6bba201SDave Chinner 	 * the target_name exists in the target directory, and
2820f6bba201SDave Chinner 	 * whether the target directory is the same as the source
2821e07ee6feSAllison Henderson 	 * directory, we can lock from 2 to 5 inodes.
2822f6bba201SDave Chinner 	 */
2823f6bba201SDave Chinner 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2824f6bba201SDave Chinner 
2825f6bba201SDave Chinner 	/*
2826f6bba201SDave Chinner 	 * Join all the inodes to the transaction. From this point on,
2827f6bba201SDave Chinner 	 * we can rely on either trans_commit or trans_cancel to unlock
2828f6bba201SDave Chinner 	 * them.
2829f6bba201SDave Chinner 	 */
283065523218SChristoph Hellwig 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2831f6bba201SDave Chinner 	if (new_parent)
283265523218SChristoph Hellwig 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2833f6bba201SDave Chinner 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2834f6bba201SDave Chinner 	if (target_ip)
2835f6bba201SDave Chinner 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
28367dcf5c3eSDave Chinner 	if (wip)
28377dcf5c3eSDave Chinner 		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
2838f6bba201SDave Chinner 
2839f6bba201SDave Chinner 	/*
2840f6bba201SDave Chinner 	 * If we are using project inheritance, we only allow renames
2841f6bba201SDave Chinner 	 * into our tree when the project IDs are the same; else the
2842f6bba201SDave Chinner 	 * tree quota mechanism would be circumvented.
2843f6bba201SDave Chinner 	 */
2844db07349dSChristoph Hellwig 	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
2845ceaf603cSChristoph Hellwig 		     target_dp->i_projid != src_ip->i_projid)) {
28462451337dSDave Chinner 		error = -EXDEV;
2847445883e8SDave Chinner 		goto out_trans_cancel;
2848f6bba201SDave Chinner 	}
2849f6bba201SDave Chinner 
2850eeacd321SDave Chinner 	/* RENAME_EXCHANGE is unique from here on. */
2851eeacd321SDave Chinner 	if (flags & RENAME_EXCHANGE)
2852eeacd321SDave Chinner 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2853d31a1825SCarlos Maiolino 					target_dp, target_name, target_ip,
2854f16dea54SBrian Foster 					spaceres);
2855d31a1825SCarlos Maiolino 
2856d31a1825SCarlos Maiolino 	/*
285741667260SDarrick J. Wong 	 * Try to reserve quota to handle an expansion of the target directory.
285841667260SDarrick J. Wong 	 * We'll allow the rename to continue in reservationless mode if we hit
285941667260SDarrick J. Wong 	 * a space usage constraint.  If we trigger reservationless mode, save
286041667260SDarrick J. Wong 	 * the errno if there isn't any free space in the target directory.
286141667260SDarrick J. Wong 	 */
286241667260SDarrick J. Wong 	if (spaceres != 0) {
286341667260SDarrick J. Wong 		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
286441667260SDarrick J. Wong 				0, false);
286541667260SDarrick J. Wong 		if (error == -EDQUOT || error == -ENOSPC) {
286641667260SDarrick J. Wong 			if (!retried) {
286741667260SDarrick J. Wong 				xfs_trans_cancel(tp);
286841667260SDarrick J. Wong 				xfs_blockgc_free_quota(target_dp, 0);
286941667260SDarrick J. Wong 				retried = true;
287041667260SDarrick J. Wong 				goto retry;
287141667260SDarrick J. Wong 			}
287241667260SDarrick J. Wong 
287341667260SDarrick J. Wong 			nospace_error = error;
287441667260SDarrick J. Wong 			spaceres = 0;
287541667260SDarrick J. Wong 			error = 0;
287641667260SDarrick J. Wong 		}
287741667260SDarrick J. Wong 		if (error)
287841667260SDarrick J. Wong 			goto out_trans_cancel;
287941667260SDarrick J. Wong 	}
288041667260SDarrick J. Wong 
288141667260SDarrick J. Wong 	/*
2882bc56ad8cSkaixuxia 	 * Check for expected errors before we dirty the transaction
2883bc56ad8cSkaixuxia 	 * so we can return an error without a transaction abort.
2884f6bba201SDave Chinner 	 */
2885f6bba201SDave Chinner 	if (target_ip == NULL) {
2886f6bba201SDave Chinner 		/*
2887f6bba201SDave Chinner 		 * If there's no space reservation, check the entry will
2888f6bba201SDave Chinner 		 * fit before actually inserting it.
2889f6bba201SDave Chinner 		 */
289094f3cad5SEric Sandeen 		if (!spaceres) {
289194f3cad5SEric Sandeen 			error = xfs_dir_canenter(tp, target_dp, target_name);
2892f6bba201SDave Chinner 			if (error)
2893445883e8SDave Chinner 				goto out_trans_cancel;
289494f3cad5SEric Sandeen 		}
2895bc56ad8cSkaixuxia 	} else {
2896bc56ad8cSkaixuxia 		/*
2897bc56ad8cSkaixuxia 		 * If the target exists and is a directory, check whether
2898bc56ad8cSkaixuxia 		 * it can be destroyed.
2899bc56ad8cSkaixuxia 		 */
2900bc56ad8cSkaixuxia 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
2901bc56ad8cSkaixuxia 		    (!xfs_dir_isempty(target_ip) ||
2902bc56ad8cSkaixuxia 		     (VFS_I(target_ip)->i_nlink > 2))) {
2903bc56ad8cSkaixuxia 			error = -EEXIST;
2904bc56ad8cSkaixuxia 			goto out_trans_cancel;
2905bc56ad8cSkaixuxia 		}
2906bc56ad8cSkaixuxia 	}
2907bc56ad8cSkaixuxia 
2908bc56ad8cSkaixuxia 	/*
29096da1b4b1SDarrick J. Wong 	 * Lock the AGI buffers we need to handle bumping the nlink of the
29106da1b4b1SDarrick J. Wong 	 * whiteout inode off the unlinked list and to handle dropping the
29116da1b4b1SDarrick J. Wong 	 * nlink of the target inode.  Per locking order rules, do this in
29126da1b4b1SDarrick J. Wong 	 * increasing AG order and before directory block allocation tries to
29136da1b4b1SDarrick J. Wong 	 * grab AGFs because we grab AGIs before AGFs.
29146da1b4b1SDarrick J. Wong 	 *
29156da1b4b1SDarrick J. Wong 	 * The (vfs) caller must ensure that if src is a directory then
29166da1b4b1SDarrick J. Wong 	 * target_ip is either null or an empty directory.
29176da1b4b1SDarrick J. Wong 	 */
29186da1b4b1SDarrick J. Wong 	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
29196da1b4b1SDarrick J. Wong 		if (inodes[i] == wip ||
29206da1b4b1SDarrick J. Wong 		    (inodes[i] == target_ip &&
29216da1b4b1SDarrick J. Wong 		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
292261021debSDave Chinner 			struct xfs_perag	*pag;
29236da1b4b1SDarrick J. Wong 			struct xfs_buf		*bp;
29246da1b4b1SDarrick J. Wong 
292561021debSDave Chinner 			pag = xfs_perag_get(mp,
292661021debSDave Chinner 					XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
292761021debSDave Chinner 			error = xfs_read_agi(pag, tp, &bp);
292861021debSDave Chinner 			xfs_perag_put(pag);
29296da1b4b1SDarrick J. Wong 			if (error)
29306da1b4b1SDarrick J. Wong 				goto out_trans_cancel;
29316da1b4b1SDarrick J. Wong 		}
29326da1b4b1SDarrick J. Wong 	}
29336da1b4b1SDarrick J. Wong 
29346da1b4b1SDarrick J. Wong 	/*
2935bc56ad8cSkaixuxia 	 * Directory entry creation below may acquire the AGF. Remove
2936bc56ad8cSkaixuxia 	 * the whiteout from the unlinked list first to preserve correct
2937bc56ad8cSkaixuxia 	 * AGI/AGF locking order. This dirties the transaction so failures
2938bc56ad8cSkaixuxia 	 * after this point will abort and log recovery will clean up the
2939bc56ad8cSkaixuxia 	 * mess.
2940bc56ad8cSkaixuxia 	 *
2941bc56ad8cSkaixuxia 	 * For whiteouts, we need to bump the link count on the whiteout
2942bc56ad8cSkaixuxia 	 * inode. After this point we have a real link, so clear the tmpfile
2943bc56ad8cSkaixuxia 	 * state flag from the inode so it doesn't accidentally get misused
2944bc56ad8cSkaixuxia 	 * in the future.
2945bc56ad8cSkaixuxia 	 */
2946bc56ad8cSkaixuxia 	if (wip) {
2947f40aadb2SDave Chinner 		struct xfs_perag	*pag;
2948f40aadb2SDave Chinner 
2949bc56ad8cSkaixuxia 		ASSERT(VFS_I(wip)->i_nlink == 0);
2950f40aadb2SDave Chinner 
2951f40aadb2SDave Chinner 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
2952f40aadb2SDave Chinner 		error = xfs_iunlink_remove(tp, pag, wip);
2953f40aadb2SDave Chinner 		xfs_perag_put(pag);
2954bc56ad8cSkaixuxia 		if (error)
2955bc56ad8cSkaixuxia 			goto out_trans_cancel;
2956bc56ad8cSkaixuxia 
2957bc56ad8cSkaixuxia 		xfs_bumplink(tp, wip);
2958bc56ad8cSkaixuxia 		VFS_I(wip)->i_state &= ~I_LINKABLE;
2959bc56ad8cSkaixuxia 	}
2960bc56ad8cSkaixuxia 
2961bc56ad8cSkaixuxia 	/*
2962bc56ad8cSkaixuxia 	 * Set up the target.
2963bc56ad8cSkaixuxia 	 */
2964bc56ad8cSkaixuxia 	if (target_ip == NULL) {
2965f6bba201SDave Chinner 		/*
2966f6bba201SDave Chinner 		 * If target does not exist and the rename crosses
2967f6bba201SDave Chinner 		 * directories, adjust the target directory link count
2968f6bba201SDave Chinner 		 * to account for the ".." reference from the new entry.
2969f6bba201SDave Chinner 		 */
2970f6bba201SDave Chinner 		error = xfs_dir_createname(tp, target_dp, target_name,
2971381eee69SBrian Foster 					   src_ip->i_ino, spaceres);
2972f6bba201SDave Chinner 		if (error)
2973c8eac49eSBrian Foster 			goto out_trans_cancel;
2974f6bba201SDave Chinner 
2975f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
2976f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2977f6bba201SDave Chinner 
2978f6bba201SDave Chinner 		if (new_parent && src_is_directory) {
297991083269SEric Sandeen 			xfs_bumplink(tp, target_dp);
2980f6bba201SDave Chinner 		}
2981f6bba201SDave Chinner 	} else { /* target_ip != NULL */
2982f6bba201SDave Chinner 		/*
2983f6bba201SDave Chinner 		 * Link the source inode under the target name.
2984f6bba201SDave Chinner 		 * If the source inode is a directory and we are moving
2985f6bba201SDave Chinner 		 * it across directories, its ".." entry will be
2986f6bba201SDave Chinner 		 * inconsistent until we replace that down below.
2987f6bba201SDave Chinner 		 *
2988f6bba201SDave Chinner 		 * In case there is already an entry with the same
2989f6bba201SDave Chinner 		 * name at the destination directory, remove it first.
2990f6bba201SDave Chinner 		 */
2991f6bba201SDave Chinner 		error = xfs_dir_replace(tp, target_dp, target_name,
2992381eee69SBrian Foster 					src_ip->i_ino, spaceres);
2993f6bba201SDave Chinner 		if (error)
2994c8eac49eSBrian Foster 			goto out_trans_cancel;
2995f6bba201SDave Chinner 
2996f6bba201SDave Chinner 		xfs_trans_ichgtime(tp, target_dp,
2997f6bba201SDave Chinner 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2998f6bba201SDave Chinner 
2999f6bba201SDave Chinner 		/*
3000f6bba201SDave Chinner 		 * Decrement the link count on the target since the target
3001f6bba201SDave Chinner 		 * dir no longer points to it.
3002f6bba201SDave Chinner 		 */
3003f6bba201SDave Chinner 		error = xfs_droplink(tp, target_ip);
3004f6bba201SDave Chinner 		if (error)
3005c8eac49eSBrian Foster 			goto out_trans_cancel;
3006f6bba201SDave Chinner 
3007f6bba201SDave Chinner 		if (src_is_directory) {
3008f6bba201SDave Chinner 			/*
3009f6bba201SDave Chinner 			 * Drop the link from the old "." entry.
3010f6bba201SDave Chinner 			 */
3011f6bba201SDave Chinner 			error = xfs_droplink(tp, target_ip);
3012f6bba201SDave Chinner 			if (error)
3013c8eac49eSBrian Foster 				goto out_trans_cancel;
3014f6bba201SDave Chinner 		}
3015f6bba201SDave Chinner 	} /* target_ip != NULL */
3016f6bba201SDave Chinner 
3017f6bba201SDave Chinner 	/*
3018f6bba201SDave Chinner 	 * Remove the source.
3019f6bba201SDave Chinner 	 */
3020f6bba201SDave Chinner 	if (new_parent && src_is_directory) {
3021f6bba201SDave Chinner 		/*
3022f6bba201SDave Chinner 		 * Rewrite the ".." entry to point to the new
3023f6bba201SDave Chinner 		 * directory.
3024f6bba201SDave Chinner 		 */
3025f6bba201SDave Chinner 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3026381eee69SBrian Foster 					target_dp->i_ino, spaceres);
30272451337dSDave Chinner 		ASSERT(error != -EEXIST);
3028f6bba201SDave Chinner 		if (error)
3029c8eac49eSBrian Foster 			goto out_trans_cancel;
3030f6bba201SDave Chinner 	}
3031f6bba201SDave Chinner 
3032f6bba201SDave Chinner 	/*
3033f6bba201SDave Chinner 	 * We always want to hit the ctime on the source inode.
3034f6bba201SDave Chinner 	 *
3035f6bba201SDave Chinner 	 * This isn't strictly required by the standards since the source
3036f6bba201SDave Chinner 	 * inode isn't really being changed, but old unix file systems did
3037f6bba201SDave Chinner 	 * it and some incremental backup programs won't work without it.
3038f6bba201SDave Chinner 	 */
3039f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3040f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3041f6bba201SDave Chinner 
3042f6bba201SDave Chinner 	/*
3043f6bba201SDave Chinner 	 * Adjust the link count on src_dp.  This is necessary when
3044f6bba201SDave Chinner 	 * renaming a directory, either within one parent when
3045f6bba201SDave Chinner 	 * the target existed, or across two parent directories.
3046f6bba201SDave Chinner 	 */
3047f6bba201SDave Chinner 	if (src_is_directory && (new_parent || target_ip != NULL)) {
3048f6bba201SDave Chinner 
3049f6bba201SDave Chinner 		/*
3050f6bba201SDave Chinner 		 * Decrement link count on src_directory since the
3051f6bba201SDave Chinner 		 * entry that's moved no longer points to it.
3052f6bba201SDave Chinner 		 */
3053f6bba201SDave Chinner 		error = xfs_droplink(tp, src_dp);
3054f6bba201SDave Chinner 		if (error)
3055c8eac49eSBrian Foster 			goto out_trans_cancel;
3056f6bba201SDave Chinner 	}
3057f6bba201SDave Chinner 
30587dcf5c3eSDave Chinner 	/*
30597dcf5c3eSDave Chinner 	 * For whiteouts, we only need to update the source dirent with the
30607dcf5c3eSDave Chinner 	 * inode number of the whiteout inode rather than removing it
30617dcf5c3eSDave Chinner 	 * altogether.
30627dcf5c3eSDave Chinner 	 */
306383a21c18SChandan Babu R 	if (wip)
30647dcf5c3eSDave Chinner 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3065381eee69SBrian Foster 					spaceres);
306683a21c18SChandan Babu R 	else
3067f6bba201SDave Chinner 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3068381eee69SBrian Foster 					   spaceres);
306902092a2fSChandan Babu R 
3070f6bba201SDave Chinner 	if (error)
3071c8eac49eSBrian Foster 		goto out_trans_cancel;
3072f6bba201SDave Chinner 
3073f6bba201SDave Chinner 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3074f6bba201SDave Chinner 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3075f6bba201SDave Chinner 	if (new_parent)
3076f6bba201SDave Chinner 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3077f6bba201SDave Chinner 
3078c9cfdb38SBrian Foster 	error = xfs_finish_rename(tp);
30797dcf5c3eSDave Chinner 	if (wip)
308044a8736bSDarrick J. Wong 		xfs_irele(wip);
30817dcf5c3eSDave Chinner 	return error;
3082f6bba201SDave Chinner 
3083445883e8SDave Chinner out_trans_cancel:
30844906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
3085253f4911SChristoph Hellwig out_release_wip:
30867dcf5c3eSDave Chinner 	if (wip)
308744a8736bSDarrick J. Wong 		xfs_irele(wip);
308841667260SDarrick J. Wong 	if (error == -ENOSPC && nospace_error)
308941667260SDarrick J. Wong 		error = nospace_error;
3090f6bba201SDave Chinner 	return error;
3091f6bba201SDave Chinner }
3092f6bba201SDave Chinner 
3093e6187b34SDave Chinner static int
3094e6187b34SDave Chinner xfs_iflush(
309593848a99SChristoph Hellwig 	struct xfs_inode	*ip,
309693848a99SChristoph Hellwig 	struct xfs_buf		*bp)
30971da177e4SLinus Torvalds {
309893848a99SChristoph Hellwig 	struct xfs_inode_log_item *iip = ip->i_itemp;
309993848a99SChristoph Hellwig 	struct xfs_dinode	*dip;
310093848a99SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
3101f2019299SBrian Foster 	int			error;
31021da177e4SLinus Torvalds 
3103579aa9caSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3104718ecc50SDave Chinner 	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3105f7e67b20SChristoph Hellwig 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3106daf83964SChristoph Hellwig 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
310790c60e16SDave Chinner 	ASSERT(iip->ili_item.li_buf == bp);
31081da177e4SLinus Torvalds 
310988ee2df7SChristoph Hellwig 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
31101da177e4SLinus Torvalds 
3111f2019299SBrian Foster 	/*
3112f2019299SBrian Foster 	 * We don't flush the inode if any of the following checks fail, but we
3113f2019299SBrian Foster 	 * do still update the log item and attach to the backing buffer as if
3114f2019299SBrian Foster 	 * the flush happened. This is a formality to facilitate predictable
3115f2019299SBrian Foster 	 * error handling as the caller will shut down and fail the buffer.
3116f2019299SBrian Foster 	 */
3117f2019299SBrian Foster 	error = -EFSCORRUPTED;
311869ef921bSChristoph Hellwig 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
31199e24cfd0SDarrick J. Wong 			       mp, XFS_ERRTAG_IFLUSH_1)) {
31206a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
312178b0f58bSZeng Heng 			"%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
31226a19d939SDave Chinner 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3123f2019299SBrian Foster 		goto flush_out;
31241da177e4SLinus Torvalds 	}
3125c19b3b05SDave Chinner 	if (S_ISREG(VFS_I(ip)->i_mode)) {
31261da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
3127f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3128f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
31299e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_3)) {
31306a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
313178b0f58bSZeng Heng 				"%s: Bad regular inode %llu, ptr "PTR_FMT,
31326a19d939SDave Chinner 				__func__, ip->i_ino, ip);
3133f2019299SBrian Foster 			goto flush_out;
31341da177e4SLinus Torvalds 		}
3135c19b3b05SDave Chinner 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
31361da177e4SLinus Torvalds 		if (XFS_TEST_ERROR(
3137f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3138f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3139f7e67b20SChristoph Hellwig 		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
31409e24cfd0SDarrick J. Wong 		    mp, XFS_ERRTAG_IFLUSH_4)) {
31416a19d939SDave Chinner 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
314278b0f58bSZeng Heng 				"%s: Bad directory inode %llu, ptr "PTR_FMT,
31436a19d939SDave Chinner 				__func__, ip->i_ino, ip);
3144f2019299SBrian Foster 			goto flush_out;
31451da177e4SLinus Torvalds 		}
31461da177e4SLinus Torvalds 	}
31472ed5b09bSDarrick J. Wong 	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
31486e73a545SChristoph Hellwig 				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
31496a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3150755c38ffSChandan Babu R 			"%s: detected corrupt incore inode %llu, "
3151755c38ffSChandan Babu R 			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
31526a19d939SDave Chinner 			__func__, ip->i_ino,
31532ed5b09bSDarrick J. Wong 			ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
31546e73a545SChristoph Hellwig 			ip->i_nblocks, ip);
3155f2019299SBrian Foster 		goto flush_out;
31561da177e4SLinus Torvalds 	}
31577821ea30SChristoph Hellwig 	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
31589e24cfd0SDarrick J. Wong 				mp, XFS_ERRTAG_IFLUSH_6)) {
31596a19d939SDave Chinner 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
316078b0f58bSZeng Heng 			"%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
31617821ea30SChristoph Hellwig 			__func__, ip->i_ino, ip->i_forkoff, ip);
3162f2019299SBrian Foster 		goto flush_out;
31631da177e4SLinus Torvalds 	}
3164e60896d8SDave Chinner 
31651da177e4SLinus Torvalds 	/*
3166965e0a1aSChristoph Hellwig 	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3167965e0a1aSChristoph Hellwig 	 * count for correct sequencing.  We bump the flush iteration count so
3168965e0a1aSChristoph Hellwig 	 * we can detect flushes which postdate a log record during recovery.
3169965e0a1aSChristoph Hellwig 	 * This is redundant as we now log every change and hence this can't
3170965e0a1aSChristoph Hellwig 	 * happen, but we still need to do it to ensure backwards compatibility
3171965e0a1aSChristoph Hellwig 	 * with old kernels that predate logging all inode changes.
31721da177e4SLinus Torvalds 	 */
317338c26bfdSDave Chinner 	if (!xfs_has_v3inodes(mp))
3174965e0a1aSChristoph Hellwig 		ip->i_flushiter++;
31751da177e4SLinus Torvalds 
31760f45a1b2SChristoph Hellwig 	/*
31770f45a1b2SChristoph Hellwig 	 * If there are inline format data / attr forks attached to this inode,
31780f45a1b2SChristoph Hellwig 	 * make sure they are not corrupt.
31790f45a1b2SChristoph Hellwig 	 */
3180f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
31810f45a1b2SChristoph Hellwig 	    xfs_ifork_verify_local_data(ip))
31820f45a1b2SChristoph Hellwig 		goto flush_out;
3183932b42c6SDarrick J. Wong 	if (xfs_inode_has_attr_fork(ip) &&
31842ed5b09bSDarrick J. Wong 	    ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
31850f45a1b2SChristoph Hellwig 	    xfs_ifork_verify_local_attr(ip))
3186f2019299SBrian Foster 		goto flush_out;
3187005c5db8SDarrick J. Wong 
31881da177e4SLinus Torvalds 	/*
31893987848cSDave Chinner 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
31903987848cSDave Chinner 	 * copy out the core of the inode, because if the inode is dirty at all
31913987848cSDave Chinner 	 * the core must be.
31921da177e4SLinus Torvalds 	 */
319393f958f9SDave Chinner 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
31941da177e4SLinus Torvalds 
31951da177e4SLinus Torvalds 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
319638c26bfdSDave Chinner 	if (!xfs_has_v3inodes(mp)) {
3197965e0a1aSChristoph Hellwig 		if (ip->i_flushiter == DI_MAX_FLUSH)
3198965e0a1aSChristoph Hellwig 			ip->i_flushiter = 0;
3199ee7b83fdSChristoph Hellwig 	}
32001da177e4SLinus Torvalds 
3201005c5db8SDarrick J. Wong 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3202932b42c6SDarrick J. Wong 	if (xfs_inode_has_attr_fork(ip))
3203005c5db8SDarrick J. Wong 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
32041da177e4SLinus Torvalds 
32051da177e4SLinus Torvalds 	/*
3206f5d8d5c4SChristoph Hellwig 	 * We've recorded everything logged in the inode, so we'd like to clear
3207f5d8d5c4SChristoph Hellwig 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3208f5d8d5c4SChristoph Hellwig 	 * However, we can't stop logging all this information until the data
3209f5d8d5c4SChristoph Hellwig 	 * we've copied into the disk buffer is written to disk.  If we did we
3210f5d8d5c4SChristoph Hellwig 	 * might overwrite the copy of the inode in the log with all the data
3211f5d8d5c4SChristoph Hellwig 	 * after re-logging only part of it, and in the face of a crash we
3212f5d8d5c4SChristoph Hellwig 	 * wouldn't have all the data we need to recover.
32131da177e4SLinus Torvalds 	 *
3214f5d8d5c4SChristoph Hellwig 	 * What we do is move the bits to the ili_last_fields field.  When
3215f5d8d5c4SChristoph Hellwig 	 * logging the inode, these bits are moved back to the ili_fields field.
3216664ffb8aSChristoph Hellwig 	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3217664ffb8aSChristoph Hellwig 	 * we know that the information those bits represent is permanently on
3218f5d8d5c4SChristoph Hellwig 	 * disk.  As long as the flush completes before the inode is logged
3219f5d8d5c4SChristoph Hellwig 	 * again, then both ili_fields and ili_last_fields will be cleared.
32201da177e4SLinus Torvalds 	 */
3221f2019299SBrian Foster 	error = 0;
3222f2019299SBrian Foster flush_out:
32231319ebefSDave Chinner 	spin_lock(&iip->ili_lock);
3224f5d8d5c4SChristoph Hellwig 	iip->ili_last_fields = iip->ili_fields;
3225f5d8d5c4SChristoph Hellwig 	iip->ili_fields = 0;
3226fc0561ceSDave Chinner 	iip->ili_fsync_fields = 0;
32271319ebefSDave Chinner 	spin_unlock(&iip->ili_lock);
32281da177e4SLinus Torvalds 
32291319ebefSDave Chinner 	/*
32301319ebefSDave Chinner 	 * Store the current LSN of the inode so that we can tell whether the
3231664ffb8aSChristoph Hellwig 	 * item has moved in the AIL from xfs_buf_inode_iodone().
32321319ebefSDave Chinner 	 */
32337b2e2a31SDavid Chinner 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
32347b2e2a31SDavid Chinner 				&iip->ili_item.li_lsn);
32351da177e4SLinus Torvalds 
323693848a99SChristoph Hellwig 	/* generate the checksum. */
323793848a99SChristoph Hellwig 	xfs_dinode_calc_crc(mp, dip);
3238f2019299SBrian Foster 	return error;
32391da177e4SLinus Torvalds }
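
/*
 * Illustrative sketch only, not part of xfs_inode.c: the two-stage dirty
 * field handoff described in the ili_fields/ili_last_fields comment in
 * xfs_iflush() above, reduced to a toy structure.  "fields" collects bits
 * dirtied since the flush started, "last_fields" remembers what the
 * in-flight flush wrote, and only I/O completion may forget those bits
 * for good.  All names below are hypothetical.
 */
struct toy_log_item {
	unsigned int	fields;		/* dirtied since the flush started */
	unsigned int	last_fields;	/* covered by the in-flight flush  */
};

static void toy_flush_start(struct toy_log_item *iip)
{
	iip->last_fields = iip->fields;	/* remember what this flush covers */
	iip->fields = 0;		/* new dirtying accumulates afresh */
}

static void toy_flush_done(struct toy_log_item *iip)
{
	iip->last_fields = 0;		/* the data is on disk, forget it */
}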
324044a8736bSDarrick J. Wong 
3241e6187b34SDave Chinner /*
3242e6187b34SDave Chinner  * Non-blocking flush of dirty inode metadata into the backing buffer.
3243e6187b34SDave Chinner  *
3244e6187b34SDave Chinner  * The caller must have a reference to the inode and hold the cluster buffer
3245e6187b34SDave Chinner  * locked. The function will walk across all the inodes on the cluster buffer it
3246e6187b34SDave Chinner  * locked. The function walks all the inodes attached to the cluster buffer
3247e6187b34SDave Chinner  * that it can lock without blocking, and flushes them to the cluster buffer.
32485717ea4dSDave Chinner  * On successful flushing of at least one inode, the caller must write out the
32495717ea4dSDave Chinner  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
32505717ea4dSDave Chinner  * the caller needs to release the buffer. On failure, the filesystem will be
32515717ea4dSDave Chinner  * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
32525717ea4dSDave Chinner  * will be returned.
3253e6187b34SDave Chinner  */
3254e6187b34SDave Chinner int
3255e6187b34SDave Chinner xfs_iflush_cluster(
3256e6187b34SDave Chinner 	struct xfs_buf		*bp)
3257e6187b34SDave Chinner {
32585717ea4dSDave Chinner 	struct xfs_mount	*mp = bp->b_mount;
32595717ea4dSDave Chinner 	struct xfs_log_item	*lip, *n;
32605717ea4dSDave Chinner 	struct xfs_inode	*ip;
32615717ea4dSDave Chinner 	struct xfs_inode_log_item *iip;
3262e6187b34SDave Chinner 	int			clcount = 0;
32635717ea4dSDave Chinner 	int			error = 0;
3264e6187b34SDave Chinner 
3265e6187b34SDave Chinner 	/*
32665717ea4dSDave Chinner 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3267d2d7c047SDave Chinner 	 * will remove itself from the list.
3268e6187b34SDave Chinner 	 */
32695717ea4dSDave Chinner 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
32705717ea4dSDave Chinner 		iip = (struct xfs_inode_log_item *)lip;
32715717ea4dSDave Chinner 		ip = iip->ili_inode;
32725717ea4dSDave Chinner 
32735717ea4dSDave Chinner 		/*
32745717ea4dSDave Chinner 		 * Quick and dirty check to avoid locks if possible.
32755717ea4dSDave Chinner 		 */
3276718ecc50SDave Chinner 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
32775717ea4dSDave Chinner 			continue;
32785717ea4dSDave Chinner 		if (xfs_ipincount(ip))
32795717ea4dSDave Chinner 			continue;
32805717ea4dSDave Chinner 
32815717ea4dSDave Chinner 		/*
32825717ea4dSDave Chinner 		 * The inode is still attached to the buffer, which means it is
32835717ea4dSDave Chinner 		 * dirty but reclaim might try to grab it. Check carefully for
32845717ea4dSDave Chinner 		 * that, and grab the ilock while still holding the i_flags_lock
32855717ea4dSDave Chinner 		 * to guarantee reclaim will not be able to reclaim this inode
32865717ea4dSDave Chinner 		 * once we drop the i_flags_lock.
32875717ea4dSDave Chinner 		 */
32885717ea4dSDave Chinner 		spin_lock(&ip->i_flags_lock);
32895717ea4dSDave Chinner 		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3290718ecc50SDave Chinner 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
32915717ea4dSDave Chinner 			spin_unlock(&ip->i_flags_lock);
3292e6187b34SDave Chinner 			continue;
3293e6187b34SDave Chinner 		}
3294e6187b34SDave Chinner 
3295e6187b34SDave Chinner 		/*
32965717ea4dSDave Chinner 		 * ILOCK will pin the inode against reclaim and prevent
32975717ea4dSDave Chinner 		 * concurrent transactions modifying the inode while we are
3298718ecc50SDave Chinner 		 * flushing the inode. If we get the lock, set the flushing
3299718ecc50SDave Chinner 		 * state before we drop the i_flags_lock.
3300e6187b34SDave Chinner 		 */
33015717ea4dSDave Chinner 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
33025717ea4dSDave Chinner 			spin_unlock(&ip->i_flags_lock);
33035717ea4dSDave Chinner 			continue;
33045717ea4dSDave Chinner 		}
3305718ecc50SDave Chinner 		__xfs_iflags_set(ip, XFS_IFLUSHING);
33065717ea4dSDave Chinner 		spin_unlock(&ip->i_flags_lock);
33075717ea4dSDave Chinner 
33085717ea4dSDave Chinner 		/*
33095717ea4dSDave Chinner 		 * Abort flushing this inode if we are shut down because the
33105717ea4dSDave Chinner 		 * inode may not currently be in the AIL. This can occur when
33115717ea4dSDave Chinner 		 * a log I/O failure unpins the inode without inserting it into the
33125717ea4dSDave Chinner 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
33135717ea4dSDave Chinner 		 * that otherwise looks like it should be flushed.
33145717ea4dSDave Chinner 		 */
331501728b44SDave Chinner 		if (xlog_is_shutdown(mp->m_log)) {
33165717ea4dSDave Chinner 			xfs_iunpin_wait(ip);
33175717ea4dSDave Chinner 			xfs_iflush_abort(ip);
33185717ea4dSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
33195717ea4dSDave Chinner 			error = -EIO;
33205717ea4dSDave Chinner 			continue;
33215717ea4dSDave Chinner 		}
33225717ea4dSDave Chinner 
33235717ea4dSDave Chinner 		/* don't block waiting on a log force to unpin dirty inodes */
33245717ea4dSDave Chinner 		if (xfs_ipincount(ip)) {
3325718ecc50SDave Chinner 			xfs_iflags_clear(ip, XFS_IFLUSHING);
33265717ea4dSDave Chinner 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
33275717ea4dSDave Chinner 			continue;
33285717ea4dSDave Chinner 		}
33295717ea4dSDave Chinner 
33305717ea4dSDave Chinner 		if (!xfs_inode_clean(ip))
33315717ea4dSDave Chinner 			error = xfs_iflush(ip, bp);
33325717ea4dSDave Chinner 		else
3333718ecc50SDave Chinner 			xfs_iflags_clear(ip, XFS_IFLUSHING);
33345717ea4dSDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
33355717ea4dSDave Chinner 		if (error)
3336e6187b34SDave Chinner 			break;
3337e6187b34SDave Chinner 		clcount++;
3338e6187b34SDave Chinner 	}
3339e6187b34SDave Chinner 
3340e6187b34SDave Chinner 	if (error) {
334101728b44SDave Chinner 		/*
334201728b44SDave Chinner 		 * Shutdown first so we kill the log before we release this
334301728b44SDave Chinner 		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
334401728b44SDave Chinner 		 * of the log, failing it before the _log_ is shut down can
334501728b44SDave Chinner 		 * result in the log tail being moved forward in the journal
334601728b44SDave Chinner 		 * on disk because log writes can still be taking place. Hence
334701728b44SDave Chinner 		 * unpinning the tail will allow the ICREATE intent to be
334801728b44SDave Chinner 		 * removed from the log and recovery will fail with uninitialised
334901728b44SDave Chinner 		 * inode cluster buffers.
335001728b44SDave Chinner 		 */
335101728b44SDave Chinner 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3352e6187b34SDave Chinner 		bp->b_flags |= XBF_ASYNC;
3353e6187b34SDave Chinner 		xfs_buf_ioend_fail(bp);
3354e6187b34SDave Chinner 		return error;
3355e6187b34SDave Chinner 	}
3356e6187b34SDave Chinner 
33575717ea4dSDave Chinner 	if (!clcount)
33585717ea4dSDave Chinner 		return -EAGAIN;
33595717ea4dSDave Chinner 
33605717ea4dSDave Chinner 	XFS_STATS_INC(mp, xs_icluster_flushcnt);
33615717ea4dSDave Chinner 	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
33625717ea4dSDave Chinner 	return 0;
33635717ea4dSDave Chinner 
33645717ea4dSDave Chinner }
33655717ea4dSDave Chinner 
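
/*
 * Illustrative sketch only, not part of xfs_inode.c: how a write-back caller
 * might consume the xfs_iflush_cluster() contract documented above.  Treat
 * the buffer helpers here as assumptions about the call site; the point is
 * the three outcomes: something was flushed (write the buffer back), -EAGAIN
 * (nothing flushed, just release the buffer), or a shutdown error (the
 * buffer has already been released for us).
 */
static int example_flush_cluster_buffer(struct xfs_buf *bp)
{
	int	error;

	/* bp must already be locked and referenced by the caller. */
	error = xfs_iflush_cluster(bp);
	if (error == -EAGAIN) {
		xfs_buf_relse(bp);	/* nothing flushed; drop the buffer */
		return 0;
	}
	if (error)
		return error;		/* fs is shut down, bp already gone */

	return xfs_bwrite(bp);		/* at least one inode was flushed */
}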
336644a8736bSDarrick J. Wong /* Release an inode. */
336744a8736bSDarrick J. Wong void
336844a8736bSDarrick J. Wong xfs_irele(
336944a8736bSDarrick J. Wong 	struct xfs_inode	*ip)
337044a8736bSDarrick J. Wong {
337144a8736bSDarrick J. Wong 	trace_xfs_irele(ip, _RET_IP_);
337244a8736bSDarrick J. Wong 	iput(VFS_I(ip));
337344a8736bSDarrick J. Wong }
337454fbdd10SChristoph Hellwig 
337554fbdd10SChristoph Hellwig /*
337654fbdd10SChristoph Hellwig  * Ensure all committed transactions touching the inode are written to the log.
337754fbdd10SChristoph Hellwig  */
337854fbdd10SChristoph Hellwig int
337954fbdd10SChristoph Hellwig xfs_log_force_inode(
338054fbdd10SChristoph Hellwig 	struct xfs_inode	*ip)
338154fbdd10SChristoph Hellwig {
33825f9b4b0dSDave Chinner 	xfs_csn_t		seq = 0;
338354fbdd10SChristoph Hellwig 
338454fbdd10SChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_SHARED);
338554fbdd10SChristoph Hellwig 	if (xfs_ipincount(ip))
33865f9b4b0dSDave Chinner 		seq = ip->i_itemp->ili_commit_seq;
338754fbdd10SChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
338854fbdd10SChristoph Hellwig 
33895f9b4b0dSDave Chinner 	if (!seq)
339054fbdd10SChristoph Hellwig 		return 0;
33915f9b4b0dSDave Chinner 	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
339254fbdd10SChristoph Hellwig }
3393e2aaee9cSDarrick J. Wong 
3394e2aaee9cSDarrick J. Wong /*
3395e2aaee9cSDarrick J. Wong  * Grab the exclusive iolock for a data copy from src to dest, making sure to
3396e2aaee9cSDarrick J. Wong  * abide by the VFS locking order (lowest pointer value goes first) and to
3397e2aaee9cSDarrick J. Wong  * break the layout leases before proceeding.  The loop is needed because we
3398e2aaee9cSDarrick J. Wong  * cannot call the blocking break_layout() with the iolocks held, and therefore
3399e2aaee9cSDarrick J. Wong  * have to back out both locks.
3400e2aaee9cSDarrick J. Wong  */
3401e2aaee9cSDarrick J. Wong static int
3402e2aaee9cSDarrick J. Wong xfs_iolock_two_inodes_and_break_layout(
3403e2aaee9cSDarrick J. Wong 	struct inode		*src,
3404e2aaee9cSDarrick J. Wong 	struct inode		*dest)
3405e2aaee9cSDarrick J. Wong {
3406e2aaee9cSDarrick J. Wong 	int			error;
3407e2aaee9cSDarrick J. Wong 
3408e2aaee9cSDarrick J. Wong 	if (src > dest)
3409e2aaee9cSDarrick J. Wong 		swap(src, dest);
3410e2aaee9cSDarrick J. Wong 
3411e2aaee9cSDarrick J. Wong retry:
3412e2aaee9cSDarrick J. Wong 	/* Wait to break both inodes' layouts before we start locking. */
3413e2aaee9cSDarrick J. Wong 	error = break_layout(src, true);
3414e2aaee9cSDarrick J. Wong 	if (error)
3415e2aaee9cSDarrick J. Wong 		return error;
3416e2aaee9cSDarrick J. Wong 	if (src != dest) {
3417e2aaee9cSDarrick J. Wong 		error = break_layout(dest, true);
3418e2aaee9cSDarrick J. Wong 		if (error)
3419e2aaee9cSDarrick J. Wong 			return error;
3420e2aaee9cSDarrick J. Wong 	}
3421e2aaee9cSDarrick J. Wong 
3422e2aaee9cSDarrick J. Wong 	/* Lock one inode and make sure nobody got in and leased it. */
3423e2aaee9cSDarrick J. Wong 	inode_lock(src);
3424e2aaee9cSDarrick J. Wong 	error = break_layout(src, false);
3425e2aaee9cSDarrick J. Wong 	if (error) {
3426e2aaee9cSDarrick J. Wong 		inode_unlock(src);
3427e2aaee9cSDarrick J. Wong 		if (error == -EWOULDBLOCK)
3428e2aaee9cSDarrick J. Wong 			goto retry;
3429e2aaee9cSDarrick J. Wong 		return error;
3430e2aaee9cSDarrick J. Wong 	}
3431e2aaee9cSDarrick J. Wong 
3432e2aaee9cSDarrick J. Wong 	if (src == dest)
3433e2aaee9cSDarrick J. Wong 		return 0;
3434e2aaee9cSDarrick J. Wong 
3435e2aaee9cSDarrick J. Wong 	/* Lock the other inode and make sure nobody got in and leased it. */
3436e2aaee9cSDarrick J. Wong 	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3437e2aaee9cSDarrick J. Wong 	error = break_layout(dest, false);
3438e2aaee9cSDarrick J. Wong 	if (error) {
3439e2aaee9cSDarrick J. Wong 		inode_unlock(src);
3440e2aaee9cSDarrick J. Wong 		inode_unlock(dest);
3441e2aaee9cSDarrick J. Wong 		if (error == -EWOULDBLOCK)
3442e2aaee9cSDarrick J. Wong 			goto retry;
3443e2aaee9cSDarrick J. Wong 		return error;
3444e2aaee9cSDarrick J. Wong 	}
3445e2aaee9cSDarrick J. Wong 
3446e2aaee9cSDarrick J. Wong 	return 0;
3447e2aaee9cSDarrick J. Wong }
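
/*
 * Illustrative sketch only, not part of xfs_inode.c: the address-ordered
 * locking rule the helper above relies on.  Taking the lower pointer value
 * first means two racing callers always acquire the locks in the same order
 * and so cannot deadlock against each other; the second lock carries the
 * nested annotation for lockdep.  Only calls already used above appear here.
 */
static void example_lock_two_inodes(struct inode *a, struct inode *b)
{
	if (a > b)
		swap(a, b);		/* lowest pointer value goes first */

	inode_lock(a);
	if (a != b)
		inode_lock_nested(b, I_MUTEX_NONDIR2);
}

static void example_unlock_two_inodes(struct inode *a, struct inode *b)
{
	inode_unlock(a);
	if (a != b)
		inode_unlock(b);
}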
3448e2aaee9cSDarrick J. Wong 
344913f9e267SShiyang Ruan static int
345013f9e267SShiyang Ruan xfs_mmaplock_two_inodes_and_break_dax_layout(
345113f9e267SShiyang Ruan 	struct xfs_inode	*ip1,
345213f9e267SShiyang Ruan 	struct xfs_inode	*ip2)
345313f9e267SShiyang Ruan {
345413f9e267SShiyang Ruan 	int			error;
345513f9e267SShiyang Ruan 	bool			retry;
345613f9e267SShiyang Ruan 	struct page		*page;
345713f9e267SShiyang Ruan 
345813f9e267SShiyang Ruan 	if (ip1->i_ino > ip2->i_ino)
345913f9e267SShiyang Ruan 		swap(ip1, ip2);
346013f9e267SShiyang Ruan 
346113f9e267SShiyang Ruan again:
346213f9e267SShiyang Ruan 	retry = false;
346313f9e267SShiyang Ruan 	/* Lock the first inode */
346413f9e267SShiyang Ruan 	xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
346513f9e267SShiyang Ruan 	error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
346613f9e267SShiyang Ruan 	if (error || retry) {
346713f9e267SShiyang Ruan 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
346813f9e267SShiyang Ruan 		if (error == 0 && retry)
346913f9e267SShiyang Ruan 			goto again;
347013f9e267SShiyang Ruan 		return error;
347113f9e267SShiyang Ruan 	}
347213f9e267SShiyang Ruan 
347313f9e267SShiyang Ruan 	if (ip1 == ip2)
347413f9e267SShiyang Ruan 		return 0;
347513f9e267SShiyang Ruan 
347613f9e267SShiyang Ruan 	/* Nested lock the second inode */
347713f9e267SShiyang Ruan 	xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
347813f9e267SShiyang Ruan 	/*
347913f9e267SShiyang Ruan 	 * We cannot use xfs_break_dax_layouts() directly here because it may
348013f9e267SShiyang Ruan 	 * need to drop and retake XFS_MMAPLOCK_EXCL, which is not suitable
348113f9e267SShiyang Ruan 	 * for this nested lock case.
348213f9e267SShiyang Ruan 	 */
348313f9e267SShiyang Ruan 	page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
348413f9e267SShiyang Ruan 	if (page && page_ref_count(page) != 1) {
348513f9e267SShiyang Ruan 		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
348613f9e267SShiyang Ruan 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
348713f9e267SShiyang Ruan 		goto again;
348813f9e267SShiyang Ruan 	}
348913f9e267SShiyang Ruan 
349013f9e267SShiyang Ruan 	return 0;
349113f9e267SShiyang Ruan }
349213f9e267SShiyang Ruan 
3493e2aaee9cSDarrick J. Wong /*
3494e2aaee9cSDarrick J. Wong  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3495e2aaee9cSDarrick J. Wong  * mmap activity.
3496e2aaee9cSDarrick J. Wong  */
3497e2aaee9cSDarrick J. Wong int
3498e2aaee9cSDarrick J. Wong xfs_ilock2_io_mmap(
3499e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip1,
3500e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip2)
3501e2aaee9cSDarrick J. Wong {
3502e2aaee9cSDarrick J. Wong 	int			ret;
3503e2aaee9cSDarrick J. Wong 
3504e2aaee9cSDarrick J. Wong 	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3505e2aaee9cSDarrick J. Wong 	if (ret)
3506e2aaee9cSDarrick J. Wong 		return ret;
350713f9e267SShiyang Ruan 
350813f9e267SShiyang Ruan 	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
350913f9e267SShiyang Ruan 		ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
351013f9e267SShiyang Ruan 		if (ret) {
351113f9e267SShiyang Ruan 			inode_unlock(VFS_I(ip2));
351213f9e267SShiyang Ruan 			if (ip1 != ip2)
351313f9e267SShiyang Ruan 				inode_unlock(VFS_I(ip1));
351413f9e267SShiyang Ruan 			return ret;
351513f9e267SShiyang Ruan 		}
351613f9e267SShiyang Ruan 	} else
3517d2c292d8SJan Kara 		filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
3518d2c292d8SJan Kara 					    VFS_I(ip2)->i_mapping);
351913f9e267SShiyang Ruan 
3520e2aaee9cSDarrick J. Wong 	return 0;
3521e2aaee9cSDarrick J. Wong }
3522e2aaee9cSDarrick J. Wong 
3523e2aaee9cSDarrick J. Wong /* Unlock both inodes to allow IO and mmap activity. */
3524e2aaee9cSDarrick J. Wong void
3525e2aaee9cSDarrick J. Wong xfs_iunlock2_io_mmap(
3526e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip1,
3527e2aaee9cSDarrick J. Wong 	struct xfs_inode	*ip2)
3528e2aaee9cSDarrick J. Wong {
352913f9e267SShiyang Ruan 	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
353013f9e267SShiyang Ruan 		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
353113f9e267SShiyang Ruan 		if (ip1 != ip2)
353213f9e267SShiyang Ruan 			xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
353313f9e267SShiyang Ruan 	} else
3534d2c292d8SJan Kara 		filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
3535d2c292d8SJan Kara 					      VFS_I(ip2)->i_mapping);
353613f9e267SShiyang Ruan 
3537e2aaee9cSDarrick J. Wong 	inode_unlock(VFS_I(ip2));
3538d2c292d8SJan Kara 	if (ip1 != ip2)
3539e2aaee9cSDarrick J. Wong 		inode_unlock(VFS_I(ip1));
3540e2aaee9cSDarrick J. Wong }
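
/*
 * Illustrative sketch only, not part of xfs_inode.c: typical pairing of
 * xfs_ilock2_io_mmap() and xfs_iunlock2_io_mmap() around work that must
 * exclude both file I/O and page faults on two inodes.  do_remap_work() is
 * a hypothetical stand-in for the real operation (e.g. a reflink remap).
 */
static int example_locked_remap(
	struct xfs_inode	*src,
	struct xfs_inode	*dest)
{
	int			error;

	error = xfs_ilock2_io_mmap(src, dest);
	if (error)
		return error;

	error = do_remap_work(src, dest);	/* hypothetical */

	xfs_iunlock2_io_mmap(src, dest);
	return error;
}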
3541