// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes.  These can correspond with incore inode
 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
};

static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);

/*
 * Private inode cache walk flags for struct xfs_icwalk.  Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)

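/*
 * Example (a sketch): an internal reclaim scan that also wants sick inodes
 * and a scan budget would combine the private flags like so:
 *
 *	struct xfs_icwalk	icw = {
 *		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT |
 *				  XFS_ICWALK_FLAG_RECLAIM_SICK,
 *		.icw_scan_limit	= 1024,
 *	};
 *
 * This mirrors what xfs_reclaim_inodes_nr() below does with nr_to_scan.
 */
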
/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = kmem_cache_alloc(xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode or i_state! */
	VFS_I(ip)->i_mode = 0;
	VFS_I(ip)->i_state = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}

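/*
 * RCU callback that finishes tearing down an inode: release the data fork
 * (for files, dirs and symlinks), any attr and CoW forks, and the attached
 * inode log item, then return the inode to the slab cache.
 */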
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	if (ip->i_afp) {
		xfs_idestroy_fork(ip->i_afp);
		kmem_cache_free(xfs_ifork_cache, ip->i_afp);
	}
	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_cache, ip);
}

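/*
 * Defer the actual freeing to an RCU grace period so that lockless radix
 * tree lookups still walking the tree see either a live inode or one whose
 * i_ino has been zeroed, never freed memory.
 */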
static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount        *mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}

/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(mp);
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

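/*
 * Wait for the XFS_INEW flag to clear on an inode that another thread is
 * still initialising.  We sleep on a waitqueue keyed on __XFS_INEW_BIT and
 * recheck the flag each time we are woken.
 */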
static inline void
xfs_inew_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wq_entry);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	return error;
}

/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state.  Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode.  We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	if (error) {
		bool	wake;

		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble.  Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		wake = !!__xfs_iflags_test(ip, XFS_INEW);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		if (wake)
			wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/* Make all pending inactivation work start immediately. */
static void
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
	}
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet.  The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock.  For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock.  We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}

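/*
 * The cache missed: allocate a new incore inode, read the on-disk inode in
 * (unless we can synthesize a brand-new v5 inode core), and insert it into
 * the per-AG radix tree with XFS_INEW set so that concurrent RCU lookups
 * see it as still under construction.
 */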
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_has_v3inodes(mp) &&
	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		struct xfs_buf		*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.  The inode is looked up
 * in the cache held in each AG.  If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

7886d8b79cfSDave Chinner /*
789378f681cSDarrick J. Wong  * "Is this a cached inode that's also allocated?"
790378f681cSDarrick J. Wong  *
791378f681cSDarrick J. Wong  * Look up an inode by number in the given file system.  If the inode is
792378f681cSDarrick J. Wong  * in cache and isn't in purgatory, return 1 if the inode is allocated
793378f681cSDarrick J. Wong  * and 0 if it is not.  For all other cases (not in cache, being torn
794378f681cSDarrick J. Wong  * down, etc.), return a negative error code.
795378f681cSDarrick J. Wong  *
796378f681cSDarrick J. Wong  * The caller has to prevent inode allocation and freeing activity,
797378f681cSDarrick J. Wong  * presumably by locking the AGI buffer.   This is to ensure that an
798378f681cSDarrick J. Wong  * inode cannot transition from allocated to freed until the caller is
799378f681cSDarrick J. Wong  * ready to allow that.  If the inode is in an intermediate state (new,
800378f681cSDarrick J. Wong  * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
801378f681cSDarrick J. Wong  * inode is not in the cache, -ENOENT will be returned.  The caller must
802378f681cSDarrick J. Wong  * deal with these scenarios appropriately.
803378f681cSDarrick J. Wong  *
804378f681cSDarrick J. Wong  * This is a specialized use case for the online scrubber; if you're
805378f681cSDarrick J. Wong  * reading this, you probably want xfs_iget.
806378f681cSDarrick J. Wong  */
807378f681cSDarrick J. Wong int
808378f681cSDarrick J. Wong xfs_icache_inode_is_allocated(
809378f681cSDarrick J. Wong 	struct xfs_mount	*mp,
810378f681cSDarrick J. Wong 	struct xfs_trans	*tp,
811378f681cSDarrick J. Wong 	xfs_ino_t		ino,
812378f681cSDarrick J. Wong 	bool			*inuse)
813378f681cSDarrick J. Wong {
814378f681cSDarrick J. Wong 	struct xfs_inode	*ip;
815378f681cSDarrick J. Wong 	int			error;
816378f681cSDarrick J. Wong 
817378f681cSDarrick J. Wong 	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
818378f681cSDarrick J. Wong 	if (error)
819378f681cSDarrick J. Wong 		return error;
820378f681cSDarrick J. Wong 
821378f681cSDarrick J. Wong 	*inuse = !!(VFS_I(ip)->i_mode);
82244a8736bSDarrick J. Wong 	xfs_irele(ip);
823378f681cSDarrick J. Wong 	return 0;
824378f681cSDarrick J. Wong }
825378f681cSDarrick J. Wong 
826579b62faSBrian Foster /*
8276d8b79cfSDave Chinner  * Grab the inode for reclaim exclusively.
82850718b8dSDave Chinner  *
82950718b8dSDave Chinner  * We have found this inode via a lookup under RCU, so the inode may have
83050718b8dSDave Chinner  * already been freed, or it may be in the process of being recycled by
83150718b8dSDave Chinner  * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
83250718b8dSDave Chinner  * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
83350718b8dSDave Chinner  * will not be set. Hence we need to check for both these flag conditions to
83450718b8dSDave Chinner  * avoid inodes that are no longer reclaim candidates.
83550718b8dSDave Chinner  *
83650718b8dSDave Chinner  * Note: checking for other state flags here, under the i_flags_lock or not, is
83750718b8dSDave Chinner  * racy and should be avoided. Those races should be resolved only after we have
83850718b8dSDave Chinner  * ensured that we are able to reclaim this inode and the world can see that we
83950718b8dSDave Chinner  * are going to reclaim it.
84050718b8dSDave Chinner  *
84150718b8dSDave Chinner  * Return true if we grabbed it, false otherwise.
8426d8b79cfSDave Chinner  */
84350718b8dSDave Chinner static bool
844f1bc5c56SDarrick J. Wong xfs_reclaim_igrab(
8459492750aSDarrick J. Wong 	struct xfs_inode	*ip,
846b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
8476d8b79cfSDave Chinner {
8486d8b79cfSDave Chinner 	ASSERT(rcu_read_lock_held());
8496d8b79cfSDave Chinner 
8506d8b79cfSDave Chinner 	spin_lock(&ip->i_flags_lock);
8516d8b79cfSDave Chinner 	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
8526d8b79cfSDave Chinner 	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
8536d8b79cfSDave Chinner 		/* not a reclaim candidate. */
8546d8b79cfSDave Chinner 		spin_unlock(&ip->i_flags_lock);
85550718b8dSDave Chinner 		return false;
8566d8b79cfSDave Chinner 	}
8579492750aSDarrick J. Wong 
8589492750aSDarrick J. Wong 	/* Don't reclaim a sick inode unless the caller asked for it. */
8599492750aSDarrick J. Wong 	if (ip->i_sick &&
860b26b2bf1SDarrick J. Wong 	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
8619492750aSDarrick J. Wong 		spin_unlock(&ip->i_flags_lock);
8629492750aSDarrick J. Wong 		return false;
8639492750aSDarrick J. Wong 	}
8649492750aSDarrick J. Wong 
8656d8b79cfSDave Chinner 	__xfs_iflags_set(ip, XFS_IRECLAIM);
8666d8b79cfSDave Chinner 	spin_unlock(&ip->i_flags_lock);
86750718b8dSDave Chinner 	return true;
8686d8b79cfSDave Chinner }
8696d8b79cfSDave Chinner 
8706d8b79cfSDave Chinner /*
87102511a5aSDave Chinner  * Inode reclaim is non-blocking, so the default action if progress cannot be
87202511a5aSDave Chinner  * made is to "requeue" the inode for reclaim by unlocking it and clearing the
87302511a5aSDave Chinner  * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
87402511a5aSDave Chinner  * blocking anymore and hence we can wait for the inode to be able to reclaim
87502511a5aSDave Chinner  * it.
8766d8b79cfSDave Chinner  *
87702511a5aSDave Chinner  * We do no IO here - if callers require inodes to be cleaned they must push the
87802511a5aSDave Chinner  * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
87902511a5aSDave Chinner  * done in the background in a non-blocking manner, and enables memory reclaim
88002511a5aSDave Chinner  * to make progress without blocking.
8816d8b79cfSDave Chinner  */
8824d0bab3aSDave Chinner static void
8836d8b79cfSDave Chinner xfs_reclaim_inode(
8846d8b79cfSDave Chinner 	struct xfs_inode	*ip,
88550718b8dSDave Chinner 	struct xfs_perag	*pag)
8866d8b79cfSDave Chinner {
8878a17d7ddSDave Chinner 	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
8886d8b79cfSDave Chinner 
8899552e14dSDave Chinner 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
8906d8b79cfSDave Chinner 		goto out;
891718ecc50SDave Chinner 	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
8929552e14dSDave Chinner 		goto out_iunlock;
8936d8b79cfSDave Chinner 
89475c8c50fSDave Chinner 	if (xfs_is_shutdown(ip->i_mount)) {
8956d8b79cfSDave Chinner 		xfs_iunpin_wait(ip);
89688fc1879SBrian Foster 		xfs_iflush_abort(ip);
8976d8b79cfSDave Chinner 		goto reclaim;
8986d8b79cfSDave Chinner 	}
899617825feSDave Chinner 	if (xfs_ipincount(ip))
900718ecc50SDave Chinner 		goto out_clear_flush;
901617825feSDave Chinner 	if (!xfs_inode_clean(ip))
902718ecc50SDave Chinner 		goto out_clear_flush;
903617825feSDave Chinner 
904718ecc50SDave Chinner 	xfs_iflags_clear(ip, XFS_IFLUSHING);
9056d8b79cfSDave Chinner reclaim:
906ab23a776SDave Chinner 	trace_xfs_inode_reclaiming(ip);
90798efe8afSBrian Foster 
9088a17d7ddSDave Chinner 	/*
9098a17d7ddSDave Chinner 	 * Because we use RCU freeing we need to ensure the inode always appears
9108a17d7ddSDave Chinner 	 * to be reclaimed with an invalid inode number when in the free state.
91198efe8afSBrian Foster 	 * We do this as early as possible under the ILOCK so that
912f2e9ad21SOmar Sandoval 	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
913f2e9ad21SOmar Sandoval 	 * detect races with us here. By doing this, we guarantee that once
914f2e9ad21SOmar Sandoval 	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
915f2e9ad21SOmar Sandoval 	 * it will see either a valid inode that will serialise correctly, or it
916f2e9ad21SOmar Sandoval 	 * will see an invalid inode that it can skip.
9178a17d7ddSDave Chinner 	 */
9188a17d7ddSDave Chinner 	spin_lock(&ip->i_flags_lock);
9198a17d7ddSDave Chinner 	ip->i_flags = XFS_IRECLAIM;
9208a17d7ddSDave Chinner 	ip->i_ino = 0;
921255794c7SDarrick J. Wong 	ip->i_sick = 0;
922255794c7SDarrick J. Wong 	ip->i_checked = 0;
9238a17d7ddSDave Chinner 	spin_unlock(&ip->i_flags_lock);
9248a17d7ddSDave Chinner 
9256d8b79cfSDave Chinner 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
9266d8b79cfSDave Chinner 
927ff6d6af2SBill O'Donnell 	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
9286d8b79cfSDave Chinner 	/*
9296d8b79cfSDave Chinner 	 * Remove the inode from the per-AG radix tree.
9306d8b79cfSDave Chinner 	 *
9316d8b79cfSDave Chinner 	 * Because radix_tree_delete won't complain even if the item was never
9326d8b79cfSDave Chinner 	 * added to the tree assert that it's been there before to catch
9336d8b79cfSDave Chinner 	 * problems with the inode life time early on.
9346d8b79cfSDave Chinner 	 */
9356d8b79cfSDave Chinner 	spin_lock(&pag->pag_ici_lock);
9366d8b79cfSDave Chinner 	if (!radix_tree_delete(&pag->pag_ici_root,
9378a17d7ddSDave Chinner 				XFS_INO_TO_AGINO(ip->i_mount, ino)))
9386d8b79cfSDave Chinner 		ASSERT(0);
939c076ae7aSDarrick J. Wong 	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
9406d8b79cfSDave Chinner 	spin_unlock(&pag->pag_ici_lock);
9416d8b79cfSDave Chinner 
9426d8b79cfSDave Chinner 	/*
9436d8b79cfSDave Chinner 	 * Here we do an (almost) spurious inode lock in order to coordinate
9446d8b79cfSDave Chinner 	 * with inode cache radix tree lookups.  This is because the lookup
9456d8b79cfSDave Chinner 	 * can reference the inodes in the cache without taking references.
9466d8b79cfSDave Chinner 	 *
9476d8b79cfSDave Chinner 	 * We make that OK here by ensuring that we wait until the inode is
9486d8b79cfSDave Chinner 	 * unlocked after the lookup before we go ahead and free it.
9496d8b79cfSDave Chinner 	 */
9506d8b79cfSDave Chinner 	xfs_ilock(ip, XFS_ILOCK_EXCL);
9513ea06d73SDarrick J. Wong 	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
9526d8b79cfSDave Chinner 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
95396355d5aSDave Chinner 	ASSERT(xfs_inode_clean(ip));
9546d8b79cfSDave Chinner 
9558a17d7ddSDave Chinner 	__xfs_inode_free(ip);
9564d0bab3aSDave Chinner 	return;
9576d8b79cfSDave Chinner 
958718ecc50SDave Chinner out_clear_flush:
959718ecc50SDave Chinner 	xfs_iflags_clear(ip, XFS_IFLUSHING);
9609552e14dSDave Chinner out_iunlock:
9616d8b79cfSDave Chinner 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
9629552e14dSDave Chinner out:
963617825feSDave Chinner 	xfs_iflags_clear(ip, XFS_IRECLAIM);
9646d8b79cfSDave Chinner }
9656d8b79cfSDave Chinner 
9669492750aSDarrick J. Wong /* Reclaim sick inodes if we're unmounting or the fs went down. */
9679492750aSDarrick J. Wong static inline bool
9689492750aSDarrick J. Wong xfs_want_reclaim_sick(
9699492750aSDarrick J. Wong 	struct xfs_mount	*mp)
9709492750aSDarrick J. Wong {
9712e973b2cSDave Chinner 	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
97275c8c50fSDave Chinner 	       xfs_is_shutdown(mp);
9739492750aSDarrick J. Wong }
9749492750aSDarrick J. Wong 
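/*
 * Reclaim all reclaimable inodes, looping until no perag carries the
 * XFS_ICI_RECLAIM_TAG any more.  Each pass pushes the AIL synchronously so
 * that dirty inodes are written back and become reclaimable.
 */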
void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	long			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

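/*
 * An intersection-based inode filtering algorithm: every criterion the
 * caller set in @icw must match for the inode to be processed.  Compare
 * xfs_icwalk_match_id_union() below.
 */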
104039b1cfd7SDarrick J. Wong STATIC bool
1041b26b2bf1SDarrick J. Wong xfs_icwalk_match_id(
10423e3f9f58SBrian Foster 	struct xfs_inode	*ip,
1043b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
10443e3f9f58SBrian Foster {
1045b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1046b26b2bf1SDarrick J. Wong 	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
104739b1cfd7SDarrick J. Wong 		return false;
10481b556048SBrian Foster 
1049b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1050b26b2bf1SDarrick J. Wong 	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
105139b1cfd7SDarrick J. Wong 		return false;
10521b556048SBrian Foster 
1053b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1054b26b2bf1SDarrick J. Wong 	    ip->i_projid != icw->icw_prid)
105539b1cfd7SDarrick J. Wong 		return false;
10561b556048SBrian Foster 
105739b1cfd7SDarrick J. Wong 	return true;
10583e3f9f58SBrian Foster }
10593e3f9f58SBrian Foster 
1060f4526397SBrian Foster /*
1061f4526397SBrian Foster  * A union-based inode filtering algorithm. Process the inode if any of the
1062f4526397SBrian Foster  * criteria match. This is for global/internal scans only.
1063f4526397SBrian Foster  */
106439b1cfd7SDarrick J. Wong STATIC bool
1065b26b2bf1SDarrick J. Wong xfs_icwalk_match_id_union(
1066f4526397SBrian Foster 	struct xfs_inode	*ip,
1067b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1068f4526397SBrian Foster {
1069b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1070b26b2bf1SDarrick J. Wong 	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
107139b1cfd7SDarrick J. Wong 		return true;
1072f4526397SBrian Foster 
1073b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1074b26b2bf1SDarrick J. Wong 	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
107539b1cfd7SDarrick J. Wong 		return true;
1076f4526397SBrian Foster 
1077b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1078b26b2bf1SDarrick J. Wong 	    ip->i_projid == icw->icw_prid)
107939b1cfd7SDarrick J. Wong 		return true;
1080f4526397SBrian Foster 
108139b1cfd7SDarrick J. Wong 	return false;
1082f4526397SBrian Foster }
1083f4526397SBrian Foster 
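/*
 * A minimal user-space sketch of the two filtering modes above.  The type
 * and function names below are invented for illustration and are not
 * kernel API; only the matching logic mirrors xfs_icwalk_match_id() and
 * xfs_icwalk_match_id_union().  Note the degenerate case: with no
 * criteria requested, the intersection filter matches everything and the
 * union filter matches nothing, which is why the union mode is only set
 * by internal scans that always request at least one criterion.
 */
#include <stdbool.h>

struct id_filter {
	bool		want_uid;
	bool		want_gid;
	unsigned int	uid;
	unsigned int	gid;
};

/* Intersection: every requested criterion must match. */
static bool
match_all(
	unsigned int		uid,
	unsigned int		gid,
	const struct id_filter	*f)
{
	if (f->want_uid && uid != f->uid)
		return false;
	if (f->want_gid && gid != f->gid)
		return false;
	return true;
}

/* Union: any one requested criterion is enough. */
static bool
match_any(
	unsigned int		uid,
	unsigned int		gid,
	const struct id_filter	*f)
{
	if (f->want_uid && uid == f->uid)
		return true;
	if (f->want_gid && gid == f->gid)
		return true;
	return false;
}
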
1084a91bf992SDarrick J. Wong /*
1085a91bf992SDarrick J. Wong  * Is this inode @ip eligible for eof/cow block reclamation, given some
1086b26b2bf1SDarrick J. Wong  * filtering parameters @icw?  The inode is eligible if @icw is null or
1087a91bf992SDarrick J. Wong  * if the predicate functions match.
1088a91bf992SDarrick J. Wong  */
1089a91bf992SDarrick J. Wong static bool
1090b26b2bf1SDarrick J. Wong xfs_icwalk_match(
1091a91bf992SDarrick J. Wong 	struct xfs_inode	*ip,
1092b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1093a91bf992SDarrick J. Wong {
109439b1cfd7SDarrick J. Wong 	bool			match;
1095a91bf992SDarrick J. Wong 
1096b26b2bf1SDarrick J. Wong 	if (!icw)
1097a91bf992SDarrick J. Wong 		return true;
1098a91bf992SDarrick J. Wong 
1099b26b2bf1SDarrick J. Wong 	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
1100b26b2bf1SDarrick J. Wong 		match = xfs_icwalk_match_id_union(ip, icw);
1101a91bf992SDarrick J. Wong 	else
1102b26b2bf1SDarrick J. Wong 		match = xfs_icwalk_match_id(ip, icw);
1103a91bf992SDarrick J. Wong 	if (!match)
1104a91bf992SDarrick J. Wong 		return false;
1105a91bf992SDarrick J. Wong 
1106a91bf992SDarrick J. Wong 	/* skip the inode if the file size is too small */
1107b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
1108b26b2bf1SDarrick J. Wong 	    XFS_ISIZE(ip) < icw->icw_min_file_size)
1109a91bf992SDarrick J. Wong 		return false;
1110a91bf992SDarrick J. Wong 
1111a91bf992SDarrick J. Wong 	return true;
1112a91bf992SDarrick J. Wong }
1113a91bf992SDarrick J. Wong 
11144d0bab3aSDave Chinner /*
11154d0bab3aSDave Chinner  * This is a fast pass over the inode cache to try to get reclaim moving on as
11164d0bab3aSDave Chinner  * many inodes as possible in a short period of time. It kicks itself every few
11174d0bab3aSDave Chinner  * seconds, as well as being kicked by the inode cache shrinker when memory
111802511a5aSDave Chinner  * runs low.
11194d0bab3aSDave Chinner  */
11204d0bab3aSDave Chinner void
11214d0bab3aSDave Chinner xfs_reclaim_worker(
11224d0bab3aSDave Chinner 	struct work_struct *work)
11234d0bab3aSDave Chinner {
11244d0bab3aSDave Chinner 	struct xfs_mount *mp = container_of(to_delayed_work(work),
11254d0bab3aSDave Chinner 					struct xfs_mount, m_reclaim_work);
11264d0bab3aSDave Chinner 
1127f1bc5c56SDarrick J. Wong 	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
11284d0bab3aSDave Chinner 	xfs_reclaim_work_queue(mp);
11294d0bab3aSDave Chinner }
11304d0bab3aSDave Chinner 
11313e3f9f58SBrian Foster STATIC int
113241176a68SBrian Foster xfs_inode_free_eofblocks(
113341176a68SBrian Foster 	struct xfs_inode	*ip,
1134b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw,
11350fa4a10aSDarrick J. Wong 	unsigned int		*lockflags)
113641176a68SBrian Foster {
1137390600f8SDarrick J. Wong 	bool			wait;
1138390600f8SDarrick J. Wong 
1139b26b2bf1SDarrick J. Wong 	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
11405400da7dSBrian Foster 
1141ce2d3bbeSDarrick J. Wong 	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1142ce2d3bbeSDarrick J. Wong 		return 0;
1143ce2d3bbeSDarrick J. Wong 
114441176a68SBrian Foster 	/*
114541176a68SBrian Foster 	 * If the mapping is dirty the operation can block and wait for some
114641176a68SBrian Foster 	 * time. Unless we are waiting, skip it.
114741176a68SBrian Foster 	 */
1148390600f8SDarrick J. Wong 	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
114941176a68SBrian Foster 		return 0;
115041176a68SBrian Foster 
1151b26b2bf1SDarrick J. Wong 	if (!xfs_icwalk_match(ip, icw))
11523e3f9f58SBrian Foster 		return 0;
11533e3f9f58SBrian Foster 
1154a36b9261SBrian Foster 	/*
1155a36b9261SBrian Foster 	 * If the caller is waiting, return -EAGAIN to keep the background
1156a36b9261SBrian Foster 	 * scanner moving and revisit the inode in a subsequent pass.
1157a36b9261SBrian Foster 	 */
1158c3155097SBrian Foster 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1159390600f8SDarrick J. Wong 		if (wait)
1160390600f8SDarrick J. Wong 			return -EAGAIN;
1161390600f8SDarrick J. Wong 		return 0;
1162a36b9261SBrian Foster 	}
11630fa4a10aSDarrick J. Wong 	*lockflags |= XFS_IOLOCK_EXCL;
1164390600f8SDarrick J. Wong 
11652b156ff8SDarrick J. Wong 	if (xfs_can_free_eofblocks(ip, false))
11660fa4a10aSDarrick J. Wong 		return xfs_free_eofblocks(ip);
11672b156ff8SDarrick J. Wong 
11682b156ff8SDarrick J. Wong 	/* inode could be preallocated or append-only */
11692b156ff8SDarrick J. Wong 	trace_xfs_inode_free_eofblocks_invalid(ip);
11702b156ff8SDarrick J. Wong 	xfs_inode_clear_eofblocks_tag(ip);
11712b156ff8SDarrick J. Wong 	return 0;
117241176a68SBrian Foster }
117341176a68SBrian Foster 
117483104d44SDarrick J. Wong static void
1175ce2d3bbeSDarrick J. Wong xfs_blockgc_set_iflag(
1176ce2d3bbeSDarrick J. Wong 	struct xfs_inode	*ip,
1177ce2d3bbeSDarrick J. Wong 	unsigned long		iflag)
117827b52867SBrian Foster {
117927b52867SBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
118027b52867SBrian Foster 	struct xfs_perag	*pag;
118127b52867SBrian Foster 
1182ce2d3bbeSDarrick J. Wong 	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1183ce2d3bbeSDarrick J. Wong 
118485a6e764SChristoph Hellwig 	/*
118585a6e764SChristoph Hellwig 	 * Don't bother locking the AG and looking up in the radix trees
118685a6e764SChristoph Hellwig 	 * if we already know that we have the tag set.
118785a6e764SChristoph Hellwig 	 */
1188ce2d3bbeSDarrick J. Wong 	if (ip->i_flags & iflag)
118985a6e764SChristoph Hellwig 		return;
119085a6e764SChristoph Hellwig 	spin_lock(&ip->i_flags_lock);
1191ce2d3bbeSDarrick J. Wong 	ip->i_flags |= iflag;
119285a6e764SChristoph Hellwig 	spin_unlock(&ip->i_flags_lock);
119385a6e764SChristoph Hellwig 
119427b52867SBrian Foster 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
119527b52867SBrian Foster 	spin_lock(&pag->pag_ici_lock);
119627b52867SBrian Foster 
1197c076ae7aSDarrick J. Wong 	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1198ce2d3bbeSDarrick J. Wong 			XFS_ICI_BLOCKGC_TAG);
119927b52867SBrian Foster 
120027b52867SBrian Foster 	spin_unlock(&pag->pag_ici_lock);
120127b52867SBrian Foster 	xfs_perag_put(pag);
120227b52867SBrian Foster }
120327b52867SBrian Foster 
120427b52867SBrian Foster void
120583104d44SDarrick J. Wong xfs_inode_set_eofblocks_tag(
120627b52867SBrian Foster 	xfs_inode_t	*ip)
120727b52867SBrian Foster {
120883104d44SDarrick J. Wong 	trace_xfs_inode_set_eofblocks_tag(ip);
12099669f51dSDarrick J. Wong 	xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
121083104d44SDarrick J. Wong }
121183104d44SDarrick J. Wong 
121283104d44SDarrick J. Wong static void
1213ce2d3bbeSDarrick J. Wong xfs_blockgc_clear_iflag(
1214ce2d3bbeSDarrick J. Wong 	struct xfs_inode	*ip,
1215ce2d3bbeSDarrick J. Wong 	unsigned long		iflag)
121683104d44SDarrick J. Wong {
121727b52867SBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
121827b52867SBrian Foster 	struct xfs_perag	*pag;
1219ce2d3bbeSDarrick J. Wong 	bool			clear_tag;
1220ce2d3bbeSDarrick J. Wong 
1221ce2d3bbeSDarrick J. Wong 	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
122227b52867SBrian Foster 
122385a6e764SChristoph Hellwig 	spin_lock(&ip->i_flags_lock);
1224ce2d3bbeSDarrick J. Wong 	ip->i_flags &= ~iflag;
1225ce2d3bbeSDarrick J. Wong 	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
122685a6e764SChristoph Hellwig 	spin_unlock(&ip->i_flags_lock);
122785a6e764SChristoph Hellwig 
1228ce2d3bbeSDarrick J. Wong 	if (!clear_tag)
1229ce2d3bbeSDarrick J. Wong 		return;
1230ce2d3bbeSDarrick J. Wong 
123127b52867SBrian Foster 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
123227b52867SBrian Foster 	spin_lock(&pag->pag_ici_lock);
123327b52867SBrian Foster 
1234c076ae7aSDarrick J. Wong 	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1235ce2d3bbeSDarrick J. Wong 			XFS_ICI_BLOCKGC_TAG);
123627b52867SBrian Foster 
123727b52867SBrian Foster 	spin_unlock(&pag->pag_ici_lock);
123827b52867SBrian Foster 	xfs_perag_put(pag);
123927b52867SBrian Foster }
124027b52867SBrian Foster 
124183104d44SDarrick J. Wong void
124283104d44SDarrick J. Wong xfs_inode_clear_eofblocks_tag(
124383104d44SDarrick J. Wong 	xfs_inode_t	*ip)
124483104d44SDarrick J. Wong {
124583104d44SDarrick J. Wong 	trace_xfs_inode_clear_eofblocks_tag(ip);
1246ce2d3bbeSDarrick J. Wong 	xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
124783104d44SDarrick J. Wong }
124883104d44SDarrick J. Wong 
124983104d44SDarrick J. Wong /*
1250be78ff0eSDarrick J. Wong  * Set ourselves up to free CoW blocks from this file.  If it's already clean
1251be78ff0eSDarrick J. Wong  * then we can bail out quickly, but otherwise we must back off if the file
1252be78ff0eSDarrick J. Wong  * is undergoing some kind of write.
1253be78ff0eSDarrick J. Wong  */
1254be78ff0eSDarrick J. Wong static bool
1255be78ff0eSDarrick J. Wong xfs_prep_free_cowblocks(
125651d62690SChristoph Hellwig 	struct xfs_inode	*ip)
1257be78ff0eSDarrick J. Wong {
1258be78ff0eSDarrick J. Wong 	/*
1259be78ff0eSDarrick J. Wong 	 * Just clear the tag if we have an empty cow fork or none at all. It's
1260be78ff0eSDarrick J. Wong 	 * possible the inode was fully unshared since it was originally tagged.
1261be78ff0eSDarrick J. Wong 	 */
126251d62690SChristoph Hellwig 	if (!xfs_inode_has_cow_data(ip)) {
1263be78ff0eSDarrick J. Wong 		trace_xfs_inode_free_cowblocks_invalid(ip);
1264be78ff0eSDarrick J. Wong 		xfs_inode_clear_cowblocks_tag(ip);
1265be78ff0eSDarrick J. Wong 		return false;
1266be78ff0eSDarrick J. Wong 	}
1267be78ff0eSDarrick J. Wong 
1268be78ff0eSDarrick J. Wong 	/*
1269be78ff0eSDarrick J. Wong 	 * If the mapping is dirty or under writeback we cannot touch the
1270be78ff0eSDarrick J. Wong 	 * CoW fork.  Leave it alone if we're in the midst of a directio.
1271be78ff0eSDarrick J. Wong 	 */
1272be78ff0eSDarrick J. Wong 	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1273be78ff0eSDarrick J. Wong 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1274be78ff0eSDarrick J. Wong 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1275be78ff0eSDarrick J. Wong 	    atomic_read(&VFS_I(ip)->i_dio_count))
1276be78ff0eSDarrick J. Wong 		return false;
1277be78ff0eSDarrick J. Wong 
1278be78ff0eSDarrick J. Wong 	return true;
1279be78ff0eSDarrick J. Wong }
1280be78ff0eSDarrick J. Wong 
1281be78ff0eSDarrick J. Wong /*
128283104d44SDarrick J. Wong  * Automatic CoW Reservation Freeing
128383104d44SDarrick J. Wong  *
128483104d44SDarrick J. Wong  * These functions automatically garbage collect leftover CoW reservations
128583104d44SDarrick J. Wong  * that were made on behalf of a cowextsize hint when we start to run out
128683104d44SDarrick J. Wong  * of quota or when the reservations sit around for too long.  If the file
128783104d44SDarrick J. Wong  * has dirty pages or is undergoing writeback, its CoW reservations will
128883104d44SDarrick J. Wong  * be retained.
128983104d44SDarrick J. Wong  *
129083104d44SDarrick J. Wong  * The actual garbage collection piggybacks off the same code that runs
129183104d44SDarrick J. Wong  * the speculative EOF preallocation garbage collector.
129283104d44SDarrick J. Wong  */
129383104d44SDarrick J. Wong STATIC int
129483104d44SDarrick J. Wong xfs_inode_free_cowblocks(
129583104d44SDarrick J. Wong 	struct xfs_inode	*ip,
1296b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw,
12970fa4a10aSDarrick J. Wong 	unsigned int		*lockflags)
129883104d44SDarrick J. Wong {
1299f41a0716SDarrick J. Wong 	bool			wait;
1300be78ff0eSDarrick J. Wong 	int			ret = 0;
130183104d44SDarrick J. Wong 
1302b26b2bf1SDarrick J. Wong 	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1303f41a0716SDarrick J. Wong 
1304ce2d3bbeSDarrick J. Wong 	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1305ce2d3bbeSDarrick J. Wong 		return 0;
1306ce2d3bbeSDarrick J. Wong 
130751d62690SChristoph Hellwig 	if (!xfs_prep_free_cowblocks(ip))
130883104d44SDarrick J. Wong 		return 0;
130983104d44SDarrick J. Wong 
1310b26b2bf1SDarrick J. Wong 	if (!xfs_icwalk_match(ip, icw))
131183104d44SDarrick J. Wong 		return 0;
131283104d44SDarrick J. Wong 
1313f41a0716SDarrick J. Wong 	/*
1314f41a0716SDarrick J. Wong 	 * If the caller is waiting, return -EAGAIN to keep the background
1315f41a0716SDarrick J. Wong 	 * scanner moving and revisit the inode in a subsequent pass.
1316f41a0716SDarrick J. Wong 	 */
13170fa4a10aSDarrick J. Wong 	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
13180fa4a10aSDarrick J. Wong 	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1319f41a0716SDarrick J. Wong 		if (wait)
1320f41a0716SDarrick J. Wong 			return -EAGAIN;
1321f41a0716SDarrick J. Wong 		return 0;
1322f41a0716SDarrick J. Wong 	}
13230fa4a10aSDarrick J. Wong 	*lockflags |= XFS_IOLOCK_EXCL;
13240fa4a10aSDarrick J. Wong 
1325f41a0716SDarrick J. Wong 	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1326f41a0716SDarrick J. Wong 		if (wait)
13270fa4a10aSDarrick J. Wong 			return -EAGAIN;
13280fa4a10aSDarrick J. Wong 		return 0;
1329f41a0716SDarrick J. Wong 	}
13300fa4a10aSDarrick J. Wong 	*lockflags |= XFS_MMAPLOCK_EXCL;
133183104d44SDarrick J. Wong 
1332be78ff0eSDarrick J. Wong 	/*
1333be78ff0eSDarrick J. Wong 	 * Check again, nobody else should be able to dirty blocks or change
1334be78ff0eSDarrick J. Wong 	 * the reflink iflag now that we have the first two locks held.
1335be78ff0eSDarrick J. Wong 	 */
133651d62690SChristoph Hellwig 	if (xfs_prep_free_cowblocks(ip))
13373802a345SChristoph Hellwig 		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
133883104d44SDarrick J. Wong 	return ret;
133983104d44SDarrick J. Wong }
134083104d44SDarrick J. Wong 
134183104d44SDarrick J. Wong void
134283104d44SDarrick J. Wong xfs_inode_set_cowblocks_tag(
134383104d44SDarrick J. Wong 	xfs_inode_t	*ip)
134483104d44SDarrick J. Wong {
13457b7381f0SBrian Foster 	trace_xfs_inode_set_cowblocks_tag(ip);
13469669f51dSDarrick J. Wong 	xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
134783104d44SDarrick J. Wong }
134883104d44SDarrick J. Wong 
134983104d44SDarrick J. Wong void
135083104d44SDarrick J. Wong xfs_inode_clear_cowblocks_tag(
135183104d44SDarrick J. Wong 	xfs_inode_t	*ip)
135283104d44SDarrick J. Wong {
13537b7381f0SBrian Foster 	trace_xfs_inode_clear_cowblocks_tag(ip);
1354ce2d3bbeSDarrick J. Wong 	xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
135583104d44SDarrick J. Wong }
1356d6b636ebSDarrick J. Wong 
1357d6b636ebSDarrick J. Wong /* Disable post-EOF and CoW block auto-reclamation. */
1358d6b636ebSDarrick J. Wong void
1359c9a6526fSDarrick J. Wong xfs_blockgc_stop(
1360d6b636ebSDarrick J. Wong 	struct xfs_mount	*mp)
1361d6b636ebSDarrick J. Wong {
1362894ecacfSDarrick J. Wong 	struct xfs_perag	*pag;
1363894ecacfSDarrick J. Wong 	xfs_agnumber_t		agno;
1364894ecacfSDarrick J. Wong 
13656f649091SDarrick J. Wong 	if (!xfs_clear_blockgc_enabled(mp))
13666f649091SDarrick J. Wong 		return;
13676f649091SDarrick J. Wong 
13686f649091SDarrick J. Wong 	for_each_perag(mp, agno, pag)
1369894ecacfSDarrick J. Wong 		cancel_delayed_work_sync(&pag->pag_blockgc_work);
13706f649091SDarrick J. Wong 	trace_xfs_blockgc_stop(mp, __return_address);
1371d6b636ebSDarrick J. Wong }
1372d6b636ebSDarrick J. Wong 
1373d6b636ebSDarrick J. Wong /* Enable post-EOF and CoW block auto-reclamation. */
1374d6b636ebSDarrick J. Wong void
1375c9a6526fSDarrick J. Wong xfs_blockgc_start(
1376d6b636ebSDarrick J. Wong 	struct xfs_mount	*mp)
1377d6b636ebSDarrick J. Wong {
1378894ecacfSDarrick J. Wong 	struct xfs_perag	*pag;
1379894ecacfSDarrick J. Wong 	xfs_agnumber_t		agno;
1380894ecacfSDarrick J. Wong 
13816f649091SDarrick J. Wong 	if (xfs_set_blockgc_enabled(mp))
13826f649091SDarrick J. Wong 		return;
13836f649091SDarrick J. Wong 
13846f649091SDarrick J. Wong 	trace_xfs_blockgc_start(mp, __return_address);
1385894ecacfSDarrick J. Wong 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1386894ecacfSDarrick J. Wong 		xfs_blockgc_queue(pag);
1387d6b636ebSDarrick J. Wong }
13883d4feec0SDarrick J. Wong 
1389d20d5edcSDarrick J. Wong /* Don't try to run block gc on an inode that's in any of these states. */
1390d20d5edcSDarrick J. Wong #define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
1391ab23a776SDave Chinner 					 XFS_NEED_INACTIVE | \
1392ab23a776SDave Chinner 					 XFS_INACTIVATING | \
1393d20d5edcSDarrick J. Wong 					 XFS_IRECLAIMABLE | \
1394d20d5edcSDarrick J. Wong 					 XFS_IRECLAIM)
1395df600197SDarrick J. Wong /*
1396b9baaef4SDarrick J. Wong  * Decide if the given @ip is eligible for garbage collection of speculative
1397b9baaef4SDarrick J. Wong  * preallocations, and grab it if so.  Returns true if it's ready to go or
1398b9baaef4SDarrick J. Wong  * false if we should just ignore it.
1399df600197SDarrick J. Wong  */
1400df600197SDarrick J. Wong static bool
1401b9baaef4SDarrick J. Wong xfs_blockgc_igrab(
14027fdff526SDarrick J. Wong 	struct xfs_inode	*ip)
1403df600197SDarrick J. Wong {
1404df600197SDarrick J. Wong 	struct inode		*inode = VFS_I(ip);
1405df600197SDarrick J. Wong 
1406df600197SDarrick J. Wong 	ASSERT(rcu_read_lock_held());
1407df600197SDarrick J. Wong 
1408df600197SDarrick J. Wong 	/* Check for stale RCU freed inode */
1409df600197SDarrick J. Wong 	spin_lock(&ip->i_flags_lock);
1410df600197SDarrick J. Wong 	if (!ip->i_ino)
1411df600197SDarrick J. Wong 		goto out_unlock_noent;
1412df600197SDarrick J. Wong 
1413d20d5edcSDarrick J. Wong 	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1414df600197SDarrick J. Wong 		goto out_unlock_noent;
1415df600197SDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
1416df600197SDarrick J. Wong 
1417df600197SDarrick J. Wong 	/* nothing to sync during shutdown */
141875c8c50fSDave Chinner 	if (xfs_is_shutdown(ip->i_mount))
1419df600197SDarrick J. Wong 		return false;
1420df600197SDarrick J. Wong 
1421df600197SDarrick J. Wong 	/* If we can't grab the inode, it must be on its way to reclaim. */
1422df600197SDarrick J. Wong 	if (!igrab(inode))
1423df600197SDarrick J. Wong 		return false;
1424df600197SDarrick J. Wong 
1425df600197SDarrick J. Wong 	/* inode is valid */
1426df600197SDarrick J. Wong 	return true;
1427df600197SDarrick J. Wong 
1428df600197SDarrick J. Wong out_unlock_noent:
1429df600197SDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
1430df600197SDarrick J. Wong 	return false;
1431df600197SDarrick J. Wong }
1432df600197SDarrick J. Wong 
143341956753SDarrick J. Wong /* Scan one incore inode for block preallocations that we can remove. */
143441956753SDarrick J. Wong static int
143541956753SDarrick J. Wong xfs_blockgc_scan_inode(
143641956753SDarrick J. Wong 	struct xfs_inode	*ip,
1437b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
143885c5b270SDarrick J. Wong {
14390fa4a10aSDarrick J. Wong 	unsigned int		lockflags = 0;
144085c5b270SDarrick J. Wong 	int			error;
144185c5b270SDarrick J. Wong 
1442b26b2bf1SDarrick J. Wong 	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
144385c5b270SDarrick J. Wong 	if (error)
14440fa4a10aSDarrick J. Wong 		goto unlock;
144585c5b270SDarrick J. Wong 
1446b26b2bf1SDarrick J. Wong 	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
14470fa4a10aSDarrick J. Wong unlock:
14480fa4a10aSDarrick J. Wong 	if (lockflags)
14490fa4a10aSDarrick J. Wong 		xfs_iunlock(ip, lockflags);
1450594ab00bSDarrick J. Wong 	xfs_irele(ip);
145185c5b270SDarrick J. Wong 	return error;
145285c5b270SDarrick J. Wong }
145385c5b270SDarrick J. Wong 
14549669f51dSDarrick J. Wong /* Background worker that trims preallocated space. */
14559669f51dSDarrick J. Wong void
14569669f51dSDarrick J. Wong xfs_blockgc_worker(
14579669f51dSDarrick J. Wong 	struct work_struct	*work)
14589669f51dSDarrick J. Wong {
1459894ecacfSDarrick J. Wong 	struct xfs_perag	*pag = container_of(to_delayed_work(work),
1460894ecacfSDarrick J. Wong 					struct xfs_perag, pag_blockgc_work);
1461894ecacfSDarrick J. Wong 	struct xfs_mount	*mp = pag->pag_mount;
14629669f51dSDarrick J. Wong 	int			error;
14639669f51dSDarrick J. Wong 
14646f649091SDarrick J. Wong 	trace_xfs_blockgc_worker(mp, __return_address);
14656f649091SDarrick J. Wong 
1466f427cf5cSDarrick J. Wong 	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
14679669f51dSDarrick J. Wong 	if (error)
1468894ecacfSDarrick J. Wong 		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1469894ecacfSDarrick J. Wong 				pag->pag_agno, error);
1470894ecacfSDarrick J. Wong 	xfs_blockgc_queue(pag);
14719669f51dSDarrick J. Wong }
14729669f51dSDarrick J. Wong 
147385c5b270SDarrick J. Wong /*
14742eb66502SDarrick J. Wong  * Try to free space in the filesystem by purging inactive inodes, eofblocks
14752eb66502SDarrick J. Wong  * and cowblocks.
147685c5b270SDarrick J. Wong  */
147785c5b270SDarrick J. Wong int
147885c5b270SDarrick J. Wong xfs_blockgc_free_space(
147985c5b270SDarrick J. Wong 	struct xfs_mount	*mp,
1480b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
148185c5b270SDarrick J. Wong {
14822eb66502SDarrick J. Wong 	int			error;
14832eb66502SDarrick J. Wong 
1484b26b2bf1SDarrick J. Wong 	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
148585c5b270SDarrick J. Wong 
14862eb66502SDarrick J. Wong 	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
14872eb66502SDarrick J. Wong 	if (error)
14882eb66502SDarrick J. Wong 		return error;
14892eb66502SDarrick J. Wong 
14902eb66502SDarrick J. Wong 	xfs_inodegc_flush(mp);
14912eb66502SDarrick J. Wong 	return 0;
149285c5b270SDarrick J. Wong }
149385c5b270SDarrick J. Wong 
14943d4feec0SDarrick J. Wong /*
1495e8d04c2aSDarrick J. Wong  * Reclaim all the free space that we can by scheduling the background blockgc
1496e8d04c2aSDarrick J. Wong  * and inodegc workers immediately and waiting for them all to clear.
1497e8d04c2aSDarrick J. Wong  */
1498e8d04c2aSDarrick J. Wong void
1499e8d04c2aSDarrick J. Wong xfs_blockgc_flush_all(
1500e8d04c2aSDarrick J. Wong 	struct xfs_mount	*mp)
1501e8d04c2aSDarrick J. Wong {
1502e8d04c2aSDarrick J. Wong 	struct xfs_perag	*pag;
1503e8d04c2aSDarrick J. Wong 	xfs_agnumber_t		agno;
1504e8d04c2aSDarrick J. Wong 
1505e8d04c2aSDarrick J. Wong 	trace_xfs_blockgc_flush_all(mp, __return_address);
1506e8d04c2aSDarrick J. Wong 
1507e8d04c2aSDarrick J. Wong 	/*
1508e8d04c2aSDarrick J. Wong 	 * For each blockgc worker, move its queue time up to now.  If it
1509e8d04c2aSDarrick J. Wong 	 * wasn't queued, it will not be requeued.  Then flush whatever's
1510e8d04c2aSDarrick J. Wong 	 * left.
1511e8d04c2aSDarrick J. Wong 	 */
1512e8d04c2aSDarrick J. Wong 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1513e8d04c2aSDarrick J. Wong 		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
1514e8d04c2aSDarrick J. Wong 				&pag->pag_blockgc_work, 0);
1515e8d04c2aSDarrick J. Wong 
1516e8d04c2aSDarrick J. Wong 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1517e8d04c2aSDarrick J. Wong 		flush_delayed_work(&pag->pag_blockgc_work);
1518e8d04c2aSDarrick J. Wong 
1519e8d04c2aSDarrick J. Wong 	xfs_inodegc_flush(mp);
1520e8d04c2aSDarrick J. Wong }
1521e8d04c2aSDarrick J. Wong 
1522e8d04c2aSDarrick J. Wong /*
1523c237dd7cSDarrick J. Wong  * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
1524c237dd7cSDarrick J. Wong  * quota caused an allocation failure, so we make a best effort by including
1525c237dd7cSDarrick J. Wong  * each quota under low free space conditions (less than 1% free space) in the
1526c237dd7cSDarrick J. Wong  * scan.
1527111068f8SDarrick J. Wong  *
1528111068f8SDarrick J. Wong  * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
15292d53f66bSDarrick J. Wong  * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
1530111068f8SDarrick J. Wong  * MMAPLOCK.
15313d4feec0SDarrick J. Wong  */
1532111068f8SDarrick J. Wong int
1533c237dd7cSDarrick J. Wong xfs_blockgc_free_dquots(
1534c237dd7cSDarrick J. Wong 	struct xfs_mount	*mp,
1535c237dd7cSDarrick J. Wong 	struct xfs_dquot	*udqp,
1536c237dd7cSDarrick J. Wong 	struct xfs_dquot	*gdqp,
1537c237dd7cSDarrick J. Wong 	struct xfs_dquot	*pdqp,
15382d53f66bSDarrick J. Wong 	unsigned int		iwalk_flags)
15393d4feec0SDarrick J. Wong {
1540b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	icw = {0};
15413d4feec0SDarrick J. Wong 	bool			do_work = false;
15423d4feec0SDarrick J. Wong 
1543c237dd7cSDarrick J. Wong 	if (!udqp && !gdqp && !pdqp)
1544c237dd7cSDarrick J. Wong 		return 0;
1545c237dd7cSDarrick J. Wong 
15463d4feec0SDarrick J. Wong 	/*
1547111068f8SDarrick J. Wong 	 * Run a scan to free blocks using the union filter to cover all
1548111068f8SDarrick J. Wong 	 * applicable quotas in a single scan.
15493d4feec0SDarrick J. Wong 	 */
1550b26b2bf1SDarrick J. Wong 	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
15513d4feec0SDarrick J. Wong 
1552c237dd7cSDarrick J. Wong 	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1553b26b2bf1SDarrick J. Wong 		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1554b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
15553d4feec0SDarrick J. Wong 		do_work = true;
15563d4feec0SDarrick J. Wong 	}
15573d4feec0SDarrick J. Wong 
1558c237dd7cSDarrick J. Wong 	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1559b26b2bf1SDarrick J. Wong 		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1560b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
15613d4feec0SDarrick J. Wong 		do_work = true;
15623d4feec0SDarrick J. Wong 	}
15633d4feec0SDarrick J. Wong 
1564c237dd7cSDarrick J. Wong 	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1565b26b2bf1SDarrick J. Wong 		icw.icw_prid = pdqp->q_id;
1566b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
15673d4feec0SDarrick J. Wong 		do_work = true;
15683d4feec0SDarrick J. Wong 	}
15693d4feec0SDarrick J. Wong 
15703d4feec0SDarrick J. Wong 	if (!do_work)
1571111068f8SDarrick J. Wong 		return 0;
15723d4feec0SDarrick J. Wong 
1573b26b2bf1SDarrick J. Wong 	return xfs_blockgc_free_space(mp, &icw);
1574c237dd7cSDarrick J. Wong }
1575c237dd7cSDarrick J. Wong 
1576c237dd7cSDarrick J. Wong /* Run cow/eofblocks scans on the quotas attached to the inode. */
1577c237dd7cSDarrick J. Wong int
1578c237dd7cSDarrick J. Wong xfs_blockgc_free_quota(
1579c237dd7cSDarrick J. Wong 	struct xfs_inode	*ip,
15802d53f66bSDarrick J. Wong 	unsigned int		iwalk_flags)
1581c237dd7cSDarrick J. Wong {
1582c237dd7cSDarrick J. Wong 	return xfs_blockgc_free_dquots(ip->i_mount,
1583c237dd7cSDarrick J. Wong 			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1584c237dd7cSDarrick J. Wong 			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
15852d53f66bSDarrick J. Wong 			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
15863d4feec0SDarrick J. Wong }
1587df600197SDarrick J. Wong 
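/*
 * Both scan entry points above are typically reached from an allocation
 * path that has just failed with -EDQUOT or -ENOSPC: run one scan, then
 * retry the allocation once.  The transaction allocation helpers and the
 * buffered write retry path both follow that pattern.
 */
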
1588df600197SDarrick J. Wong /* XFS Inode Cache Walking Code */
1589df600197SDarrick J. Wong 
1590df600197SDarrick J. Wong /*
1591f1bc5c56SDarrick J. Wong  * The inode lookup is done in batches to keep the amount of lock traffic and
1592f1bc5c56SDarrick J. Wong  * radix tree lookups to a minimum. The batch size is a trade off between
1593f1bc5c56SDarrick J. Wong  * lookup reduction and stack usage. This is in the reclaim path, so we can't
1594f1bc5c56SDarrick J. Wong  * be too greedy.
1595f1bc5c56SDarrick J. Wong  */
1596f1bc5c56SDarrick J. Wong #define XFS_LOOKUP_BATCH	32
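/*
 * At 32 entries, the on-stack batch array in xfs_icwalk_ag() below costs
 * 32 * sizeof(struct xfs_inode *) = 256 bytes on 64-bit builds.
 */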
1597f1bc5c56SDarrick J. Wong 
1598f1bc5c56SDarrick J. Wong 
1599f1bc5c56SDarrick J. Wong /*
1600b9baaef4SDarrick J. Wong  * Decide if we want to grab this inode in anticipation of doing work towards
1601594ab00bSDarrick J. Wong  * the goal.
1602b9baaef4SDarrick J. Wong  */
1603b9baaef4SDarrick J. Wong static inline bool
1604b9baaef4SDarrick J. Wong xfs_icwalk_igrab(
1605b9baaef4SDarrick J. Wong 	enum xfs_icwalk_goal	goal,
16069492750aSDarrick J. Wong 	struct xfs_inode	*ip,
1607b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1608b9baaef4SDarrick J. Wong {
1609b9baaef4SDarrick J. Wong 	switch (goal) {
1610b9baaef4SDarrick J. Wong 	case XFS_ICWALK_BLOCKGC:
16117fdff526SDarrick J. Wong 		return xfs_blockgc_igrab(ip);
1612f1bc5c56SDarrick J. Wong 	case XFS_ICWALK_RECLAIM:
1613b26b2bf1SDarrick J. Wong 		return xfs_reclaim_igrab(ip, icw);
1614b9baaef4SDarrick J. Wong 	default:
1615b9baaef4SDarrick J. Wong 		return false;
1616b9baaef4SDarrick J. Wong 	}
1617b9baaef4SDarrick J. Wong }
1618b9baaef4SDarrick J. Wong 
1619594ab00bSDarrick J. Wong /*
1620594ab00bSDarrick J. Wong  * Process an inode.  Each processing function must handle any state changes
1621594ab00bSDarrick J. Wong  * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
1622594ab00bSDarrick J. Wong  */
1623f427cf5cSDarrick J. Wong static inline int
1624f427cf5cSDarrick J. Wong xfs_icwalk_process_inode(
1625f427cf5cSDarrick J. Wong 	enum xfs_icwalk_goal	goal,
1626f427cf5cSDarrick J. Wong 	struct xfs_inode	*ip,
1627f1bc5c56SDarrick J. Wong 	struct xfs_perag	*pag,
1628b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1629f427cf5cSDarrick J. Wong {
1630594ab00bSDarrick J. Wong 	int			error = 0;
1631f427cf5cSDarrick J. Wong 
1632f427cf5cSDarrick J. Wong 	switch (goal) {
1633f427cf5cSDarrick J. Wong 	case XFS_ICWALK_BLOCKGC:
1634b26b2bf1SDarrick J. Wong 		error = xfs_blockgc_scan_inode(ip, icw);
1635f427cf5cSDarrick J. Wong 		break;
1636f1bc5c56SDarrick J. Wong 	case XFS_ICWALK_RECLAIM:
1637f1bc5c56SDarrick J. Wong 		xfs_reclaim_inode(ip, pag);
1638f1bc5c56SDarrick J. Wong 		break;
1639f427cf5cSDarrick J. Wong 	}
1640f427cf5cSDarrick J. Wong 	return error;
1641f427cf5cSDarrick J. Wong }
1642f427cf5cSDarrick J. Wong 
1643b9baaef4SDarrick J. Wong /*
1644f427cf5cSDarrick J. Wong  * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1645f427cf5cSDarrick J. Wong  * process them in some manner.
1646df600197SDarrick J. Wong  */
1647df600197SDarrick J. Wong static int
1648c1115c0cSDarrick J. Wong xfs_icwalk_ag(
1649df600197SDarrick J. Wong 	struct xfs_perag	*pag,
1650f427cf5cSDarrick J. Wong 	enum xfs_icwalk_goal	goal,
1651b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1652df600197SDarrick J. Wong {
1653df600197SDarrick J. Wong 	struct xfs_mount	*mp = pag->pag_mount;
1654df600197SDarrick J. Wong 	uint32_t		first_index;
1655df600197SDarrick J. Wong 	int			last_error = 0;
1656df600197SDarrick J. Wong 	int			skipped;
1657df600197SDarrick J. Wong 	bool			done;
1658df600197SDarrick J. Wong 	int			nr_found;
1659df600197SDarrick J. Wong 
1660df600197SDarrick J. Wong restart:
1661df600197SDarrick J. Wong 	done = false;
1662df600197SDarrick J. Wong 	skipped = 0;
1663f1bc5c56SDarrick J. Wong 	if (goal == XFS_ICWALK_RECLAIM)
1664f1bc5c56SDarrick J. Wong 		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1665f1bc5c56SDarrick J. Wong 	else
1666df600197SDarrick J. Wong 		first_index = 0;
1667df600197SDarrick J. Wong 	nr_found = 0;
1668df600197SDarrick J. Wong 	do {
1669df600197SDarrick J. Wong 		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1670df600197SDarrick J. Wong 		int		error = 0;
1671df600197SDarrick J. Wong 		int		i;
1672df600197SDarrick J. Wong 
1673df600197SDarrick J. Wong 		rcu_read_lock();
1674df600197SDarrick J. Wong 
1675a437b9b4SChristoph Hellwig 		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1676df600197SDarrick J. Wong 				(void **) batch, first_index,
1677a437b9b4SChristoph Hellwig 				XFS_LOOKUP_BATCH, goal);
1678df600197SDarrick J. Wong 		if (!nr_found) {
1679f1bc5c56SDarrick J. Wong 			done = true;
1680df600197SDarrick J. Wong 			rcu_read_unlock();
1681df600197SDarrick J. Wong 			break;
1682df600197SDarrick J. Wong 		}
1683df600197SDarrick J. Wong 
1684df600197SDarrick J. Wong 		/*
1685df600197SDarrick J. Wong 		 * Grab the inodes before we drop the lock. If we found
1686df600197SDarrick J. Wong 		 * nothing, nr_found == 0 and the loop will be skipped.
1687df600197SDarrick J. Wong 		 */
1688df600197SDarrick J. Wong 		for (i = 0; i < nr_found; i++) {
1689df600197SDarrick J. Wong 			struct xfs_inode *ip = batch[i];
1690df600197SDarrick J. Wong 
1691b26b2bf1SDarrick J. Wong 			if (done || !xfs_icwalk_igrab(goal, ip, icw))
1692df600197SDarrick J. Wong 				batch[i] = NULL;
1693df600197SDarrick J. Wong 
1694df600197SDarrick J. Wong 			/*
1695df600197SDarrick J. Wong 			 * Update the index for the next lookup. Catch
1696df600197SDarrick J. Wong 			 * overflows into the next AG range which can occur if
1697df600197SDarrick J. Wong 			 * we have inodes in the last block of the AG and we
1698df600197SDarrick J. Wong 			 * are currently pointing to the last inode.
1699df600197SDarrick J. Wong 			 *
1700df600197SDarrick J. Wong 			 * Because we may see inodes that are from the wrong AG
1701df600197SDarrick J. Wong 			 * due to RCU freeing and reallocation, only update the
1702df600197SDarrick J. Wong 			 * index if it lies in this AG. It was a race that led
1703df600197SDarrick J. Wong 			 * us to see this inode, so another lookup from the
1704df600197SDarrick J. Wong 			 * same index will not find it again.
1705df600197SDarrick J. Wong 			 */
1706df600197SDarrick J. Wong 			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1707df600197SDarrick J. Wong 				continue;
1708df600197SDarrick J. Wong 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1709df600197SDarrick J. Wong 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1710df600197SDarrick J. Wong 				done = true;
1711df600197SDarrick J. Wong 		}
1712df600197SDarrick J. Wong 
1713df600197SDarrick J. Wong 		/* Unlock now that we've grabbed the inodes. */
1714df600197SDarrick J. Wong 		rcu_read_unlock();
1715df600197SDarrick J. Wong 
1716df600197SDarrick J. Wong 		for (i = 0; i < nr_found; i++) {
1717df600197SDarrick J. Wong 			if (!batch[i])
1718df600197SDarrick J. Wong 				continue;
1719f1bc5c56SDarrick J. Wong 			error = xfs_icwalk_process_inode(goal, batch[i], pag,
1720b26b2bf1SDarrick J. Wong 					icw);
1721df600197SDarrick J. Wong 			if (error == -EAGAIN) {
1722df600197SDarrick J. Wong 				skipped++;
1723df600197SDarrick J. Wong 				continue;
1724df600197SDarrick J. Wong 			}
1725df600197SDarrick J. Wong 			if (error && last_error != -EFSCORRUPTED)
1726df600197SDarrick J. Wong 				last_error = error;
1727df600197SDarrick J. Wong 		}
1728df600197SDarrick J. Wong 
1729df600197SDarrick J. Wong 		/* Bail out if the filesystem is corrupted. */
1730df600197SDarrick J. Wong 		if (error == -EFSCORRUPTED)
1731df600197SDarrick J. Wong 			break;
1732df600197SDarrick J. Wong 
1733df600197SDarrick J. Wong 		cond_resched();
1734df600197SDarrick J. Wong 
1735b26b2bf1SDarrick J. Wong 		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1736b26b2bf1SDarrick J. Wong 			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1737b26b2bf1SDarrick J. Wong 			if (icw->icw_scan_limit <= 0)
1738f1bc5c56SDarrick J. Wong 				break;
1739f1bc5c56SDarrick J. Wong 		}
1740df600197SDarrick J. Wong 	} while (nr_found && !done);
1741df600197SDarrick J. Wong 
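	/*
	 * For reclaim walks, remember where this pass stopped so that the
	 * next pass can resume there; wrap the cursor back to the start
	 * once the entire AG has been walked.
	 */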
1742f1bc5c56SDarrick J. Wong 	if (goal == XFS_ICWALK_RECLAIM) {
1743f1bc5c56SDarrick J. Wong 		if (done)
1744f1bc5c56SDarrick J. Wong 			first_index = 0;
1745f1bc5c56SDarrick J. Wong 		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1746f1bc5c56SDarrick J. Wong 	}
1747f1bc5c56SDarrick J. Wong 
1748df600197SDarrick J. Wong 	if (skipped) {
1749df600197SDarrick J. Wong 		delay(1);
1750df600197SDarrick J. Wong 		goto restart;
1751df600197SDarrick J. Wong 	}
1752df600197SDarrick J. Wong 	return last_error;
1753df600197SDarrick J. Wong }
1754df600197SDarrick J. Wong 
1755f427cf5cSDarrick J. Wong /* Walk all incore inodes to achieve a given goal. */
1756df600197SDarrick J. Wong static int
1757c1115c0cSDarrick J. Wong xfs_icwalk(
1758df600197SDarrick J. Wong 	struct xfs_mount	*mp,
1759f427cf5cSDarrick J. Wong 	enum xfs_icwalk_goal	goal,
1760b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1761df600197SDarrick J. Wong {
1762df600197SDarrick J. Wong 	struct xfs_perag	*pag;
1763df600197SDarrick J. Wong 	int			error = 0;
1764df600197SDarrick J. Wong 	int			last_error = 0;
1765a437b9b4SChristoph Hellwig 	xfs_agnumber_t		agno;
1766df600197SDarrick J. Wong 
1767a437b9b4SChristoph Hellwig 	for_each_perag_tag(mp, agno, pag, goal) {
1768b26b2bf1SDarrick J. Wong 		error = xfs_icwalk_ag(pag, goal, icw);
1769df600197SDarrick J. Wong 		if (error) {
1770df600197SDarrick J. Wong 			last_error = error;
1771a437b9b4SChristoph Hellwig 			if (error == -EFSCORRUPTED) {
1772a437b9b4SChristoph Hellwig 				xfs_perag_put(pag);
1773df600197SDarrick J. Wong 				break;
1774df600197SDarrick J. Wong 			}
1775df600197SDarrick J. Wong 		}
1776a437b9b4SChristoph Hellwig 	}
1777df600197SDarrick J. Wong 	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
17782d53f66bSDarrick J. Wong 	return last_error;
1779df600197SDarrick J. Wong }
1780c6c2066dSDarrick J. Wong 
1781c6c2066dSDarrick J. Wong #ifdef DEBUG
1782c6c2066dSDarrick J. Wong static void
1783c6c2066dSDarrick J. Wong xfs_check_delalloc(
1784c6c2066dSDarrick J. Wong 	struct xfs_inode	*ip,
1785c6c2066dSDarrick J. Wong 	int			whichfork)
1786c6c2066dSDarrick J. Wong {
1787c6c2066dSDarrick J. Wong 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
1788c6c2066dSDarrick J. Wong 	struct xfs_bmbt_irec	got;
1789c6c2066dSDarrick J. Wong 	struct xfs_iext_cursor	icur;
1790c6c2066dSDarrick J. Wong 
1791c6c2066dSDarrick J. Wong 	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1792c6c2066dSDarrick J. Wong 		return;
1793c6c2066dSDarrick J. Wong 	do {
1794c6c2066dSDarrick J. Wong 		if (isnullstartblock(got.br_startblock)) {
1795c6c2066dSDarrick J. Wong 			xfs_warn(ip->i_mount,
1796c6c2066dSDarrick J. Wong 	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1797c6c2066dSDarrick J. Wong 				ip->i_ino,
1798c6c2066dSDarrick J. Wong 				whichfork == XFS_DATA_FORK ? "data" : "cow",
1799c6c2066dSDarrick J. Wong 				got.br_startoff, got.br_blockcount);
1800c6c2066dSDarrick J. Wong 		}
1801c6c2066dSDarrick J. Wong 	} while (xfs_iext_next_extent(ifp, &icur, &got));
1802c6c2066dSDarrick J. Wong }
1803c6c2066dSDarrick J. Wong #else
1804c6c2066dSDarrick J. Wong #define xfs_check_delalloc(ip, whichfork)	do { } while (0)
1805c6c2066dSDarrick J. Wong #endif
1806c6c2066dSDarrick J. Wong 
1807ab23a776SDave Chinner /* Schedule the inode for reclaim. */
1808ab23a776SDave Chinner static void
1809ab23a776SDave Chinner xfs_inodegc_set_reclaimable(
1810c6c2066dSDarrick J. Wong 	struct xfs_inode	*ip)
1811c6c2066dSDarrick J. Wong {
1812c6c2066dSDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
1813c6c2066dSDarrick J. Wong 	struct xfs_perag	*pag;
1814c6c2066dSDarrick J. Wong 
181575c8c50fSDave Chinner 	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1816c6c2066dSDarrick J. Wong 		xfs_check_delalloc(ip, XFS_DATA_FORK);
1817c6c2066dSDarrick J. Wong 		xfs_check_delalloc(ip, XFS_COW_FORK);
1818c6c2066dSDarrick J. Wong 		ASSERT(0);
1819c6c2066dSDarrick J. Wong 	}
1820c6c2066dSDarrick J. Wong 
1821c6c2066dSDarrick J. Wong 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1822c6c2066dSDarrick J. Wong 	spin_lock(&pag->pag_ici_lock);
1823c6c2066dSDarrick J. Wong 	spin_lock(&ip->i_flags_lock);
1824c6c2066dSDarrick J. Wong 
1825ab23a776SDave Chinner 	trace_xfs_inode_set_reclaimable(ip);
1826ab23a776SDave Chinner 	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1827ab23a776SDave Chinner 	ip->i_flags |= XFS_IRECLAIMABLE;
1828c6c2066dSDarrick J. Wong 	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1829c6c2066dSDarrick J. Wong 			XFS_ICI_RECLAIM_TAG);
1830c6c2066dSDarrick J. Wong 
1831c6c2066dSDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
1832c6c2066dSDarrick J. Wong 	spin_unlock(&pag->pag_ici_lock);
1833c6c2066dSDarrick J. Wong 	xfs_perag_put(pag);
1834c6c2066dSDarrick J. Wong }
1835ab23a776SDave Chinner 
1836ab23a776SDave Chinner /*
1837ab23a776SDave Chinner  * Free all speculative preallocations and possibly even the inode itself.
1838ab23a776SDave Chinner  * This is the last chance to make changes to an otherwise unreferenced file
1839ab23a776SDave Chinner  * before incore reclamation happens.
1840ab23a776SDave Chinner  */
1841ab23a776SDave Chinner static void
1842ab23a776SDave Chinner xfs_inodegc_inactivate(
1843ab23a776SDave Chinner 	struct xfs_inode	*ip)
1844ab23a776SDave Chinner {
1845ab23a776SDave Chinner 	trace_xfs_inode_inactivating(ip);
1846ab23a776SDave Chinner 	xfs_inactive(ip);
1847ab23a776SDave Chinner 	xfs_inodegc_set_reclaimable(ip);
1848ab23a776SDave Chinner }
1849ab23a776SDave Chinner 
1850ab23a776SDave Chinner void
1851ab23a776SDave Chinner xfs_inodegc_worker(
1852ab23a776SDave Chinner 	struct work_struct	*work)
1853ab23a776SDave Chinner {
1854ab23a776SDave Chinner 	struct xfs_inodegc	*gc = container_of(work, struct xfs_inodegc,
1855ab23a776SDave Chinner 							work);
1856ab23a776SDave Chinner 	struct llist_node	*node = llist_del_all(&gc->list);
1857ab23a776SDave Chinner 	struct xfs_inode	*ip, *n;
1858ab23a776SDave Chinner 
1859ab23a776SDave Chinner 	WRITE_ONCE(gc->items, 0);
1860ab23a776SDave Chinner 
1861ab23a776SDave Chinner 	if (!node)
1862ab23a776SDave Chinner 		return;
1863ab23a776SDave Chinner 
1864ab23a776SDave Chinner 	ip = llist_entry(node, struct xfs_inode, i_gclist);
186540b1de00SDarrick J. Wong 	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
1866ab23a776SDave Chinner 
186740b1de00SDarrick J. Wong 	WRITE_ONCE(gc->shrinker_hits, 0);
1868ab23a776SDave Chinner 	llist_for_each_entry_safe(ip, n, node, i_gclist) {
1869ab23a776SDave Chinner 		xfs_iflags_set(ip, XFS_INACTIVATING);
1870ab23a776SDave Chinner 		xfs_inodegc_inactivate(ip);
1871ab23a776SDave Chinner 	}
1872ab23a776SDave Chinner }
1873ab23a776SDave Chinner 
1874ab23a776SDave Chinner /*
1875ab23a776SDave Chinner  * Force all currently queued inode inactivation work to run immediately, and
1876ab23a776SDave Chinner  * wait for the work to finish. Two passes: queue all the work in the first
1877ab23a776SDave Chinner  * pass, then wait for it all in the second.
1878ab23a776SDave Chinner  */
1879ab23a776SDave Chinner void
1880ab23a776SDave Chinner xfs_inodegc_flush(
1881ab23a776SDave Chinner 	struct xfs_mount	*mp)
1882ab23a776SDave Chinner {
1883ab23a776SDave Chinner 	struct xfs_inodegc	*gc;
1884ab23a776SDave Chinner 	int			cpu;
1885ab23a776SDave Chinner 
1886ab23a776SDave Chinner 	if (!xfs_is_inodegc_enabled(mp))
1887ab23a776SDave Chinner 		return;
1888ab23a776SDave Chinner 
1889ab23a776SDave Chinner 	trace_xfs_inodegc_flush(mp, __return_address);
1890ab23a776SDave Chinner 
1891ab23a776SDave Chinner 	xfs_inodegc_queue_all(mp);
1892ab23a776SDave Chinner 
1893ab23a776SDave Chinner 	for_each_online_cpu(cpu) {
1894ab23a776SDave Chinner 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1895ab23a776SDave Chinner 		flush_work(&gc->work);
1896ab23a776SDave Chinner 	}
1897ab23a776SDave Chinner }
1898ab23a776SDave Chinner 
1899ab23a776SDave Chinner /*
1900ab23a776SDave Chinner  * Flush all the pending work and then disable the inode inactivation background
1901ab23a776SDave Chinner  * workers and wait for them to stop.
1902ab23a776SDave Chinner  */
1903ab23a776SDave Chinner void
1904ab23a776SDave Chinner xfs_inodegc_stop(
1905ab23a776SDave Chinner 	struct xfs_mount	*mp)
1906ab23a776SDave Chinner {
1907ab23a776SDave Chinner 	struct xfs_inodegc	*gc;
1908ab23a776SDave Chinner 	int			cpu;
1909ab23a776SDave Chinner 
1910ab23a776SDave Chinner 	if (!xfs_clear_inodegc_enabled(mp))
1911ab23a776SDave Chinner 		return;
1912ab23a776SDave Chinner 
1913ab23a776SDave Chinner 	xfs_inodegc_queue_all(mp);
1914ab23a776SDave Chinner 
1915ab23a776SDave Chinner 	for_each_online_cpu(cpu) {
1916ab23a776SDave Chinner 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1917ab23a776SDave Chinner 		cancel_work_sync(&gc->work);
1918ab23a776SDave Chinner 	}
1919ab23a776SDave Chinner 	trace_xfs_inodegc_stop(mp, __return_address);
1920ab23a776SDave Chinner }
1921ab23a776SDave Chinner 
1922ab23a776SDave Chinner /*
1923ab23a776SDave Chinner  * Enable the inode inactivation background workers and schedule deferred inode
1924ab23a776SDave Chinner  * inactivation work if there is any.
1925ab23a776SDave Chinner  */
1926ab23a776SDave Chinner void
1927ab23a776SDave Chinner xfs_inodegc_start(
1928ab23a776SDave Chinner 	struct xfs_mount	*mp)
1929ab23a776SDave Chinner {
1930ab23a776SDave Chinner 	if (xfs_set_inodegc_enabled(mp))
1931ab23a776SDave Chinner 		return;
1932ab23a776SDave Chinner 
1933ab23a776SDave Chinner 	trace_xfs_inodegc_start(mp, __return_address);
1934ab23a776SDave Chinner 	xfs_inodegc_queue_all(mp);
1935ab23a776SDave Chinner }
1936ab23a776SDave Chinner 
193765f03d86SDarrick J. Wong #ifdef CONFIG_XFS_RT
193865f03d86SDarrick J. Wong static inline bool
193965f03d86SDarrick J. Wong xfs_inodegc_want_queue_rt_file(
194065f03d86SDarrick J. Wong 	struct xfs_inode	*ip)
194165f03d86SDarrick J. Wong {
194265f03d86SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
194365f03d86SDarrick J. Wong 	uint64_t		freertx;
194465f03d86SDarrick J. Wong 
194565f03d86SDarrick J. Wong 	if (!XFS_IS_REALTIME_INODE(ip))
194665f03d86SDarrick J. Wong 		return false;
194765f03d86SDarrick J. Wong 
194865f03d86SDarrick J. Wong 	freertx = READ_ONCE(mp->m_sb.sb_frextents);
194965f03d86SDarrick J. Wong 	return freertx < mp->m_low_rtexts[XFS_LOWSP_5_PCNT];
195065f03d86SDarrick J. Wong }
195165f03d86SDarrick J. Wong #else
195265f03d86SDarrick J. Wong # define xfs_inodegc_want_queue_rt_file(ip)	(false)
195365f03d86SDarrick J. Wong #endif /* CONFIG_XFS_RT */
195465f03d86SDarrick J. Wong 
1955ab23a776SDave Chinner /*
1956ab23a776SDave Chinner  * Schedule the inactivation worker when:
1957ab23a776SDave Chinner  *
1958ab23a776SDave Chinner  *  - We've accumulated more than one inode cluster buffer's worth of inodes.
19597d6f07d2SDarrick J. Wong  *  - There is less than 5% free space left.
 *  - The file is a realtime file and the free realtime extents are low.
1960108523b8SDarrick J. Wong  *  - Any of the quotas for this inode are near an enforcement limit.
1961ab23a776SDave Chinner  */
1962ab23a776SDave Chinner static inline bool
1963ab23a776SDave Chinner xfs_inodegc_want_queue_work(
1964ab23a776SDave Chinner 	struct xfs_inode	*ip,
1965ab23a776SDave Chinner 	unsigned int		items)
1966ab23a776SDave Chinner {
1967ab23a776SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
1968ab23a776SDave Chinner 
1969ab23a776SDave Chinner 	if (items > mp->m_ino_geo.inodes_per_cluster)
1970ab23a776SDave Chinner 		return true;
1971ab23a776SDave Chinner 
19727d6f07d2SDarrick J. Wong 	if (__percpu_counter_compare(&mp->m_fdblocks,
19737d6f07d2SDarrick J. Wong 				mp->m_low_space[XFS_LOWSP_5_PCNT],
19747d6f07d2SDarrick J. Wong 				XFS_FDBLOCKS_BATCH) < 0)
19757d6f07d2SDarrick J. Wong 		return true;
19767d6f07d2SDarrick J. Wong 
197765f03d86SDarrick J. Wong 	if (xfs_inodegc_want_queue_rt_file(ip))
197865f03d86SDarrick J. Wong 		return true;
197965f03d86SDarrick J. Wong 
1980108523b8SDarrick J. Wong 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
1981108523b8SDarrick J. Wong 		return true;
1982108523b8SDarrick J. Wong 
1983108523b8SDarrick J. Wong 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
1984108523b8SDarrick J. Wong 		return true;
1985108523b8SDarrick J. Wong 
1986108523b8SDarrick J. Wong 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
1987108523b8SDarrick J. Wong 		return true;
1988108523b8SDarrick J. Wong 
1989ab23a776SDave Chinner 	return false;
1990ab23a776SDave Chinner }
1991ab23a776SDave Chinner 
1992ab23a776SDave Chinner /*
1993ab23a776SDave Chinner  * Upper bound on the number of inodes in each AG that can be queued for
1994ab23a776SDave Chinner  * inactivation at any given time, to avoid monopolizing the workqueue.
1995ab23a776SDave Chinner  */
1996ab23a776SDave Chinner #define XFS_INODEGC_MAX_BACKLOG		(4 * XFS_INODES_PER_CHUNK)
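/* With XFS_INODES_PER_CHUNK fixed at 64, that is 256 queued inodes per CPU. */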
1997ab23a776SDave Chinner 
1998ab23a776SDave Chinner /*
1999ab23a776SDave Chinner  * Make the frontend wait for inactivations when:
2000ab23a776SDave Chinner  *
200140b1de00SDarrick J. Wong  *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
2002ab23a776SDave Chinner  *  - The queue depth exceeds the maximum allowable percpu backlog.
2003ab23a776SDave Chinner  *
2004ab23a776SDave Chinner  * Note: If the current thread is running a transaction, we don't ever want to
2005ab23a776SDave Chinner  * wait for other transactions because that could introduce a deadlock.
2006ab23a776SDave Chinner  */
2007ab23a776SDave Chinner static inline bool
2008ab23a776SDave Chinner xfs_inodegc_want_flush_work(
2009ab23a776SDave Chinner 	struct xfs_inode	*ip,
201040b1de00SDarrick J. Wong 	unsigned int		items,
201140b1de00SDarrick J. Wong 	unsigned int		shrinker_hits)
2012ab23a776SDave Chinner {
2013ab23a776SDave Chinner 	if (current->journal_info)
2014ab23a776SDave Chinner 		return false;
2015ab23a776SDave Chinner 
201640b1de00SDarrick J. Wong 	if (shrinker_hits > 0)
201740b1de00SDarrick J. Wong 		return true;
201840b1de00SDarrick J. Wong 
2019ab23a776SDave Chinner 	if (items > XFS_INODEGC_MAX_BACKLOG)
2020ab23a776SDave Chinner 		return true;
2021ab23a776SDave Chinner 
2022ab23a776SDave Chinner 	return false;
2023ab23a776SDave Chinner }
2024ab23a776SDave Chinner 
2025ab23a776SDave Chinner /*
2026ab23a776SDave Chinner  * Queue a background inactivation worker if there are inodes that need to be
2027ab23a776SDave Chinner  * inactivated and higher level xfs code hasn't disabled the background
2028ab23a776SDave Chinner  * workers.
2029ab23a776SDave Chinner  */
2030ab23a776SDave Chinner static void
2031ab23a776SDave Chinner xfs_inodegc_queue(
2032ab23a776SDave Chinner 	struct xfs_inode	*ip)
2033ab23a776SDave Chinner {
2034ab23a776SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
2035ab23a776SDave Chinner 	struct xfs_inodegc	*gc;
2036ab23a776SDave Chinner 	int			items;
203740b1de00SDarrick J. Wong 	unsigned int		shrinker_hits;
2038ab23a776SDave Chinner 
2039ab23a776SDave Chinner 	trace_xfs_inode_set_need_inactive(ip);
2040ab23a776SDave Chinner 	spin_lock(&ip->i_flags_lock);
2041ab23a776SDave Chinner 	ip->i_flags |= XFS_NEED_INACTIVE;
2042ab23a776SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2043ab23a776SDave Chinner 
2044ab23a776SDave Chinner 	gc = get_cpu_ptr(mp->m_inodegc);
2045ab23a776SDave Chinner 	llist_add(&ip->i_gclist, &gc->list);
2046ab23a776SDave Chinner 	items = READ_ONCE(gc->items);
2047ab23a776SDave Chinner 	WRITE_ONCE(gc->items, items + 1);
204840b1de00SDarrick J. Wong 	shrinker_hits = READ_ONCE(gc->shrinker_hits);
2049ab23a776SDave Chinner 	put_cpu_ptr(gc);
2050ab23a776SDave Chinner 
2051ab23a776SDave Chinner 	if (!xfs_is_inodegc_enabled(mp))
2052ab23a776SDave Chinner 		return;
2053ab23a776SDave Chinner 
2054ab23a776SDave Chinner 	if (xfs_inodegc_want_queue_work(ip, items)) {
2055ab23a776SDave Chinner 		trace_xfs_inodegc_queue(mp, __return_address);
2056ab23a776SDave Chinner 		queue_work(mp->m_inodegc_wq, &gc->work);
2057ab23a776SDave Chinner 	}
2058ab23a776SDave Chinner 
205940b1de00SDarrick J. Wong 	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2060ab23a776SDave Chinner 		trace_xfs_inodegc_throttle(mp, __return_address);
2061ab23a776SDave Chinner 		flush_work(&gc->work);
2062ab23a776SDave Chinner 	}
2063ab23a776SDave Chinner }
2064ab23a776SDave Chinner 
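/*
 * Rough user-space sketch of the lock-free list pattern behind gc->list:
 * producers push with llist_add() (xfs_inodegc_queue() above) while the
 * worker detaches the entire chain at once with llist_del_all().  The
 * names below are invented for illustration; only the algorithm mirrors
 * the kernel's llist primitives.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct lnode {
	struct lnode		*next;
};

struct lockless_list {
	_Atomic(struct lnode *)	first;
};

/* Like llist_add(): push one node, report whether the list was empty. */
static bool
ll_push(
	struct lockless_list	*l,
	struct lnode		*n)
{
	struct lnode		*old = atomic_load(&l->first);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&l->first, &old, n));
	return old == NULL;
}

/* Like llist_del_all(): detach the whole chain in one atomic exchange. */
static struct lnode *
ll_take_all(
	struct lockless_list	*l)
{
	return atomic_exchange(&l->first, NULL);
}
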
2065ab23a776SDave Chinner /*
2066ab23a776SDave Chinner  * Fold the dead CPU inodegc queue into the current CPUs queue.
2067ab23a776SDave Chinner  */
2068ab23a776SDave Chinner void
2069ab23a776SDave Chinner xfs_inodegc_cpu_dead(
2070ab23a776SDave Chinner 	struct xfs_mount	*mp,
2071ab23a776SDave Chinner 	unsigned int		dead_cpu)
2072ab23a776SDave Chinner {
2073ab23a776SDave Chinner 	struct xfs_inodegc	*dead_gc, *gc;
2074ab23a776SDave Chinner 	struct llist_node	*first, *last;
2075ab23a776SDave Chinner 	unsigned int		count = 0;
2076ab23a776SDave Chinner 
2077ab23a776SDave Chinner 	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
2078ab23a776SDave Chinner 	cancel_work_sync(&dead_gc->work);
2079ab23a776SDave Chinner 
2080ab23a776SDave Chinner 	if (llist_empty(&dead_gc->list))
2081ab23a776SDave Chinner 		return;
2082ab23a776SDave Chinner 
2083ab23a776SDave Chinner 	first = dead_gc->list.first;
2084ab23a776SDave Chinner 	last = first;
2085ab23a776SDave Chinner 	while (last->next) {
2086ab23a776SDave Chinner 		last = last->next;
2087ab23a776SDave Chinner 		count++;
2088ab23a776SDave Chinner 	}
2089ab23a776SDave Chinner 	dead_gc->list.first = NULL;
2090ab23a776SDave Chinner 	dead_gc->items = 0;
2091ab23a776SDave Chinner 
2092ab23a776SDave Chinner 	/* Add pending work to current CPU */
2093ab23a776SDave Chinner 	gc = get_cpu_ptr(mp->m_inodegc);
2094ab23a776SDave Chinner 	llist_add_batch(first, last, &gc->list);
2095ab23a776SDave Chinner 	count += READ_ONCE(gc->items);
2096ab23a776SDave Chinner 	WRITE_ONCE(gc->items, count);
2097ab23a776SDave Chinner 	put_cpu_ptr(gc);
2098ab23a776SDave Chinner 
2099ab23a776SDave Chinner 	if (xfs_is_inodegc_enabled(mp)) {
2100ab23a776SDave Chinner 		trace_xfs_inodegc_queue(mp, __return_address);
2101ab23a776SDave Chinner 		queue_work(mp->m_inodegc_wq, &gc->work);
2102ab23a776SDave Chinner 	}
2103ab23a776SDave Chinner }
2104ab23a776SDave Chinner 
2105ab23a776SDave Chinner /*
2106ab23a776SDave Chinner  * We set the inode flag atomically with the radix tree tag.  Once we get tag
2107ab23a776SDave Chinner  * lookups on the radix tree, this inode flag can go away.
2108ab23a776SDave Chinner  *
2109ab23a776SDave Chinner  * We always use background reclaim here because even if the inode is clean, it
2110ab23a776SDave Chinner  * may still be under IO and hence we have to wait for IO completion to occur
2111ab23a776SDave Chinner  * before we can reclaim the inode. The background reclaim path handles this
2112ab23a776SDave Chinner  * more efficiently than we can here, so simply let background reclaim tear down
2113ab23a776SDave Chinner  * all inodes.
2114ab23a776SDave Chinner  */
2115ab23a776SDave Chinner void
2116ab23a776SDave Chinner xfs_inode_mark_reclaimable(
2117ab23a776SDave Chinner 	struct xfs_inode	*ip)
2118ab23a776SDave Chinner {
2119ab23a776SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
2120ab23a776SDave Chinner 	bool			need_inactive;
2121ab23a776SDave Chinner 
2122ab23a776SDave Chinner 	XFS_STATS_INC(mp, vn_reclaim);
2123ab23a776SDave Chinner 
2124ab23a776SDave Chinner 	/*
2125ab23a776SDave Chinner 	 * We should never get here with any of the reclaim flags already set.
2126ab23a776SDave Chinner 	 */
2127ab23a776SDave Chinner 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2128ab23a776SDave Chinner 
2129ab23a776SDave Chinner 	need_inactive = xfs_inode_needs_inactive(ip);
2130ab23a776SDave Chinner 	if (need_inactive) {
2131ab23a776SDave Chinner 		xfs_inodegc_queue(ip);
2132ab23a776SDave Chinner 		return;
2133ab23a776SDave Chinner 	}
2134ab23a776SDave Chinner 
2135ab23a776SDave Chinner 	/* Going straight to reclaim, so drop the dquots. */
2136ab23a776SDave Chinner 	xfs_qm_dqdetach(ip);
2137ab23a776SDave Chinner 	xfs_inodegc_set_reclaimable(ip);
2138ab23a776SDave Chinner }
213940b1de00SDarrick J. Wong 
214040b1de00SDarrick J. Wong /*
214140b1de00SDarrick J. Wong  * Register a phony shrinker so that we can run background inodegc sooner when
214240b1de00SDarrick J. Wong  * there's memory pressure.  Inactivation does not itself free any memory but
214340b1de00SDarrick J. Wong  * it does make inodes reclaimable, which eventually frees memory.
214440b1de00SDarrick J. Wong  *
214540b1de00SDarrick J. Wong  * The count function, seek value, and batch value are crafted to trigger the
214640b1de00SDarrick J. Wong  * scan function during the second round of scanning.  Hopefully this means
214740b1de00SDarrick J. Wong  * that we reclaimed enough memory that initiating metadata transactions won't
214840b1de00SDarrick J. Wong  * make things worse.
214940b1de00SDarrick J. Wong  */
215040b1de00SDarrick J. Wong #define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
215140b1de00SDarrick J. Wong #define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
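/*
 * With DEF_PRIORITY at 12, the count above works out to 4096 and the batch
 * to 2049; two batches (4098) just cover the count, which lines the real
 * work up with the second scanning round mentioned above.
 */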
215240b1de00SDarrick J. Wong 
215340b1de00SDarrick J. Wong static unsigned long
215440b1de00SDarrick J. Wong xfs_inodegc_shrinker_count(
215540b1de00SDarrick J. Wong 	struct shrinker		*shrink,
215640b1de00SDarrick J. Wong 	struct shrink_control	*sc)
215740b1de00SDarrick J. Wong {
215840b1de00SDarrick J. Wong 	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
215940b1de00SDarrick J. Wong 						   m_inodegc_shrinker);
216040b1de00SDarrick J. Wong 	struct xfs_inodegc	*gc;
216140b1de00SDarrick J. Wong 	int			cpu;
216240b1de00SDarrick J. Wong 
216340b1de00SDarrick J. Wong 	if (!xfs_is_inodegc_enabled(mp))
216440b1de00SDarrick J. Wong 		return 0;
216540b1de00SDarrick J. Wong 
216640b1de00SDarrick J. Wong 	for_each_online_cpu(cpu) {
216740b1de00SDarrick J. Wong 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
216840b1de00SDarrick J. Wong 		if (!llist_empty(&gc->list))
216940b1de00SDarrick J. Wong 			return XFS_INODEGC_SHRINKER_COUNT;
217040b1de00SDarrick J. Wong 	}
217140b1de00SDarrick J. Wong 
217240b1de00SDarrick J. Wong 	return 0;
217340b1de00SDarrick J. Wong }
217440b1de00SDarrick J. Wong 
217540b1de00SDarrick J. Wong static unsigned long
217640b1de00SDarrick J. Wong xfs_inodegc_shrinker_scan(
217740b1de00SDarrick J. Wong 	struct shrinker		*shrink,
217840b1de00SDarrick J. Wong 	struct shrink_control	*sc)
217940b1de00SDarrick J. Wong {
218040b1de00SDarrick J. Wong 	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
218140b1de00SDarrick J. Wong 						   m_inodegc_shrinker);
218240b1de00SDarrick J. Wong 	struct xfs_inodegc	*gc;
218340b1de00SDarrick J. Wong 	int			cpu;
218440b1de00SDarrick J. Wong 	bool			no_items = true;
218540b1de00SDarrick J. Wong 
218640b1de00SDarrick J. Wong 	if (!xfs_is_inodegc_enabled(mp))
218740b1de00SDarrick J. Wong 		return SHRINK_STOP;
218840b1de00SDarrick J. Wong 
218940b1de00SDarrick J. Wong 	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
219040b1de00SDarrick J. Wong 
219140b1de00SDarrick J. Wong 	for_each_online_cpu(cpu) {
219240b1de00SDarrick J. Wong 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
219340b1de00SDarrick J. Wong 		if (!llist_empty(&gc->list)) {
219440b1de00SDarrick J. Wong 			unsigned int	h = READ_ONCE(gc->shrinker_hits);
219540b1de00SDarrick J. Wong 
219640b1de00SDarrick J. Wong 			WRITE_ONCE(gc->shrinker_hits, h + 1);
219740b1de00SDarrick J. Wong 			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
219840b1de00SDarrick J. Wong 			no_items = false;
219940b1de00SDarrick J. Wong 		}
220040b1de00SDarrick J. Wong 	}
220140b1de00SDarrick J. Wong 
220240b1de00SDarrick J. Wong 	/*
220340b1de00SDarrick J. Wong 	 * If there are no inodes to inactivate, we don't want the shrinker
220440b1de00SDarrick J. Wong 	 * to think there's deferred work to call us back about.
220540b1de00SDarrick J. Wong 	 */
220640b1de00SDarrick J. Wong 	if (no_items)
220740b1de00SDarrick J. Wong 		return LONG_MAX;
220840b1de00SDarrick J. Wong 
220940b1de00SDarrick J. Wong 	return SHRINK_STOP;
221040b1de00SDarrick J. Wong }
221140b1de00SDarrick J. Wong 
221240b1de00SDarrick J. Wong /* Register a shrinker so we can accelerate inodegc and throttle queuing. */
221340b1de00SDarrick J. Wong int
221440b1de00SDarrick J. Wong xfs_inodegc_register_shrinker(
221540b1de00SDarrick J. Wong 	struct xfs_mount	*mp)
221640b1de00SDarrick J. Wong {
221740b1de00SDarrick J. Wong 	struct shrinker		*shrink = &mp->m_inodegc_shrinker;
221840b1de00SDarrick J. Wong 
221940b1de00SDarrick J. Wong 	shrink->count_objects = xfs_inodegc_shrinker_count;
222040b1de00SDarrick J. Wong 	shrink->scan_objects = xfs_inodegc_shrinker_scan;
222140b1de00SDarrick J. Wong 	shrink->seeks = 0;
222240b1de00SDarrick J. Wong 	shrink->flags = SHRINKER_NONSLAB;
222340b1de00SDarrick J. Wong 	shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
222440b1de00SDarrick J. Wong 
222540b1de00SDarrick J. Wong 	return register_shrinker(shrink);
222640b1de00SDarrick J. Wong }
2227