// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes.  These can correspond with incore inode
 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
};

static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);

/*
 * Private inode cache walk flags for struct xfs_icwalk.  Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

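/* Reclaim sick inodes as well as healthy ones; see xfs_reclaim_igrab(). */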
#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode or i_state! */
	VFS_I(ip)->i_mode = 0;
	VFS_I(ip)->i_state = 0;
	mapping_set_large_folios(VFS_I(ip)->i_mapping);

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_cowfp = NULL;
	memset(&ip->i_af, 0, sizeof(ip->i_af));
	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}

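/*
 * RCU callback that tears down an inode's data, attr, and CoW forks and its
 * log item, then frees the incore inode itself once the grace period that
 * protects radix tree lookups has expired.
 */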
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	xfs_idestroy_fork(&ip->i_af);
	xfs_ifork_zap_attr(ip);

	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_cache, ip);
}

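/*
 * Defer the final teardown of an inode to an RCU grace period so that
 * concurrent RCU-protected radix tree lookups can still examine it safely.
 */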
static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}

/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(mp);
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	mapping_set_large_folios(inode->i_mapping);
	return error;
}

/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state.  Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode.  We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	if (error) {
		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble.  Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}

/*
 * If we are allocating a new inode, then check that what was returned is
 * actually a free, empty inode. If we are not allocating an inode, then
 * check that we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/* Make all pending inactivation work start immediately. */
static void
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
	}
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet.  The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock.  For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock.  We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}

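/*
 * The inode was not found in cache: allocate a new incore inode, map and (if
 * necessary) read the on-disk inode, and insert the result into the per-AG
 * radix tree with XFS_INEW set.
 */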
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_has_v3inodes(mp) &&
	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		struct xfs_buf		*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.  The inode is looked up
 * in the cache held in each AG.  If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
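/*
 * Illustrative usage sketch (not taken from a real caller): a metadata
 * operation that needs a locked, referenced inode might do:
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	...operate on ip...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 */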
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_init_new_inode will
	 * handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, set @inuse to true if the inode is
 * allocated and false if it is not, and return 0.  For all other cases
 * (not in cache, being torn down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.  This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	xfs_irele(ip);
	return 0;
}

/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to become reclaimable.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	/*
	 * Check for log shutdown because aborting the inode can move the log
	 * tail and corrupt in memory state. This is fine if the log is shut
	 * down, but if the log is still active and only the mount is shut down
	 * then the in-memory log tail movement caused by the abort can be
	 * incorrectly propagated to disk.
	 */
	if (xlog_is_shutdown(ip->i_mount->m_log)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_shutdown_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
	trace_xfs_inode_reclaiming(ip);

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/* Reclaim sick inodes if we're unmounting or the fs went down. */
static inline bool
xfs_want_reclaim_sick(
	struct xfs_mount	*mp)
{
	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
	       xfs_is_shutdown(mp);
}

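/*
 * Reclaim every reclaimable inode, pushing the AIL between passes so that
 * dirty inodes can be written back and become reclaimable.
 */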
void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do, so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	long			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

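/*
 * An intersection-based inode filtering algorithm.  Process the inode only
 * if all of the given uid/gid/prid criteria match.
 */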
103039b1cfd7SDarrick J. Wong STATIC bool
1031b26b2bf1SDarrick J. Wong xfs_icwalk_match_id(
10323e3f9f58SBrian Foster 	struct xfs_inode	*ip,
1033b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
10343e3f9f58SBrian Foster {
1035b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1036b26b2bf1SDarrick J. Wong 	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
103739b1cfd7SDarrick J. Wong 		return false;
10381b556048SBrian Foster 
1039b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1040b26b2bf1SDarrick J. Wong 	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
104139b1cfd7SDarrick J. Wong 		return false;
10421b556048SBrian Foster 
1043b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1044b26b2bf1SDarrick J. Wong 	    ip->i_projid != icw->icw_prid)
104539b1cfd7SDarrick J. Wong 		return false;
10461b556048SBrian Foster 
104739b1cfd7SDarrick J. Wong 	return true;
10483e3f9f58SBrian Foster }
10493e3f9f58SBrian Foster 
1050f4526397SBrian Foster /*
1051f4526397SBrian Foster  * A union-based inode filtering algorithm. Process the inode if any of the
1052f4526397SBrian Foster  * criteria match. This is for global/internal scans only.
1053f4526397SBrian Foster  */
105439b1cfd7SDarrick J. Wong STATIC bool
1055b26b2bf1SDarrick J. Wong xfs_icwalk_match_id_union(
1056f4526397SBrian Foster 	struct xfs_inode	*ip,
1057b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1058f4526397SBrian Foster {
1059b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1060b26b2bf1SDarrick J. Wong 	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
106139b1cfd7SDarrick J. Wong 		return true;
1062f4526397SBrian Foster 
1063b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1064b26b2bf1SDarrick J. Wong 	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
106539b1cfd7SDarrick J. Wong 		return true;
1066f4526397SBrian Foster 
1067b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1068b26b2bf1SDarrick J. Wong 	    ip->i_projid == icw->icw_prid)
106939b1cfd7SDarrick J. Wong 		return true;
1070f4526397SBrian Foster 
107139b1cfd7SDarrick J. Wong 	return false;
1072f4526397SBrian Foster }
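
/*
 * Worked example (editorial sketch): with both ID flags set, the default
 * intersection filter above requires the inode to match the uid AND the
 * gid; adding XFS_ICWALK_FLAG_UNION relaxes that to uid OR gid, which is
 * what the low-quota-space scans further down rely on:
 *
 *	struct xfs_icwalk	icw = {
 *		.icw_flags	= XFS_ICWALK_FLAG_UID | XFS_ICWALK_FLAG_GID |
 *				  XFS_ICWALK_FLAG_UNION,
 *		.icw_uid	= some_kuid,	// hypothetical values
 *		.icw_gid	= some_kgid,
 *	};
 */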
1073f4526397SBrian Foster 
1074a91bf992SDarrick J. Wong /*
1075a91bf992SDarrick J. Wong  * Is this inode @ip eligible for eof/cow block reclamation, given some
1076b26b2bf1SDarrick J. Wong  * filtering parameters @icw?  The inode is eligible if @icw is null or
1077a91bf992SDarrick J. Wong  * if the predicate functions match.
1078a91bf992SDarrick J. Wong  */
1079a91bf992SDarrick J. Wong static bool
1080b26b2bf1SDarrick J. Wong xfs_icwalk_match(
1081a91bf992SDarrick J. Wong 	struct xfs_inode	*ip,
1082b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1083a91bf992SDarrick J. Wong {
108439b1cfd7SDarrick J. Wong 	bool			match;
1085a91bf992SDarrick J. Wong 
1086b26b2bf1SDarrick J. Wong 	if (!icw)
1087a91bf992SDarrick J. Wong 		return true;
1088a91bf992SDarrick J. Wong 
1089b26b2bf1SDarrick J. Wong 	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
1090b26b2bf1SDarrick J. Wong 		match = xfs_icwalk_match_id_union(ip, icw);
1091a91bf992SDarrick J. Wong 	else
1092b26b2bf1SDarrick J. Wong 		match = xfs_icwalk_match_id(ip, icw);
1093a91bf992SDarrick J. Wong 	if (!match)
1094a91bf992SDarrick J. Wong 		return false;
1095a91bf992SDarrick J. Wong 
1096a91bf992SDarrick J. Wong 	/* skip the inode if the file size is too small */
1097b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
1098b26b2bf1SDarrick J. Wong 	    XFS_ISIZE(ip) < icw->icw_min_file_size)
1099a91bf992SDarrick J. Wong 		return false;
1100a91bf992SDarrick J. Wong 
1101a91bf992SDarrick J. Wong 	return true;
1102a91bf992SDarrick J. Wong }
1103a91bf992SDarrick J. Wong 
11044d0bab3aSDave Chinner /*
11054d0bab3aSDave Chinner  * This is a fast pass over the inode cache to try to get reclaim moving on as
11064d0bab3aSDave Chinner  * many inodes as possible in a short period of time. It kicks itself every few
11074d0bab3aSDave Chinner  * seconds, as well as being kicked by the inode cache shrinker when memory
110802511a5aSDave Chinner  * goes low.
11094d0bab3aSDave Chinner  */
11104d0bab3aSDave Chinner void
11114d0bab3aSDave Chinner xfs_reclaim_worker(
11124d0bab3aSDave Chinner 	struct work_struct *work)
11134d0bab3aSDave Chinner {
11144d0bab3aSDave Chinner 	struct xfs_mount *mp = container_of(to_delayed_work(work),
11154d0bab3aSDave Chinner 					struct xfs_mount, m_reclaim_work);
11164d0bab3aSDave Chinner 
1117f1bc5c56SDarrick J. Wong 	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
11184d0bab3aSDave Chinner 	xfs_reclaim_work_queue(mp);
11194d0bab3aSDave Chinner }
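
/*
 * Editorial note: xfs_reclaim_work_queue() only rearms the work while the
 * perag trees still carry XFS_ICI_RECLAIM_TAG entries, so the worker keeps
 * ticking for as long as reclaimable inodes remain and goes quiet once the
 * backlog is drained.
 */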
11204d0bab3aSDave Chinner 
11213e3f9f58SBrian Foster STATIC int
112241176a68SBrian Foster xfs_inode_free_eofblocks(
112341176a68SBrian Foster 	struct xfs_inode	*ip,
1124b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw,
11250fa4a10aSDarrick J. Wong 	unsigned int		*lockflags)
112641176a68SBrian Foster {
1127390600f8SDarrick J. Wong 	bool			wait;
1128390600f8SDarrick J. Wong 
1129b26b2bf1SDarrick J. Wong 	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
11305400da7dSBrian Foster 
1131ce2d3bbeSDarrick J. Wong 	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1132ce2d3bbeSDarrick J. Wong 		return 0;
1133ce2d3bbeSDarrick J. Wong 
113441176a68SBrian Foster 	/*
113541176a68SBrian Foster 	 * If the mapping is dirty, the operation can block and wait for some
113641176a68SBrian Foster 	 * time. Unless we are waiting, skip it.
113741176a68SBrian Foster 	 */
1138390600f8SDarrick J. Wong 	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
113941176a68SBrian Foster 		return 0;
114041176a68SBrian Foster 
1141b26b2bf1SDarrick J. Wong 	if (!xfs_icwalk_match(ip, icw))
11423e3f9f58SBrian Foster 		return 0;
11433e3f9f58SBrian Foster 
1144a36b9261SBrian Foster 	/*
1145a36b9261SBrian Foster 	 * If the caller is waiting, return -EAGAIN to keep the background
1146a36b9261SBrian Foster 	 * scanner moving and revisit the inode in a subsequent pass.
1147a36b9261SBrian Foster 	 */
1148c3155097SBrian Foster 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1149390600f8SDarrick J. Wong 		if (wait)
1150390600f8SDarrick J. Wong 			return -EAGAIN;
1151390600f8SDarrick J. Wong 		return 0;
1152a36b9261SBrian Foster 	}
11530fa4a10aSDarrick J. Wong 	*lockflags |= XFS_IOLOCK_EXCL;
1154390600f8SDarrick J. Wong 
11552b156ff8SDarrick J. Wong 	if (xfs_can_free_eofblocks(ip, false))
11560fa4a10aSDarrick J. Wong 		return xfs_free_eofblocks(ip);
11572b156ff8SDarrick J. Wong 
11582b156ff8SDarrick J. Wong 	/* inode could be preallocated or append-only */
11592b156ff8SDarrick J. Wong 	trace_xfs_inode_free_eofblocks_invalid(ip);
11602b156ff8SDarrick J. Wong 	xfs_inode_clear_eofblocks_tag(ip);
11612b156ff8SDarrick J. Wong 	return 0;
116241176a68SBrian Foster }
116341176a68SBrian Foster 
116483104d44SDarrick J. Wong static void
1165ce2d3bbeSDarrick J. Wong xfs_blockgc_set_iflag(
1166ce2d3bbeSDarrick J. Wong 	struct xfs_inode	*ip,
1167ce2d3bbeSDarrick J. Wong 	unsigned long		iflag)
116827b52867SBrian Foster {
116927b52867SBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
117027b52867SBrian Foster 	struct xfs_perag	*pag;
117127b52867SBrian Foster 
1172ce2d3bbeSDarrick J. Wong 	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1173ce2d3bbeSDarrick J. Wong 
117485a6e764SChristoph Hellwig 	/*
117585a6e764SChristoph Hellwig 	 * Don't bother locking the AG and looking up in the radix trees
117685a6e764SChristoph Hellwig 	 * if we already know that we have the tag set.
117785a6e764SChristoph Hellwig 	 */
1178ce2d3bbeSDarrick J. Wong 	if (ip->i_flags & iflag)
117985a6e764SChristoph Hellwig 		return;
118085a6e764SChristoph Hellwig 	spin_lock(&ip->i_flags_lock);
1181ce2d3bbeSDarrick J. Wong 	ip->i_flags |= iflag;
118285a6e764SChristoph Hellwig 	spin_unlock(&ip->i_flags_lock);
118385a6e764SChristoph Hellwig 
118427b52867SBrian Foster 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
118527b52867SBrian Foster 	spin_lock(&pag->pag_ici_lock);
118627b52867SBrian Foster 
1187c076ae7aSDarrick J. Wong 	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1188ce2d3bbeSDarrick J. Wong 			XFS_ICI_BLOCKGC_TAG);
118927b52867SBrian Foster 
119027b52867SBrian Foster 	spin_unlock(&pag->pag_ici_lock);
119127b52867SBrian Foster 	xfs_perag_put(pag);
119227b52867SBrian Foster }
119327b52867SBrian Foster 
119427b52867SBrian Foster void
119583104d44SDarrick J. Wong xfs_inode_set_eofblocks_tag(
119627b52867SBrian Foster 	xfs_inode_t	*ip)
119727b52867SBrian Foster {
119883104d44SDarrick J. Wong 	trace_xfs_inode_set_eofblocks_tag(ip);
11999669f51dSDarrick J. Wong 	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
120083104d44SDarrick J. Wong }
120183104d44SDarrick J. Wong 
120283104d44SDarrick J. Wong static void
1203ce2d3bbeSDarrick J. Wong xfs_blockgc_clear_iflag(
1204ce2d3bbeSDarrick J. Wong 	struct xfs_inode	*ip,
1205ce2d3bbeSDarrick J. Wong 	unsigned long		iflag)
120683104d44SDarrick J. Wong {
120727b52867SBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
120827b52867SBrian Foster 	struct xfs_perag	*pag;
1209ce2d3bbeSDarrick J. Wong 	bool			clear_tag;
1210ce2d3bbeSDarrick J. Wong 
1211ce2d3bbeSDarrick J. Wong 	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
121227b52867SBrian Foster 
121385a6e764SChristoph Hellwig 	spin_lock(&ip->i_flags_lock);
1214ce2d3bbeSDarrick J. Wong 	ip->i_flags &= ~iflag;
1215ce2d3bbeSDarrick J. Wong 	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
121685a6e764SChristoph Hellwig 	spin_unlock(&ip->i_flags_lock);
121785a6e764SChristoph Hellwig 
1218ce2d3bbeSDarrick J. Wong 	if (!clear_tag)
1219ce2d3bbeSDarrick J. Wong 		return;
1220ce2d3bbeSDarrick J. Wong 
122127b52867SBrian Foster 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
122227b52867SBrian Foster 	spin_lock(&pag->pag_ici_lock);
122327b52867SBrian Foster 
1224c076ae7aSDarrick J. Wong 	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1225ce2d3bbeSDarrick J. Wong 			XFS_ICI_BLOCKGC_TAG);
122627b52867SBrian Foster 
122727b52867SBrian Foster 	spin_unlock(&pag->pag_ici_lock);
122827b52867SBrian Foster 	xfs_perag_put(pag);
122927b52867SBrian Foster }
123027b52867SBrian Foster 
123183104d44SDarrick J. Wong void
123283104d44SDarrick J. Wong xfs_inode_clear_eofblocks_tag(
123383104d44SDarrick J. Wong 	xfs_inode_t	*ip)
123483104d44SDarrick J. Wong {
123583104d44SDarrick J. Wong 	trace_xfs_inode_clear_eofblocks_tag(ip);
1236ce2d3bbeSDarrick J. Wong 	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
123783104d44SDarrick J. Wong }
123883104d44SDarrick J. Wong 
123983104d44SDarrick J. Wong /*
1240be78ff0eSDarrick J. Wong  * Set ourselves up to free CoW blocks from this file.  If it's already clean
1241be78ff0eSDarrick J. Wong  * then we can bail out quickly, but otherwise we must back off if the file
1242be78ff0eSDarrick J. Wong  * is undergoing some kind of write.
1243be78ff0eSDarrick J. Wong  */
1244be78ff0eSDarrick J. Wong static bool
1245be78ff0eSDarrick J. Wong xfs_prep_free_cowblocks(
124651d62690SChristoph Hellwig 	struct xfs_inode	*ip)
1247be78ff0eSDarrick J. Wong {
1248be78ff0eSDarrick J. Wong 	/*
1249be78ff0eSDarrick J. Wong 	 * Just clear the tag if we have an empty cow fork or none at all. It's
1250be78ff0eSDarrick J. Wong 	 * possible the inode was fully unshared since it was originally tagged.
1251be78ff0eSDarrick J. Wong 	 */
125251d62690SChristoph Hellwig 	if (!xfs_inode_has_cow_data(ip)) {
1253be78ff0eSDarrick J. Wong 		trace_xfs_inode_free_cowblocks_invalid(ip);
1254be78ff0eSDarrick J. Wong 		xfs_inode_clear_cowblocks_tag(ip);
1255be78ff0eSDarrick J. Wong 		return false;
1256be78ff0eSDarrick J. Wong 	}
1257be78ff0eSDarrick J. Wong 
1258be78ff0eSDarrick J. Wong 	/*
1259be78ff0eSDarrick J. Wong 	 * If the mapping is dirty or under writeback we cannot touch the
1260be78ff0eSDarrick J. Wong 	 * CoW fork.  Leave it alone if we're in the midst of a directio.
1261be78ff0eSDarrick J. Wong 	 */
1262be78ff0eSDarrick J. Wong 	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1263be78ff0eSDarrick J. Wong 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1264be78ff0eSDarrick J. Wong 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1265be78ff0eSDarrick J. Wong 	    atomic_read(&VFS_I(ip)->i_dio_count))
1266be78ff0eSDarrick J. Wong 		return false;
1267be78ff0eSDarrick J. Wong 
1268be78ff0eSDarrick J. Wong 	return true;
1269be78ff0eSDarrick J. Wong }
1270be78ff0eSDarrick J. Wong 
1271be78ff0eSDarrick J. Wong /*
127283104d44SDarrick J. Wong  * Automatic CoW Reservation Freeing
127383104d44SDarrick J. Wong  *
127483104d44SDarrick J. Wong  * These functions automatically garbage collect leftover CoW reservations
127583104d44SDarrick J. Wong  * that were made on behalf of a cowextsize hint when we start to run out
127683104d44SDarrick J. Wong  * of quota or when the reservations sit around for too long.  If the file
127783104d44SDarrick J. Wong  * has dirty pages or is undergoing writeback, its CoW reservations will
127883104d44SDarrick J. Wong  * be retained.
127983104d44SDarrick J. Wong  *
128083104d44SDarrick J. Wong  * The actual garbage collection piggybacks off the same code that runs
128183104d44SDarrick J. Wong  * the speculative EOF preallocation garbage collector.
128283104d44SDarrick J. Wong  */
128383104d44SDarrick J. Wong STATIC int
128483104d44SDarrick J. Wong xfs_inode_free_cowblocks(
128583104d44SDarrick J. Wong 	struct xfs_inode	*ip,
1286b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw,
12870fa4a10aSDarrick J. Wong 	unsigned int		*lockflags)
128883104d44SDarrick J. Wong {
1289f41a0716SDarrick J. Wong 	bool			wait;
1290be78ff0eSDarrick J. Wong 	int			ret = 0;
129183104d44SDarrick J. Wong 
1292b26b2bf1SDarrick J. Wong 	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1293f41a0716SDarrick J. Wong 
1294ce2d3bbeSDarrick J. Wong 	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1295ce2d3bbeSDarrick J. Wong 		return 0;
1296ce2d3bbeSDarrick J. Wong 
129751d62690SChristoph Hellwig 	if (!xfs_prep_free_cowblocks(ip))
129883104d44SDarrick J. Wong 		return 0;
129983104d44SDarrick J. Wong 
1300b26b2bf1SDarrick J. Wong 	if (!xfs_icwalk_match(ip, icw))
130183104d44SDarrick J. Wong 		return 0;
130283104d44SDarrick J. Wong 
1303f41a0716SDarrick J. Wong 	/*
1304f41a0716SDarrick J. Wong 	 * If the caller is waiting, return -EAGAIN to keep the background
1305f41a0716SDarrick J. Wong 	 * scanner moving and revisit the inode in a subsequent pass.
1306f41a0716SDarrick J. Wong 	 */
13070fa4a10aSDarrick J. Wong 	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
13080fa4a10aSDarrick J. Wong 	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1309f41a0716SDarrick J. Wong 		if (wait)
1310f41a0716SDarrick J. Wong 			return -EAGAIN;
1311f41a0716SDarrick J. Wong 		return 0;
1312f41a0716SDarrick J. Wong 	}
13130fa4a10aSDarrick J. Wong 	*lockflags |= XFS_IOLOCK_EXCL;
13140fa4a10aSDarrick J. Wong 
1315f41a0716SDarrick J. Wong 	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1316f41a0716SDarrick J. Wong 		if (wait)
13170fa4a10aSDarrick J. Wong 			return -EAGAIN;
13180fa4a10aSDarrick J. Wong 		return 0;
1319f41a0716SDarrick J. Wong 	}
13200fa4a10aSDarrick J. Wong 	*lockflags |= XFS_MMAPLOCK_EXCL;
132183104d44SDarrick J. Wong 
1322be78ff0eSDarrick J. Wong 	/*
1323be78ff0eSDarrick J. Wong 	 * Check again, nobody else should be able to dirty blocks or change
1324be78ff0eSDarrick J. Wong 	 * the reflink iflag now that we have the first two locks held.
1325be78ff0eSDarrick J. Wong 	 */
132651d62690SChristoph Hellwig 	if (xfs_prep_free_cowblocks(ip))
13273802a345SChristoph Hellwig 		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
132883104d44SDarrick J. Wong 	return ret;
132983104d44SDarrick J. Wong }
133083104d44SDarrick J. Wong 
133183104d44SDarrick J. Wong void
133283104d44SDarrick J. Wong xfs_inode_set_cowblocks_tag(
133383104d44SDarrick J. Wong 	xfs_inode_t	*ip)
133483104d44SDarrick J. Wong {
13357b7381f0SBrian Foster 	trace_xfs_inode_set_cowblocks_tag(ip);
13369669f51dSDarrick J. Wong 	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
133783104d44SDarrick J. Wong }
133883104d44SDarrick J. Wong 
133983104d44SDarrick J. Wong void
134083104d44SDarrick J. Wong xfs_inode_clear_cowblocks_tag(
134183104d44SDarrick J. Wong 	xfs_inode_t	*ip)
134283104d44SDarrick J. Wong {
13437b7381f0SBrian Foster 	trace_xfs_inode_clear_cowblocks_tag(ip);
1344ce2d3bbeSDarrick J. Wong 	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
134583104d44SDarrick J. Wong }
1346d6b636ebSDarrick J. Wong 
1347d6b636ebSDarrick J. Wong /* Disable post-EOF and CoW block auto-reclamation. */
1348d6b636ebSDarrick J. Wong void
1349c9a6526fSDarrick J. Wong xfs_blockgc_stop(
1350d6b636ebSDarrick J. Wong 	struct xfs_mount	*mp)
1351d6b636ebSDarrick J. Wong {
1352894ecacfSDarrick J. Wong 	struct xfs_perag	*pag;
1353894ecacfSDarrick J. Wong 	xfs_agnumber_t		agno;
1354894ecacfSDarrick J. Wong 
13556f649091SDarrick J. Wong 	if (!xfs_clear_blockgc_enabled(mp))
13566f649091SDarrick J. Wong 		return;
13576f649091SDarrick J. Wong 
13586f649091SDarrick J. Wong 	for_each_perag(mp, agno, pag)
1359894ecacfSDarrick J. Wong 		cancel_delayed_work_sync(&pag->pag_blockgc_work);
13606f649091SDarrick J. Wong 	trace_xfs_blockgc_stop(mp, __return_address);
1361d6b636ebSDarrick J. Wong }
1362d6b636ebSDarrick J. Wong 
1363d6b636ebSDarrick J. Wong /* Enable post-EOF and CoW block auto-reclamation. */
1364d6b636ebSDarrick J. Wong void
1365c9a6526fSDarrick J. Wong xfs_blockgc_start(
1366d6b636ebSDarrick J. Wong 	struct xfs_mount	*mp)
1367d6b636ebSDarrick J. Wong {
1368894ecacfSDarrick J. Wong 	struct xfs_perag	*pag;
1369894ecacfSDarrick J. Wong 	xfs_agnumber_t		agno;
1370894ecacfSDarrick J. Wong 
13716f649091SDarrick J. Wong 	if (xfs_set_blockgc_enabled(mp))
13726f649091SDarrick J. Wong 		return;
13736f649091SDarrick J. Wong 
13746f649091SDarrick J. Wong 	trace_xfs_blockgc_start(mp, __return_address);
1375894ecacfSDarrick J. Wong 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1376894ecacfSDarrick J. Wong 		xfs_blockgc_queue(pag);
1377d6b636ebSDarrick J. Wong }
13783d4feec0SDarrick J. Wong 
1379d20d5edcSDarrick J. Wong /* Don't try to run block gc on an inode that's in any of these states. */
1380d20d5edcSDarrick J. Wong #define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
1381ab23a776SDave Chinner 					 XFS_NEED_INACTIVE | \
1382ab23a776SDave Chinner 					 XFS_INACTIVATING | \
1383d20d5edcSDarrick J. Wong 					 XFS_IRECLAIMABLE | \
1384d20d5edcSDarrick J. Wong 					 XFS_IRECLAIM)
1385df600197SDarrick J. Wong /*
1386b9baaef4SDarrick J. Wong  * Decide if the given @ip is eligible for garbage collection of speculative
1387b9baaef4SDarrick J. Wong  * preallocations, and grab it if so.  Returns true if it's ready to go or
1388b9baaef4SDarrick J. Wong  * false if we should just ignore it.
1389df600197SDarrick J. Wong  */
1390df600197SDarrick J. Wong static bool
1391b9baaef4SDarrick J. Wong xfs_blockgc_igrab(
13927fdff526SDarrick J. Wong 	struct xfs_inode	*ip)
1393df600197SDarrick J. Wong {
1394df600197SDarrick J. Wong 	struct inode		*inode = VFS_I(ip);
1395df600197SDarrick J. Wong 
1396df600197SDarrick J. Wong 	ASSERT(rcu_read_lock_held());
1397df600197SDarrick J. Wong 
1398df600197SDarrick J. Wong 	/* Check for stale RCU freed inode */
1399df600197SDarrick J. Wong 	spin_lock(&ip->i_flags_lock);
1400df600197SDarrick J. Wong 	if (!ip->i_ino)
1401df600197SDarrick J. Wong 		goto out_unlock_noent;
1402df600197SDarrick J. Wong 
1403d20d5edcSDarrick J. Wong 	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1404df600197SDarrick J. Wong 		goto out_unlock_noent;
1405df600197SDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
1406df600197SDarrick J. Wong 
1407df600197SDarrick J. Wong 	/* nothing to sync during shutdown */
140875c8c50fSDave Chinner 	if (xfs_is_shutdown(ip->i_mount))
1409df600197SDarrick J. Wong 		return false;
1410df600197SDarrick J. Wong 
1411df600197SDarrick J. Wong 	/* If we can't grab the inode, it must be on its way to reclaim. */
1412df600197SDarrick J. Wong 	if (!igrab(inode))
1413df600197SDarrick J. Wong 		return false;
1414df600197SDarrick J. Wong 
1415df600197SDarrick J. Wong 	/* inode is valid */
1416df600197SDarrick J. Wong 	return true;
1417df600197SDarrick J. Wong 
1418df600197SDarrick J. Wong out_unlock_noent:
1419df600197SDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
1420df600197SDarrick J. Wong 	return false;
1421df600197SDarrick J. Wong }
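
/*
 * Editorial note: this is the standard RCU inode cache lookup pattern.
 * The radix tree walk runs under rcu_read_lock(), so the inode found may
 * already have been freed and recycled; rechecking i_ino under
 * i_flags_lock and then taking a VFS reference with igrab() is what makes
 * the pointer safe to use once the RCU read section ends.
 */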
1422df600197SDarrick J. Wong 
142341956753SDarrick J. Wong /* Scan one incore inode for block preallocations that we can remove. */
142441956753SDarrick J. Wong static int
142541956753SDarrick J. Wong xfs_blockgc_scan_inode(
142641956753SDarrick J. Wong 	struct xfs_inode	*ip,
1427b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
142885c5b270SDarrick J. Wong {
14290fa4a10aSDarrick J. Wong 	unsigned int		lockflags = 0;
143085c5b270SDarrick J. Wong 	int			error;
143185c5b270SDarrick J. Wong 
1432b26b2bf1SDarrick J. Wong 	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
143385c5b270SDarrick J. Wong 	if (error)
14340fa4a10aSDarrick J. Wong 		goto unlock;
143585c5b270SDarrick J. Wong 
1436b26b2bf1SDarrick J. Wong 	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
14370fa4a10aSDarrick J. Wong unlock:
14380fa4a10aSDarrick J. Wong 	if (lockflags)
14390fa4a10aSDarrick J. Wong 		xfs_iunlock(ip, lockflags);
1440594ab00bSDarrick J. Wong 	xfs_irele(ip);
144185c5b270SDarrick J. Wong 	return error;
144285c5b270SDarrick J. Wong }
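
/*
 * Editorial note: the two trim helpers above share @lockflags so that the
 * IOLOCK taken for the eofblocks pass is reused by the cowblocks pass
 * rather than cycled, and every lock that was taken is dropped exactly
 * once here regardless of which helper bailed out early.
 */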
144385c5b270SDarrick J. Wong 
14449669f51dSDarrick J. Wong /* Background worker that trims preallocated space. */
14459669f51dSDarrick J. Wong void
14469669f51dSDarrick J. Wong xfs_blockgc_worker(
14479669f51dSDarrick J. Wong 	struct work_struct	*work)
14489669f51dSDarrick J. Wong {
1449894ecacfSDarrick J. Wong 	struct xfs_perag	*pag = container_of(to_delayed_work(work),
1450894ecacfSDarrick J. Wong 					struct xfs_perag, pag_blockgc_work);
1451894ecacfSDarrick J. Wong 	struct xfs_mount	*mp = pag->pag_mount;
14529669f51dSDarrick J. Wong 	int			error;
14539669f51dSDarrick J. Wong 
14546f649091SDarrick J. Wong 	trace_xfs_blockgc_worker(mp, __return_address);
14556f649091SDarrick J. Wong 
1456f427cf5cSDarrick J. Wong 	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
14579669f51dSDarrick J. Wong 	if (error)
1458894ecacfSDarrick J. Wong 		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1459894ecacfSDarrick J. Wong 				pag->pag_agno, error);
1460894ecacfSDarrick J. Wong 	xfs_blockgc_queue(pag);
14619669f51dSDarrick J. Wong }
14629669f51dSDarrick J. Wong 
146385c5b270SDarrick J. Wong /*
14642eb66502SDarrick J. Wong  * Try to free space in the filesystem by purging inactive inodes, eofblocks
14652eb66502SDarrick J. Wong  * and cowblocks.
146685c5b270SDarrick J. Wong  */
146785c5b270SDarrick J. Wong int
146885c5b270SDarrick J. Wong xfs_blockgc_free_space(
146985c5b270SDarrick J. Wong 	struct xfs_mount	*mp,
1470b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
147185c5b270SDarrick J. Wong {
14722eb66502SDarrick J. Wong 	int			error;
14732eb66502SDarrick J. Wong 
1474b26b2bf1SDarrick J. Wong 	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
147585c5b270SDarrick J. Wong 
14762eb66502SDarrick J. Wong 	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
14772eb66502SDarrick J. Wong 	if (error)
14782eb66502SDarrick J. Wong 		return error;
14792eb66502SDarrick J. Wong 
14802eb66502SDarrick J. Wong 	xfs_inodegc_flush(mp);
14812eb66502SDarrick J. Wong 	return 0;
148285c5b270SDarrick J. Wong }
148385c5b270SDarrick J. Wong 
14843d4feec0SDarrick J. Wong /*
1485e8d04c2aSDarrick J. Wong  * Reclaim all the free space that we can by scheduling the background blockgc
1486e8d04c2aSDarrick J. Wong  * and inodegc workers immediately and waiting for them all to clear.
1487e8d04c2aSDarrick J. Wong  */
1488e8d04c2aSDarrick J. Wong void
1489e8d04c2aSDarrick J. Wong xfs_blockgc_flush_all(
1490e8d04c2aSDarrick J. Wong 	struct xfs_mount	*mp)
1491e8d04c2aSDarrick J. Wong {
1492e8d04c2aSDarrick J. Wong 	struct xfs_perag	*pag;
1493e8d04c2aSDarrick J. Wong 	xfs_agnumber_t		agno;
1494e8d04c2aSDarrick J. Wong 
1495e8d04c2aSDarrick J. Wong 	trace_xfs_blockgc_flush_all(mp, __return_address);
1496e8d04c2aSDarrick J. Wong 
1497e8d04c2aSDarrick J. Wong 	/*
1498e8d04c2aSDarrick J. Wong 	 * For each blockgc worker, move its queue time up to now.  If it
1499e8d04c2aSDarrick J. Wong 	 * wasn't queued, it will not be requeued.  Then flush whatever's
1500e8d04c2aSDarrick J. Wong 	 * left.
1501e8d04c2aSDarrick J. Wong 	 */
1502e8d04c2aSDarrick J. Wong 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1503e8d04c2aSDarrick J. Wong 		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
1504e8d04c2aSDarrick J. Wong 				&pag->pag_blockgc_work, 0);
1505e8d04c2aSDarrick J. Wong 
1506e8d04c2aSDarrick J. Wong 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1507e8d04c2aSDarrick J. Wong 		flush_delayed_work(&pag->pag_blockgc_work);
1508e8d04c2aSDarrick J. Wong 
1509e8d04c2aSDarrick J. Wong 	xfs_inodegc_flush(mp);
1510e8d04c2aSDarrick J. Wong }
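
/*
 * Editorial note: splitting the kick and the wait into two passes lets all
 * tagged per-AG workers run concurrently; flushing inside a single loop
 * would serialize them, waiting on each AG's worker before kicking the
 * next one.
 */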
1511e8d04c2aSDarrick J. Wong 
1512e8d04c2aSDarrick J. Wong /*
1513c237dd7cSDarrick J. Wong  * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
1514c237dd7cSDarrick J. Wong  * quota caused an allocation failure, so we make a best effort by including
1515c237dd7cSDarrick J. Wong  * each quota under low free space conditions (less than 1% free space) in the
1516c237dd7cSDarrick J. Wong  * scan.
1517111068f8SDarrick J. Wong  *
1518111068f8SDarrick J. Wong  * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
15192d53f66bSDarrick J. Wong  * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
1520111068f8SDarrick J. Wong  * MMAPLOCK.
15213d4feec0SDarrick J. Wong  */
1522111068f8SDarrick J. Wong int
1523c237dd7cSDarrick J. Wong xfs_blockgc_free_dquots(
1524c237dd7cSDarrick J. Wong 	struct xfs_mount	*mp,
1525c237dd7cSDarrick J. Wong 	struct xfs_dquot	*udqp,
1526c237dd7cSDarrick J. Wong 	struct xfs_dquot	*gdqp,
1527c237dd7cSDarrick J. Wong 	struct xfs_dquot	*pdqp,
15282d53f66bSDarrick J. Wong 	unsigned int		iwalk_flags)
15293d4feec0SDarrick J. Wong {
1530b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	icw = {0};
15313d4feec0SDarrick J. Wong 	bool			do_work = false;
15323d4feec0SDarrick J. Wong 
1533c237dd7cSDarrick J. Wong 	if (!udqp && !gdqp && !pdqp)
1534c237dd7cSDarrick J. Wong 		return 0;
1535c237dd7cSDarrick J. Wong 
15363d4feec0SDarrick J. Wong 	/*
1537111068f8SDarrick J. Wong 	 * Run a scan to free blocks using the union filter to cover all
1538111068f8SDarrick J. Wong 	 * applicable quotas in a single scan.
15393d4feec0SDarrick J. Wong 	 */
1540b26b2bf1SDarrick J. Wong 	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
15413d4feec0SDarrick J. Wong 
1542c237dd7cSDarrick J. Wong 	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1543b26b2bf1SDarrick J. Wong 		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1544b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
15453d4feec0SDarrick J. Wong 		do_work = true;
15463d4feec0SDarrick J. Wong 	}
15473d4feec0SDarrick J. Wong 
1548c237dd7cSDarrick J. Wong 	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1549b26b2bf1SDarrick J. Wong 		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1550b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
15513d4feec0SDarrick J. Wong 		do_work = true;
15523d4feec0SDarrick J. Wong 	}
15533d4feec0SDarrick J. Wong 
1554c237dd7cSDarrick J. Wong 	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1555b26b2bf1SDarrick J. Wong 		icw.icw_prid = pdqp->q_id;
1556b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
15573d4feec0SDarrick J. Wong 		do_work = true;
15583d4feec0SDarrick J. Wong 	}
15593d4feec0SDarrick J. Wong 
15603d4feec0SDarrick J. Wong 	if (!do_work)
1561111068f8SDarrick J. Wong 		return 0;
15623d4feec0SDarrick J. Wong 
1563b26b2bf1SDarrick J. Wong 	return xfs_blockgc_free_space(mp, &icw);
1564c237dd7cSDarrick J. Wong }
1565c237dd7cSDarrick J. Wong 
1566c237dd7cSDarrick J. Wong /* Run cow/eofblocks scans on the quotas attached to the inode. */
1567c237dd7cSDarrick J. Wong int
1568c237dd7cSDarrick J. Wong xfs_blockgc_free_quota(
1569c237dd7cSDarrick J. Wong 	struct xfs_inode	*ip,
15702d53f66bSDarrick J. Wong 	unsigned int		iwalk_flags)
1571c237dd7cSDarrick J. Wong {
1572c237dd7cSDarrick J. Wong 	return xfs_blockgc_free_dquots(ip->i_mount,
1573c237dd7cSDarrick J. Wong 			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1574c237dd7cSDarrick J. Wong 			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
15752d53f66bSDarrick J. Wong 			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
15763d4feec0SDarrick J. Wong }
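
/*
 * Illustrative sketch (editorial): callers typically use this as a one-shot
 * retry after a quota or space related allocation failure, along the lines
 * of:
 *
 *	error = ...;	// some allocating operation
 *	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
 *		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
 *		retried = true;
 *		goto retry;
 *	}
 */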
1577df600197SDarrick J. Wong 
1578df600197SDarrick J. Wong /* XFS Inode Cache Walking Code */
1579df600197SDarrick J. Wong 
1580df600197SDarrick J. Wong /*
1581f1bc5c56SDarrick J. Wong  * The inode lookup is done in batches to keep the amount of lock traffic and
1582f1bc5c56SDarrick J. Wong  * radix tree lookups to a minimum. The batch size is a trade off between
1583f1bc5c56SDarrick J. Wong  * lookup reduction and stack usage. This is in the reclaim path, so we can't
1584f1bc5c56SDarrick J. Wong  * be too greedy.
1585f1bc5c56SDarrick J. Wong  */
1586f1bc5c56SDarrick J. Wong #define XFS_LOOKUP_BATCH	32
1587f1bc5c56SDarrick J. Wong 
1588f1bc5c56SDarrick J. Wong 
1589f1bc5c56SDarrick J. Wong /*
1590b9baaef4SDarrick J. Wong  * Decide if we want to grab this inode in anticipation of doing work towards
1591594ab00bSDarrick J. Wong  * the goal.
1592b9baaef4SDarrick J. Wong  */
1593b9baaef4SDarrick J. Wong static inline bool
1594b9baaef4SDarrick J. Wong xfs_icwalk_igrab(
1595b9baaef4SDarrick J. Wong 	enum xfs_icwalk_goal	goal,
15969492750aSDarrick J. Wong 	struct xfs_inode	*ip,
1597b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1598b9baaef4SDarrick J. Wong {
1599b9baaef4SDarrick J. Wong 	switch (goal) {
1600b9baaef4SDarrick J. Wong 	case XFS_ICWALK_BLOCKGC:
16017fdff526SDarrick J. Wong 		return xfs_blockgc_igrab(ip);
1602f1bc5c56SDarrick J. Wong 	case XFS_ICWALK_RECLAIM:
1603b26b2bf1SDarrick J. Wong 		return xfs_reclaim_igrab(ip, icw);
1604b9baaef4SDarrick J. Wong 	default:
1605b9baaef4SDarrick J. Wong 		return false;
1606b9baaef4SDarrick J. Wong 	}
1607b9baaef4SDarrick J. Wong }
1608b9baaef4SDarrick J. Wong 
1609594ab00bSDarrick J. Wong /*
1610594ab00bSDarrick J. Wong  * Process an inode.  Each processing function must handle any state changes
1611594ab00bSDarrick J. Wong  * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
1612594ab00bSDarrick J. Wong  */
1613f427cf5cSDarrick J. Wong static inline int
1614f427cf5cSDarrick J. Wong xfs_icwalk_process_inode(
1615f427cf5cSDarrick J. Wong 	enum xfs_icwalk_goal	goal,
1616f427cf5cSDarrick J. Wong 	struct xfs_inode	*ip,
1617f1bc5c56SDarrick J. Wong 	struct xfs_perag	*pag,
1618b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1619f427cf5cSDarrick J. Wong {
1620594ab00bSDarrick J. Wong 	int			error = 0;
1621f427cf5cSDarrick J. Wong 
1622f427cf5cSDarrick J. Wong 	switch (goal) {
1623f427cf5cSDarrick J. Wong 	case XFS_ICWALK_BLOCKGC:
1624b26b2bf1SDarrick J. Wong 		error = xfs_blockgc_scan_inode(ip, icw);
1625f427cf5cSDarrick J. Wong 		break;
1626f1bc5c56SDarrick J. Wong 	case XFS_ICWALK_RECLAIM:
1627f1bc5c56SDarrick J. Wong 		xfs_reclaim_inode(ip, pag);
1628f1bc5c56SDarrick J. Wong 		break;
1629f427cf5cSDarrick J. Wong 	}
1630f427cf5cSDarrick J. Wong 	return error;
1631f427cf5cSDarrick J. Wong }
1632f427cf5cSDarrick J. Wong 
1633b9baaef4SDarrick J. Wong /*
1634f427cf5cSDarrick J. Wong  * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1635f427cf5cSDarrick J. Wong  * process them in some manner.
1636df600197SDarrick J. Wong  */
1637df600197SDarrick J. Wong static int
1638c1115c0cSDarrick J. Wong xfs_icwalk_ag(
1639df600197SDarrick J. Wong 	struct xfs_perag	*pag,
1640f427cf5cSDarrick J. Wong 	enum xfs_icwalk_goal	goal,
1641b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1642df600197SDarrick J. Wong {
1643df600197SDarrick J. Wong 	struct xfs_mount	*mp = pag->pag_mount;
1644df600197SDarrick J. Wong 	uint32_t		first_index;
1645df600197SDarrick J. Wong 	int			last_error = 0;
1646df600197SDarrick J. Wong 	int			skipped;
1647df600197SDarrick J. Wong 	bool			done;
1648df600197SDarrick J. Wong 	int			nr_found;
1649df600197SDarrick J. Wong 
1650df600197SDarrick J. Wong restart:
1651df600197SDarrick J. Wong 	done = false;
1652df600197SDarrick J. Wong 	skipped = 0;
1653f1bc5c56SDarrick J. Wong 	if (goal == XFS_ICWALK_RECLAIM)
1654f1bc5c56SDarrick J. Wong 		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1655f1bc5c56SDarrick J. Wong 	else
1656df600197SDarrick J. Wong 		first_index = 0;
1657df600197SDarrick J. Wong 	nr_found = 0;
1658df600197SDarrick J. Wong 	do {
1659df600197SDarrick J. Wong 		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1660df600197SDarrick J. Wong 		int		error = 0;
1661df600197SDarrick J. Wong 		int		i;
1662df600197SDarrick J. Wong 
1663df600197SDarrick J. Wong 		rcu_read_lock();
1664df600197SDarrick J. Wong 
1665a437b9b4SChristoph Hellwig 		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1666df600197SDarrick J. Wong 				(void **) batch, first_index,
1667a437b9b4SChristoph Hellwig 				XFS_LOOKUP_BATCH, goal);
1668df600197SDarrick J. Wong 		if (!nr_found) {
1669f1bc5c56SDarrick J. Wong 			done = true;
1670df600197SDarrick J. Wong 			rcu_read_unlock();
1671df600197SDarrick J. Wong 			break;
1672df600197SDarrick J. Wong 		}
1673df600197SDarrick J. Wong 
1674df600197SDarrick J. Wong 		/*
1675df600197SDarrick J. Wong 		 * Grab the inodes before we drop the lock. If we found
1676df600197SDarrick J. Wong 		 * nothing, nr == 0 and the loop will be skipped.
1677df600197SDarrick J. Wong 		 */
1678df600197SDarrick J. Wong 		for (i = 0; i < nr_found; i++) {
1679df600197SDarrick J. Wong 			struct xfs_inode *ip = batch[i];
1680df600197SDarrick J. Wong 
1681b26b2bf1SDarrick J. Wong 			if (done || !xfs_icwalk_igrab(goal, ip, icw))
1682df600197SDarrick J. Wong 				batch[i] = NULL;
1683df600197SDarrick J. Wong 
1684df600197SDarrick J. Wong 			/*
1685df600197SDarrick J. Wong 			 * Update the index for the next lookup. Catch
1686df600197SDarrick J. Wong 			 * overflows into the next AG range which can occur if
1687df600197SDarrick J. Wong 			 * we have inodes in the last block of the AG and we
1688df600197SDarrick J. Wong 			 * are currently pointing to the last inode.
1689df600197SDarrick J. Wong 			 *
1690df600197SDarrick J. Wong 			 * Because we may see inodes that are from the wrong AG
1691df600197SDarrick J. Wong 			 * due to RCU freeing and reallocation, only update the
1692df600197SDarrick J. Wong 			 * index if it lies in this AG. It was a race that led
1693df600197SDarrick J. Wong 			 * us to see this inode, so another lookup from the
1694df600197SDarrick J. Wong 			 * same index will not find it again.
1695df600197SDarrick J. Wong 			 */
1696df600197SDarrick J. Wong 			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1697df600197SDarrick J. Wong 				continue;
1698df600197SDarrick J. Wong 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1699df600197SDarrick J. Wong 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1700df600197SDarrick J. Wong 				done = true;
1701df600197SDarrick J. Wong 		}
1702df600197SDarrick J. Wong 
1703df600197SDarrick J. Wong 		/* unlock now we've grabbed the inodes. */
1704df600197SDarrick J. Wong 		rcu_read_unlock();
1705df600197SDarrick J. Wong 
1706df600197SDarrick J. Wong 		for (i = 0; i < nr_found; i++) {
1707df600197SDarrick J. Wong 			if (!batch[i])
1708df600197SDarrick J. Wong 				continue;
1709f1bc5c56SDarrick J. Wong 			error = xfs_icwalk_process_inode(goal, batch[i], pag,
1710b26b2bf1SDarrick J. Wong 					icw);
1711df600197SDarrick J. Wong 			if (error == -EAGAIN) {
1712df600197SDarrick J. Wong 				skipped++;
1713df600197SDarrick J. Wong 				continue;
1714df600197SDarrick J. Wong 			}
1715df600197SDarrick J. Wong 			if (error && last_error != -EFSCORRUPTED)
1716df600197SDarrick J. Wong 				last_error = error;
1717df600197SDarrick J. Wong 		}
1718df600197SDarrick J. Wong 
1719df600197SDarrick J. Wong 		/* bail out if the filesystem is corrupted.  */
1720df600197SDarrick J. Wong 		if (error == -EFSCORRUPTED)
1721df600197SDarrick J. Wong 			break;
1722df600197SDarrick J. Wong 
1723df600197SDarrick J. Wong 		cond_resched();
1724df600197SDarrick J. Wong 
1725b26b2bf1SDarrick J. Wong 		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1726b26b2bf1SDarrick J. Wong 			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1727b26b2bf1SDarrick J. Wong 			if (icw->icw_scan_limit <= 0)
1728f1bc5c56SDarrick J. Wong 				break;
1729f1bc5c56SDarrick J. Wong 		}
1730df600197SDarrick J. Wong 	} while (nr_found && !done);
1731df600197SDarrick J. Wong 
1732f1bc5c56SDarrick J. Wong 	if (goal == XFS_ICWALK_RECLAIM) {
1733f1bc5c56SDarrick J. Wong 		if (done)
1734f1bc5c56SDarrick J. Wong 			first_index = 0;
1735f1bc5c56SDarrick J. Wong 		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1736f1bc5c56SDarrick J. Wong 	}
1737f1bc5c56SDarrick J. Wong 
1738df600197SDarrick J. Wong 	if (skipped) {
1739df600197SDarrick J. Wong 		delay(1);
1740df600197SDarrick J. Wong 		goto restart;
1741df600197SDarrick J. Wong 	}
1742df600197SDarrick J. Wong 	return last_error;
1743df600197SDarrick J. Wong }
1744df600197SDarrick J. Wong 
1745f427cf5cSDarrick J. Wong /* Walk all incore inodes to achieve a given goal. */
1746df600197SDarrick J. Wong static int
1747c1115c0cSDarrick J. Wong xfs_icwalk(
1748df600197SDarrick J. Wong 	struct xfs_mount	*mp,
1749f427cf5cSDarrick J. Wong 	enum xfs_icwalk_goal	goal,
1750b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1751df600197SDarrick J. Wong {
1752df600197SDarrick J. Wong 	struct xfs_perag	*pag;
1753df600197SDarrick J. Wong 	int			error = 0;
1754df600197SDarrick J. Wong 	int			last_error = 0;
1755a437b9b4SChristoph Hellwig 	xfs_agnumber_t		agno;
1756df600197SDarrick J. Wong 
1757a437b9b4SChristoph Hellwig 	for_each_perag_tag(mp, agno, pag, goal) {
1758b26b2bf1SDarrick J. Wong 		error = xfs_icwalk_ag(pag, goal, icw);
1759df600197SDarrick J. Wong 		if (error) {
1760df600197SDarrick J. Wong 			last_error = error;
1761a437b9b4SChristoph Hellwig 			if (error == -EFSCORRUPTED) {
1762a437b9b4SChristoph Hellwig 				xfs_perag_put(pag);
1763df600197SDarrick J. Wong 				break;
1764df600197SDarrick J. Wong 			}
1765df600197SDarrick J. Wong 		}
1766a437b9b4SChristoph Hellwig 	}
17682d53f66bSDarrick J. Wong 	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
1767df600197SDarrick J. Wong 	return last_error;
1769df600197SDarrick J. Wong }
1770c6c2066dSDarrick J. Wong 
1771c6c2066dSDarrick J. Wong #ifdef DEBUG
1772c6c2066dSDarrick J. Wong static void
1773c6c2066dSDarrick J. Wong xfs_check_delalloc(
1774c6c2066dSDarrick J. Wong 	struct xfs_inode	*ip,
1775c6c2066dSDarrick J. Wong 	int			whichfork)
1776c6c2066dSDarrick J. Wong {
1777732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1778c6c2066dSDarrick J. Wong 	struct xfs_bmbt_irec	got;
1779c6c2066dSDarrick J. Wong 	struct xfs_iext_cursor	icur;
1780c6c2066dSDarrick J. Wong 
1781c6c2066dSDarrick J. Wong 	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1782c6c2066dSDarrick J. Wong 		return;
1783c6c2066dSDarrick J. Wong 	do {
1784c6c2066dSDarrick J. Wong 		if (isnullstartblock(got.br_startblock)) {
1785c6c2066dSDarrick J. Wong 			xfs_warn(ip->i_mount,
1786c6c2066dSDarrick J. Wong 	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1787c6c2066dSDarrick J. Wong 				ip->i_ino,
1788c6c2066dSDarrick J. Wong 				whichfork == XFS_DATA_FORK ? "data" : "cow",
1789c6c2066dSDarrick J. Wong 				got.br_startoff, got.br_blockcount);
1790c6c2066dSDarrick J. Wong 		}
1791c6c2066dSDarrick J. Wong 	} while (xfs_iext_next_extent(ifp, &icur, &got));
1792c6c2066dSDarrick J. Wong }
1793c6c2066dSDarrick J. Wong #else
1794c6c2066dSDarrick J. Wong #define xfs_check_delalloc(ip, whichfork)	do { } while (0)
1795c6c2066dSDarrick J. Wong #endif
1796c6c2066dSDarrick J. Wong 
1797ab23a776SDave Chinner /* Schedule the inode for reclaim. */
1798ab23a776SDave Chinner static void
1799ab23a776SDave Chinner xfs_inodegc_set_reclaimable(
1800c6c2066dSDarrick J. Wong 	struct xfs_inode	*ip)
1801c6c2066dSDarrick J. Wong {
1802c6c2066dSDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
1803c6c2066dSDarrick J. Wong 	struct xfs_perag	*pag;
1804c6c2066dSDarrick J. Wong 
180575c8c50fSDave Chinner 	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1806c6c2066dSDarrick J. Wong 		xfs_check_delalloc(ip, XFS_DATA_FORK);
1807c6c2066dSDarrick J. Wong 		xfs_check_delalloc(ip, XFS_COW_FORK);
1808c6c2066dSDarrick J. Wong 		ASSERT(0);
1809c6c2066dSDarrick J. Wong 	}
1810c6c2066dSDarrick J. Wong 
1811c6c2066dSDarrick J. Wong 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1812c6c2066dSDarrick J. Wong 	spin_lock(&pag->pag_ici_lock);
1813c6c2066dSDarrick J. Wong 	spin_lock(&ip->i_flags_lock);
1814c6c2066dSDarrick J. Wong 
1815ab23a776SDave Chinner 	trace_xfs_inode_set_reclaimable(ip);
1816ab23a776SDave Chinner 	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1817ab23a776SDave Chinner 	ip->i_flags |= XFS_IRECLAIMABLE;
1818c6c2066dSDarrick J. Wong 	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1819c6c2066dSDarrick J. Wong 			XFS_ICI_RECLAIM_TAG);
1820c6c2066dSDarrick J. Wong 
1821c6c2066dSDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
1822c6c2066dSDarrick J. Wong 	spin_unlock(&pag->pag_ici_lock);
1823c6c2066dSDarrick J. Wong 	xfs_perag_put(pag);
1824c6c2066dSDarrick J. Wong }
1825ab23a776SDave Chinner 
1826ab23a776SDave Chinner /*
1827ab23a776SDave Chinner  * Free all speculative preallocations and possibly even the inode itself.
1828ab23a776SDave Chinner  * This is the last chance to make changes to an otherwise unreferenced file
1829ab23a776SDave Chinner  * before incore reclamation happens.
1830ab23a776SDave Chinner  */
1831ab23a776SDave Chinner static void
1832ab23a776SDave Chinner xfs_inodegc_inactivate(
1833ab23a776SDave Chinner 	struct xfs_inode	*ip)
1834ab23a776SDave Chinner {
1835ab23a776SDave Chinner 	trace_xfs_inode_inactivating(ip);
1836ab23a776SDave Chinner 	xfs_inactive(ip);
1837ab23a776SDave Chinner 	xfs_inodegc_set_reclaimable(ip);
1838ab23a776SDave Chinner }
1839ab23a776SDave Chinner 
1840ab23a776SDave Chinner void
1841ab23a776SDave Chinner xfs_inodegc_worker(
1842ab23a776SDave Chinner 	struct work_struct	*work)
1843ab23a776SDave Chinner {
18447cf2b0f9SDave Chinner 	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
18457cf2b0f9SDave Chinner 						struct xfs_inodegc, work);
1846ab23a776SDave Chinner 	struct llist_node	*node = llist_del_all(&gc->list);
1847ab23a776SDave Chinner 	struct xfs_inode	*ip, *n;
1848ab23a776SDave Chinner 
1849ab23a776SDave Chinner 	WRITE_ONCE(gc->items, 0);
1850ab23a776SDave Chinner 
1851ab23a776SDave Chinner 	if (!node)
1852ab23a776SDave Chinner 		return;
1853ab23a776SDave Chinner 
1854ab23a776SDave Chinner 	ip = llist_entry(node, struct xfs_inode, i_gclist);
185540b1de00SDarrick J. Wong 	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
1856ab23a776SDave Chinner 
185740b1de00SDarrick J. Wong 	WRITE_ONCE(gc->shrinker_hits, 0);
1858ab23a776SDave Chinner 	llist_for_each_entry_safe(ip, n, node, i_gclist) {
1859ab23a776SDave Chinner 		xfs_iflags_set(ip, XFS_INACTIVATING);
1860ab23a776SDave Chinner 		xfs_inodegc_inactivate(ip);
1861ab23a776SDave Chinner 	}
1862ab23a776SDave Chinner }
1863ab23a776SDave Chinner 
1864ab23a776SDave Chinner /*
18655e672cd6SDave Chinner  * Expedite all pending inodegc work to run immediately. This does not wait for
18665e672cd6SDave Chinner  * completion of the work.
18675e672cd6SDave Chinner  */
18685e672cd6SDave Chinner void
18695e672cd6SDave Chinner xfs_inodegc_push(
18705e672cd6SDave Chinner 	struct xfs_mount	*mp)
18715e672cd6SDave Chinner {
18725e672cd6SDave Chinner 	if (!xfs_is_inodegc_enabled(mp))
18735e672cd6SDave Chinner 		return;
18745e672cd6SDave Chinner 	trace_xfs_inodegc_push(mp, __return_address);
18755e672cd6SDave Chinner 	xfs_inodegc_queue_all(mp);
18765e672cd6SDave Chinner }
18775e672cd6SDave Chinner 
18785e672cd6SDave Chinner /*
18796191cf3aSBrian Foster  * Force all currently queued inode inactivation work to run immediately and
18806191cf3aSBrian Foster  * wait for the work to finish.
1881ab23a776SDave Chinner  */
1882ab23a776SDave Chinner void
1883ab23a776SDave Chinner xfs_inodegc_flush(
1884ab23a776SDave Chinner 	struct xfs_mount	*mp)
1885ab23a776SDave Chinner {
18865e672cd6SDave Chinner 	xfs_inodegc_push(mp);
1887ab23a776SDave Chinner 	trace_xfs_inodegc_flush(mp, __return_address);
18886191cf3aSBrian Foster 	flush_workqueue(mp->m_inodegc_wq);
1889ab23a776SDave Chinner }
1890ab23a776SDave Chinner 
1891ab23a776SDave Chinner /*
1892ab23a776SDave Chinner  * Flush all the pending work and then disable the inode inactivation background
1893ab23a776SDave Chinner  * workers and wait for them to stop.
1894ab23a776SDave Chinner  */
1895ab23a776SDave Chinner void
1896ab23a776SDave Chinner xfs_inodegc_stop(
1897ab23a776SDave Chinner 	struct xfs_mount	*mp)
1898ab23a776SDave Chinner {
1899ab23a776SDave Chinner 	if (!xfs_clear_inodegc_enabled(mp))
1900ab23a776SDave Chinner 		return;
1901ab23a776SDave Chinner 
1902ab23a776SDave Chinner 	xfs_inodegc_queue_all(mp);
19036191cf3aSBrian Foster 	drain_workqueue(mp->m_inodegc_wq);
1904ab23a776SDave Chinner 
1905ab23a776SDave Chinner 	trace_xfs_inodegc_stop(mp, __return_address);
1906ab23a776SDave Chinner }
1907ab23a776SDave Chinner 
1908ab23a776SDave Chinner /*
1909ab23a776SDave Chinner  * Enable the inode inactivation background workers and schedule deferred inode
1910ab23a776SDave Chinner  * inactivation work if there is any.
1911ab23a776SDave Chinner  */
1912ab23a776SDave Chinner void
1913ab23a776SDave Chinner xfs_inodegc_start(
1914ab23a776SDave Chinner 	struct xfs_mount	*mp)
1915ab23a776SDave Chinner {
1916ab23a776SDave Chinner 	if (xfs_set_inodegc_enabled(mp))
1917ab23a776SDave Chinner 		return;
1918ab23a776SDave Chinner 
1919ab23a776SDave Chinner 	trace_xfs_inodegc_start(mp, __return_address);
1920ab23a776SDave Chinner 	xfs_inodegc_queue_all(mp);
1921ab23a776SDave Chinner }
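
/*
 * Editorial note: stop/start are meant to be used in pairs around code
 * that must quiesce the filesystem (the real call sites live in the
 * freeze and remount paths).  A sketch of the expected shape:
 *
 *	xfs_inodegc_stop(mp);
 *	... operate on the quiesced filesystem ...
 *	xfs_inodegc_start(mp);
 */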
1922ab23a776SDave Chinner 
192365f03d86SDarrick J. Wong #ifdef CONFIG_XFS_RT
192465f03d86SDarrick J. Wong static inline bool
192565f03d86SDarrick J. Wong xfs_inodegc_want_queue_rt_file(
192665f03d86SDarrick J. Wong 	struct xfs_inode	*ip)
192765f03d86SDarrick J. Wong {
192865f03d86SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
192965f03d86SDarrick J. Wong 
193065f03d86SDarrick J. Wong 	if (!XFS_IS_REALTIME_INODE(ip))
193165f03d86SDarrick J. Wong 		return false;
193265f03d86SDarrick J. Wong 
19332229276cSDarrick J. Wong 	if (__percpu_counter_compare(&mp->m_frextents,
19342229276cSDarrick J. Wong 				mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
19352229276cSDarrick J. Wong 				XFS_FDBLOCKS_BATCH) < 0)
19362229276cSDarrick J. Wong 		return true;
19372229276cSDarrick J. Wong 
19382229276cSDarrick J. Wong 	return false;
193965f03d86SDarrick J. Wong }
194065f03d86SDarrick J. Wong #else
194165f03d86SDarrick J. Wong # define xfs_inodegc_want_queue_rt_file(ip)	(false)
194265f03d86SDarrick J. Wong #endif /* CONFIG_XFS_RT */
194365f03d86SDarrick J. Wong 
1944ab23a776SDave Chinner /*
1945ab23a776SDave Chinner  * Schedule the inactivation worker when:
1946ab23a776SDave Chinner  *
1947ab23a776SDave Chinner  *  - We've accumulated more than one inode cluster buffer's worth of inodes.
19487d6f07d2SDarrick J. Wong  *  - There is less than 5% free space left.
 *  - The file is a realtime file and there are less than 5% free realtime
 *    extents left.
1949108523b8SDarrick J. Wong  *  - Any of the quotas for this inode are near an enforcement limit.
1950ab23a776SDave Chinner  */
1951ab23a776SDave Chinner static inline bool
1952ab23a776SDave Chinner xfs_inodegc_want_queue_work(
1953ab23a776SDave Chinner 	struct xfs_inode	*ip,
1954ab23a776SDave Chinner 	unsigned int		items)
1955ab23a776SDave Chinner {
1956ab23a776SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
1957ab23a776SDave Chinner 
1958ab23a776SDave Chinner 	if (items > mp->m_ino_geo.inodes_per_cluster)
1959ab23a776SDave Chinner 		return true;
1960ab23a776SDave Chinner 
19617d6f07d2SDarrick J. Wong 	if (__percpu_counter_compare(&mp->m_fdblocks,
19627d6f07d2SDarrick J. Wong 				mp->m_low_space[XFS_LOWSP_5_PCNT],
19637d6f07d2SDarrick J. Wong 				XFS_FDBLOCKS_BATCH) < 0)
19647d6f07d2SDarrick J. Wong 		return true;
19657d6f07d2SDarrick J. Wong 
196665f03d86SDarrick J. Wong 	if (xfs_inodegc_want_queue_rt_file(ip))
196765f03d86SDarrick J. Wong 		return true;
196865f03d86SDarrick J. Wong 
1969108523b8SDarrick J. Wong 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
1970108523b8SDarrick J. Wong 		return true;
1971108523b8SDarrick J. Wong 
1972108523b8SDarrick J. Wong 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
1973108523b8SDarrick J. Wong 		return true;
1974108523b8SDarrick J. Wong 
1975108523b8SDarrick J. Wong 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
1976108523b8SDarrick J. Wong 		return true;
1977108523b8SDarrick J. Wong 
1978ab23a776SDave Chinner 	return false;
1979ab23a776SDave Chinner }
1980ab23a776SDave Chinner 
1981ab23a776SDave Chinner /*
1982ab23a776SDave Chinner  * Upper bound on the number of inodes in each AG that can be queued for
1983ab23a776SDave Chinner  * inactivation at any given time, to avoid monopolizing the workqueue.
1984ab23a776SDave Chinner  */
1985ab23a776SDave Chinner #define XFS_INODEGC_MAX_BACKLOG		(4 * XFS_INODES_PER_CHUNK)
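
/*
 * Editorial note: XFS_INODES_PER_CHUNK is 64, so this caps each per-cpu
 * queue at 256 inodes before xfs_inodegc_want_flush_work() starts making
 * the frontend wait.
 */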
1986ab23a776SDave Chinner 
1987ab23a776SDave Chinner /*
1988ab23a776SDave Chinner  * Make the frontend wait for inactivations when:
1989ab23a776SDave Chinner  *
199040b1de00SDarrick J. Wong  *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
1991ab23a776SDave Chinner  *  - The queue depth exceeds the maximum allowable percpu backlog.
1992ab23a776SDave Chinner  *
1993ab23a776SDave Chinner  * Note: If the current thread is running a transaction, we don't ever want to
1994ab23a776SDave Chinner  * wait for other transactions because that could introduce a deadlock.
1995ab23a776SDave Chinner  */
1996ab23a776SDave Chinner static inline bool
1997ab23a776SDave Chinner xfs_inodegc_want_flush_work(
1998ab23a776SDave Chinner 	struct xfs_inode	*ip,
199940b1de00SDarrick J. Wong 	unsigned int		items,
200040b1de00SDarrick J. Wong 	unsigned int		shrinker_hits)
2001ab23a776SDave Chinner {
2002ab23a776SDave Chinner 	if (current->journal_info)
2003ab23a776SDave Chinner 		return false;
2004ab23a776SDave Chinner 
200540b1de00SDarrick J. Wong 	if (shrinker_hits > 0)
200640b1de00SDarrick J. Wong 		return true;
200740b1de00SDarrick J. Wong 
2008ab23a776SDave Chinner 	if (items > XFS_INODEGC_MAX_BACKLOG)
2009ab23a776SDave Chinner 		return true;
2010ab23a776SDave Chinner 
2011ab23a776SDave Chinner 	return false;
2012ab23a776SDave Chinner }
2013ab23a776SDave Chinner 
2014ab23a776SDave Chinner /*
2015ab23a776SDave Chinner  * Queue a background inactivation worker if there are inodes that need to be
2016ab23a776SDave Chinner  * inactivated and higher level xfs code hasn't disabled the background
2017ab23a776SDave Chinner  * workers.
2018ab23a776SDave Chinner  */
2019ab23a776SDave Chinner static void
2020ab23a776SDave Chinner xfs_inodegc_queue(
2021ab23a776SDave Chinner 	struct xfs_inode	*ip)
2022ab23a776SDave Chinner {
2023ab23a776SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
2024ab23a776SDave Chinner 	struct xfs_inodegc	*gc;
2025ab23a776SDave Chinner 	int			items;
202640b1de00SDarrick J. Wong 	unsigned int		shrinker_hits;
20277cf2b0f9SDave Chinner 	unsigned long		queue_delay = 1;
2028ab23a776SDave Chinner 
2029ab23a776SDave Chinner 	trace_xfs_inode_set_need_inactive(ip);
2030ab23a776SDave Chinner 	spin_lock(&ip->i_flags_lock);
2031ab23a776SDave Chinner 	ip->i_flags |= XFS_NEED_INACTIVE;
2032ab23a776SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2033ab23a776SDave Chinner 
2034ab23a776SDave Chinner 	gc = get_cpu_ptr(mp->m_inodegc);
2035ab23a776SDave Chinner 	llist_add(&ip->i_gclist, &gc->list);
2036ab23a776SDave Chinner 	items = READ_ONCE(gc->items);
2037ab23a776SDave Chinner 	WRITE_ONCE(gc->items, items + 1);
203840b1de00SDarrick J. Wong 	shrinker_hits = READ_ONCE(gc->shrinker_hits);
20397cf2b0f9SDave Chinner 
20407cf2b0f9SDave Chinner 	/*
20417cf2b0f9SDave Chinner 	 * We queue the work while holding the current CPU so that the work
20427cf2b0f9SDave Chinner 	 * is scheduled to run on this CPU.
20437cf2b0f9SDave Chinner 	 */
20447cf2b0f9SDave Chinner 	if (!xfs_is_inodegc_enabled(mp)) {
2045ab23a776SDave Chinner 		put_cpu_ptr(gc);
2046ab23a776SDave Chinner 		return;
2047ab23a776SDave Chinner 	}
2048ab23a776SDave Chinner 
20497cf2b0f9SDave Chinner 	if (xfs_inodegc_want_queue_work(ip, items))
20507cf2b0f9SDave Chinner 		queue_delay = 0;
20517cf2b0f9SDave Chinner 
20527cf2b0f9SDave Chinner 	trace_xfs_inodegc_queue(mp, __return_address);
20537cf2b0f9SDave Chinner 	mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
20547cf2b0f9SDave Chinner 	put_cpu_ptr(gc);
20557cf2b0f9SDave Chinner 
205640b1de00SDarrick J. Wong 	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2057ab23a776SDave Chinner 		trace_xfs_inodegc_throttle(mp, __return_address);
20587cf2b0f9SDave Chinner 		flush_delayed_work(&gc->work);
2059ab23a776SDave Chinner 	}
2060ab23a776SDave Chinner }
2061ab23a776SDave Chinner 
2062ab23a776SDave Chinner /*
2063ab23a776SDave Chinner  * Fold the dead CPU inodegc queue into the current CPUs queue.
2064ab23a776SDave Chinner  */
2065ab23a776SDave Chinner void
2066ab23a776SDave Chinner xfs_inodegc_cpu_dead(
2067ab23a776SDave Chinner 	struct xfs_mount	*mp,
2068ab23a776SDave Chinner 	unsigned int		dead_cpu)
2069ab23a776SDave Chinner {
2070ab23a776SDave Chinner 	struct xfs_inodegc	*dead_gc, *gc;
2071ab23a776SDave Chinner 	struct llist_node	*first, *last;
2072ab23a776SDave Chinner 	unsigned int		count = 0;
2073ab23a776SDave Chinner 
2074ab23a776SDave Chinner 	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
20757cf2b0f9SDave Chinner 	cancel_delayed_work_sync(&dead_gc->work);
2076ab23a776SDave Chinner 
2077ab23a776SDave Chinner 	if (llist_empty(&dead_gc->list))
2078ab23a776SDave Chinner 		return;
2079ab23a776SDave Chinner 
2080ab23a776SDave Chinner 	first = dead_gc->list.first;
2081ab23a776SDave Chinner 	last = first;
2082ab23a776SDave Chinner 	while (last->next) {
2083ab23a776SDave Chinner 		last = last->next;
2084ab23a776SDave Chinner 		count++;
2085ab23a776SDave Chinner 	}
2086ab23a776SDave Chinner 	dead_gc->list.first = NULL;
2087ab23a776SDave Chinner 	dead_gc->items = 0;
2088ab23a776SDave Chinner 
2089ab23a776SDave Chinner 	/* Add pending work to current CPU */
2090ab23a776SDave Chinner 	gc = get_cpu_ptr(mp->m_inodegc);
2091ab23a776SDave Chinner 	llist_add_batch(first, last, &gc->list);
2092ab23a776SDave Chinner 	count += READ_ONCE(gc->items);
2093ab23a776SDave Chinner 	WRITE_ONCE(gc->items, count);
2094ab23a776SDave Chinner 
2095ab23a776SDave Chinner 	if (xfs_is_inodegc_enabled(mp)) {
2096ab23a776SDave Chinner 		trace_xfs_inodegc_queue(mp, __return_address);
20977cf2b0f9SDave Chinner 		mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
2098ab23a776SDave Chinner 	}
20997cf2b0f9SDave Chinner 	put_cpu_ptr(gc);
2100ab23a776SDave Chinner }
2101ab23a776SDave Chinner 
2102ab23a776SDave Chinner /*
2103ab23a776SDave Chinner  * We set the inode flag atomically with the radix tree tag.  Once we get tag
2104ab23a776SDave Chinner  * lookups on the radix tree, this inode flag can go away.
2105ab23a776SDave Chinner  *
2106ab23a776SDave Chinner  * We always use background reclaim here because even if the inode is clean, it
2107ab23a776SDave Chinner  * still may be under IO and hence we have to wait for IO completion to occur
2108ab23a776SDave Chinner  * before we can reclaim the inode. The background reclaim path handles this
2109ab23a776SDave Chinner  * more efficiently than we can here, so simply let background reclaim tear down
2110ab23a776SDave Chinner  * all inodes.
2111ab23a776SDave Chinner  */
2112ab23a776SDave Chinner void
2113ab23a776SDave Chinner xfs_inode_mark_reclaimable(
2114ab23a776SDave Chinner 	struct xfs_inode	*ip)
2115ab23a776SDave Chinner {
2116ab23a776SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
2117ab23a776SDave Chinner 	bool			need_inactive;
2118ab23a776SDave Chinner 
2119ab23a776SDave Chinner 	XFS_STATS_INC(mp, vn_reclaim);
2120ab23a776SDave Chinner 
2121ab23a776SDave Chinner 	/*
2122ab23a776SDave Chinner 	 * We should never get here with any of the reclaim flags already set.
2123ab23a776SDave Chinner 	 */
2124ab23a776SDave Chinner 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2125ab23a776SDave Chinner 
2126ab23a776SDave Chinner 	need_inactive = xfs_inode_needs_inactive(ip);
2127ab23a776SDave Chinner 	if (need_inactive) {
2128ab23a776SDave Chinner 		xfs_inodegc_queue(ip);
2129ab23a776SDave Chinner 		return;
2130ab23a776SDave Chinner 	}
2131ab23a776SDave Chinner 
2132ab23a776SDave Chinner 	/* Going straight to reclaim, so drop the dquots. */
2133ab23a776SDave Chinner 	xfs_qm_dqdetach(ip);
2134ab23a776SDave Chinner 	xfs_inodegc_set_reclaimable(ip);
2135ab23a776SDave Chinner }
213640b1de00SDarrick J. Wong 
213740b1de00SDarrick J. Wong /*
213840b1de00SDarrick J. Wong  * Register a phony shrinker so that we can run background inodegc sooner when
213940b1de00SDarrick J. Wong  * there's memory pressure.  Inactivation does not itself free any memory but
214040b1de00SDarrick J. Wong  * it does make inodes reclaimable, which eventually frees memory.
214140b1de00SDarrick J. Wong  *
214240b1de00SDarrick J. Wong  * The count function, seek value, and batch value are crafted to trigger the
214340b1de00SDarrick J. Wong  * scan function during the second round of scanning.  Hopefully this means
214440b1de00SDarrick J. Wong  * that we reclaimed enough memory that initiating metadata transactions won't
214540b1de00SDarrick J. Wong  * make things worse.
214640b1de00SDarrick J. Wong  */
214740b1de00SDarrick J. Wong #define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
214840b1de00SDarrick J. Wong #define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
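
/*
 * Editorial arithmetic (assuming the usual DEF_PRIORITY of 12): COUNT is
 * 4096 and BATCH is 2049, just over half of one count's worth, so a single
 * low-pressure pass does not accumulate enough deferred work to trigger
 * the scan callback.
 */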
214940b1de00SDarrick J. Wong 
215040b1de00SDarrick J. Wong static unsigned long
215140b1de00SDarrick J. Wong xfs_inodegc_shrinker_count(
215240b1de00SDarrick J. Wong 	struct shrinker		*shrink,
215340b1de00SDarrick J. Wong 	struct shrink_control	*sc)
215440b1de00SDarrick J. Wong {
215540b1de00SDarrick J. Wong 	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
215640b1de00SDarrick J. Wong 						   m_inodegc_shrinker);
215740b1de00SDarrick J. Wong 	struct xfs_inodegc	*gc;
215840b1de00SDarrick J. Wong 	int			cpu;
215940b1de00SDarrick J. Wong 
216040b1de00SDarrick J. Wong 	if (!xfs_is_inodegc_enabled(mp))
216140b1de00SDarrick J. Wong 		return 0;
216240b1de00SDarrick J. Wong 
216340b1de00SDarrick J. Wong 	for_each_online_cpu(cpu) {
216440b1de00SDarrick J. Wong 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
216540b1de00SDarrick J. Wong 		if (!llist_empty(&gc->list))
216640b1de00SDarrick J. Wong 			return XFS_INODEGC_SHRINKER_COUNT;
216740b1de00SDarrick J. Wong 	}
216840b1de00SDarrick J. Wong 
216940b1de00SDarrick J. Wong 	return 0;
217040b1de00SDarrick J. Wong }
217140b1de00SDarrick J. Wong 
217240b1de00SDarrick J. Wong static unsigned long
217340b1de00SDarrick J. Wong xfs_inodegc_shrinker_scan(
217440b1de00SDarrick J. Wong 	struct shrinker		*shrink,
217540b1de00SDarrick J. Wong 	struct shrink_control	*sc)
217640b1de00SDarrick J. Wong {
217740b1de00SDarrick J. Wong 	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
217840b1de00SDarrick J. Wong 						   m_inodegc_shrinker);
217940b1de00SDarrick J. Wong 	struct xfs_inodegc	*gc;
218040b1de00SDarrick J. Wong 	int			cpu;
218140b1de00SDarrick J. Wong 	bool			no_items = true;
218240b1de00SDarrick J. Wong 
218340b1de00SDarrick J. Wong 	if (!xfs_is_inodegc_enabled(mp))
218440b1de00SDarrick J. Wong 		return SHRINK_STOP;
218540b1de00SDarrick J. Wong 
218640b1de00SDarrick J. Wong 	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
218740b1de00SDarrick J. Wong 
218840b1de00SDarrick J. Wong 	for_each_online_cpu(cpu) {
218940b1de00SDarrick J. Wong 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
219040b1de00SDarrick J. Wong 		if (!llist_empty(&gc->list)) {
219140b1de00SDarrick J. Wong 			unsigned int	h = READ_ONCE(gc->shrinker_hits);
219240b1de00SDarrick J. Wong 
219340b1de00SDarrick J. Wong 			WRITE_ONCE(gc->shrinker_hits, h + 1);
21947cf2b0f9SDave Chinner 			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
219540b1de00SDarrick J. Wong 			no_items = false;
219640b1de00SDarrick J. Wong 		}
219740b1de00SDarrick J. Wong 	}
219840b1de00SDarrick J. Wong 
219940b1de00SDarrick J. Wong 	/*
220040b1de00SDarrick J. Wong 	 * If there are no inodes to inactivate, we don't want the shrinker
220140b1de00SDarrick J. Wong 	 * to think there's deferred work to call us back about.
220240b1de00SDarrick J. Wong 	 */
220340b1de00SDarrick J. Wong 	if (no_items)
220440b1de00SDarrick J. Wong 		return LONG_MAX;
220540b1de00SDarrick J. Wong 
220640b1de00SDarrick J. Wong 	return SHRINK_STOP;
220740b1de00SDarrick J. Wong }
220840b1de00SDarrick J. Wong 
220940b1de00SDarrick J. Wong /* Register a shrinker so we can accelerate inodegc and throttle queuing. */
221040b1de00SDarrick J. Wong int
221140b1de00SDarrick J. Wong xfs_inodegc_register_shrinker(
221240b1de00SDarrick J. Wong 	struct xfs_mount	*mp)
221340b1de00SDarrick J. Wong {
221440b1de00SDarrick J. Wong 	struct shrinker		*shrink = &mp->m_inodegc_shrinker;
221540b1de00SDarrick J. Wong 
221640b1de00SDarrick J. Wong 	shrink->count_objects = xfs_inodegc_shrinker_count;
221740b1de00SDarrick J. Wong 	shrink->scan_objects = xfs_inodegc_shrinker_scan;
221840b1de00SDarrick J. Wong 	shrink->seeks = 0;
221940b1de00SDarrick J. Wong 	shrink->flags = SHRINKER_NONSLAB;
222040b1de00SDarrick J. Wong 	shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
222140b1de00SDarrick J. Wong 
222240b1de00SDarrick J. Wong 	return register_shrinker(shrink);
222340b1de00SDarrick J. Wong }
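
/*
 * Editorial note: the registration above needs a matching
 * unregister_shrinker(&mp->m_inodegc_shrinker) on the unmount and failed
 * mount paths; otherwise the shrinker list would keep a dangling pointer
 * after the xfs_mount is freed.
 */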
2224