xref: /openbmc/linux/fs/xfs/xfs_icache.c (revision 2e973b2c)
10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
26d8b79cfSDave Chinner /*
36d8b79cfSDave Chinner  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
46d8b79cfSDave Chinner  * All Rights Reserved.
56d8b79cfSDave Chinner  */
66d8b79cfSDave Chinner #include "xfs.h"
76d8b79cfSDave Chinner #include "xfs_fs.h"
85467b34bSDarrick J. Wong #include "xfs_shared.h"
96ca1c906SDave Chinner #include "xfs_format.h"
10239880efSDave Chinner #include "xfs_log_format.h"
11239880efSDave Chinner #include "xfs_trans_resv.h"
126d8b79cfSDave Chinner #include "xfs_mount.h"
136d8b79cfSDave Chinner #include "xfs_inode.h"
14239880efSDave Chinner #include "xfs_trans.h"
15239880efSDave Chinner #include "xfs_trans_priv.h"
166d8b79cfSDave Chinner #include "xfs_inode_item.h"
176d8b79cfSDave Chinner #include "xfs_quota.h"
186d8b79cfSDave Chinner #include "xfs_trace.h"
196d8b79cfSDave Chinner #include "xfs_icache.h"
20c24b5dfaSDave Chinner #include "xfs_bmap_util.h"
21dc06f398SBrian Foster #include "xfs_dquot_item.h"
22dc06f398SBrian Foster #include "xfs_dquot.h"
2383104d44SDarrick J. Wong #include "xfs_reflink.h"
24bb8a66afSChristoph Hellwig #include "xfs_ialloc.h"
259bbafc71SDave Chinner #include "xfs_ag.h"
266d8b79cfSDave Chinner 
27f0e28280SJeff Layton #include <linux/iversion.h>
286d8b79cfSDave Chinner 
29c809d7e9SDarrick J. Wong /* Radix tree tags for incore inode tree. */
30c809d7e9SDarrick J. Wong 
31c809d7e9SDarrick J. Wong /* inode is to be reclaimed */
32c809d7e9SDarrick J. Wong #define XFS_ICI_RECLAIM_TAG	0
33c809d7e9SDarrick J. Wong /* Inode has speculative preallocations (posteof or cow) to clean. */
34c809d7e9SDarrick J. Wong #define XFS_ICI_BLOCKGC_TAG	1
35c809d7e9SDarrick J. Wong 
36c809d7e9SDarrick J. Wong /*
37c809d7e9SDarrick J. Wong  * The goal for walking incore inodes.  These can correspond with incore inode
38c809d7e9SDarrick J. Wong  * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
39c809d7e9SDarrick J. Wong  */
40c809d7e9SDarrick J. Wong enum xfs_icwalk_goal {
41c809d7e9SDarrick J. Wong 	/* Goals directly associated with tagged inodes. */
42c809d7e9SDarrick J. Wong 	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
43f1bc5c56SDarrick J. Wong 	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
44c809d7e9SDarrick J. Wong };
45c809d7e9SDarrick J. Wong 
467fdff526SDarrick J. Wong static int xfs_icwalk(struct xfs_mount *mp,
47b26b2bf1SDarrick J. Wong 		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
487fdff526SDarrick J. Wong static int xfs_icwalk_ag(struct xfs_perag *pag,
49b26b2bf1SDarrick J. Wong 		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
50df600197SDarrick J. Wong 
5133479e05SDave Chinner /*
52b26b2bf1SDarrick J. Wong  * Private inode cache walk flags for struct xfs_icwalk.  Must not
53b26b2bf1SDarrick J. Wong  * coincide with XFS_ICWALK_FLAGS_VALID.
541ad2cfe0SDarrick J. Wong  */
551ad2cfe0SDarrick J. Wong 
56f1bc5c56SDarrick J. Wong /* Stop scanning after icw_scan_limit inodes. */
57f1bc5c56SDarrick J. Wong #define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)
58f1bc5c56SDarrick J. Wong 
599492750aSDarrick J. Wong #define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
602d53f66bSDarrick J. Wong #define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */
619492750aSDarrick J. Wong 
62777eb1faSChristoph Hellwig #define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
632d53f66bSDarrick J. Wong 					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
642d53f66bSDarrick J. Wong 					 XFS_ICWALK_FLAG_UNION)
651ad2cfe0SDarrick J. Wong 
6633479e05SDave Chinner /*
6733479e05SDave Chinner  * Allocate and initialise an xfs_inode.
6833479e05SDave Chinner  */
69638f4416SDave Chinner struct xfs_inode *
7033479e05SDave Chinner xfs_inode_alloc(
7133479e05SDave Chinner 	struct xfs_mount	*mp,
7233479e05SDave Chinner 	xfs_ino_t		ino)
7333479e05SDave Chinner {
7433479e05SDave Chinner 	struct xfs_inode	*ip;
7533479e05SDave Chinner 
7633479e05SDave Chinner 	/*
773050bd0bSCarlos Maiolino 	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
783050bd0bSCarlos Maiolino 	 * and return NULL here on ENOMEM.
7933479e05SDave Chinner 	 */
803050bd0bSCarlos Maiolino 	ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);
813050bd0bSCarlos Maiolino 
8233479e05SDave Chinner 	if (inode_init_always(mp->m_super, VFS_I(ip))) {
83377bcd5fSCarlos Maiolino 		kmem_cache_free(xfs_inode_zone, ip);
8433479e05SDave Chinner 		return NULL;
8533479e05SDave Chinner 	}
8633479e05SDave Chinner 
87c19b3b05SDave Chinner 	/* VFS doesn't initialise i_mode! */
88c19b3b05SDave Chinner 	VFS_I(ip)->i_mode = 0;
89c19b3b05SDave Chinner 
90ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, vn_active);
9133479e05SDave Chinner 	ASSERT(atomic_read(&ip->i_pincount) == 0);
9233479e05SDave Chinner 	ASSERT(ip->i_ino == 0);
9333479e05SDave Chinner 
9433479e05SDave Chinner 	/* initialise the xfs inode */
9533479e05SDave Chinner 	ip->i_ino = ino;
9633479e05SDave Chinner 	ip->i_mount = mp;
9733479e05SDave Chinner 	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
9833479e05SDave Chinner 	ip->i_afp = NULL;
993993baebSDarrick J. Wong 	ip->i_cowfp = NULL;
1003ba738dfSChristoph Hellwig 	memset(&ip->i_df, 0, sizeof(ip->i_df));
10133479e05SDave Chinner 	ip->i_flags = 0;
10233479e05SDave Chinner 	ip->i_delayed_blks = 0;
1033e09ab8fSChristoph Hellwig 	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
1046e73a545SChristoph Hellwig 	ip->i_nblocks = 0;
1057821ea30SChristoph Hellwig 	ip->i_forkoff = 0;
1066772c1f1SDarrick J. Wong 	ip->i_sick = 0;
1076772c1f1SDarrick J. Wong 	ip->i_checked = 0;
108cb357bf3SDarrick J. Wong 	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
109cb357bf3SDarrick J. Wong 	INIT_LIST_HEAD(&ip->i_ioend_list);
110cb357bf3SDarrick J. Wong 	spin_lock_init(&ip->i_ioend_lock);
11133479e05SDave Chinner 
11233479e05SDave Chinner 	return ip;
11333479e05SDave Chinner }
11433479e05SDave Chinner 
11533479e05SDave Chinner STATIC void
11633479e05SDave Chinner xfs_inode_free_callback(
11733479e05SDave Chinner 	struct rcu_head		*head)
11833479e05SDave Chinner {
11933479e05SDave Chinner 	struct inode		*inode = container_of(head, struct inode, i_rcu);
12033479e05SDave Chinner 	struct xfs_inode	*ip = XFS_I(inode);
12133479e05SDave Chinner 
122c19b3b05SDave Chinner 	switch (VFS_I(ip)->i_mode & S_IFMT) {
12333479e05SDave Chinner 	case S_IFREG:
12433479e05SDave Chinner 	case S_IFDIR:
12533479e05SDave Chinner 	case S_IFLNK:
126ef838512SChristoph Hellwig 		xfs_idestroy_fork(&ip->i_df);
12733479e05SDave Chinner 		break;
12833479e05SDave Chinner 	}
12933479e05SDave Chinner 
130ef838512SChristoph Hellwig 	if (ip->i_afp) {
131ef838512SChristoph Hellwig 		xfs_idestroy_fork(ip->i_afp);
132ef838512SChristoph Hellwig 		kmem_cache_free(xfs_ifork_zone, ip->i_afp);
133ef838512SChristoph Hellwig 	}
134ef838512SChristoph Hellwig 	if (ip->i_cowfp) {
135ef838512SChristoph Hellwig 		xfs_idestroy_fork(ip->i_cowfp);
136ef838512SChristoph Hellwig 		kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
137ef838512SChristoph Hellwig 	}
13833479e05SDave Chinner 	if (ip->i_itemp) {
13922525c17SDave Chinner 		ASSERT(!test_bit(XFS_LI_IN_AIL,
14022525c17SDave Chinner 				 &ip->i_itemp->ili_item.li_flags));
14133479e05SDave Chinner 		xfs_inode_item_destroy(ip);
14233479e05SDave Chinner 		ip->i_itemp = NULL;
14333479e05SDave Chinner 	}
14433479e05SDave Chinner 
145377bcd5fSCarlos Maiolino 	kmem_cache_free(xfs_inode_zone, ip);
1461f2dcfe8SDave Chinner }
1471f2dcfe8SDave Chinner 
1488a17d7ddSDave Chinner static void
1498a17d7ddSDave Chinner __xfs_inode_free(
1508a17d7ddSDave Chinner 	struct xfs_inode	*ip)
1518a17d7ddSDave Chinner {
1528a17d7ddSDave Chinner 	/* asserts to verify all state is correct here */
1538a17d7ddSDave Chinner 	ASSERT(atomic_read(&ip->i_pincount) == 0);
15448d55e2aSDave Chinner 	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
1558a17d7ddSDave Chinner 	XFS_STATS_DEC(ip->i_mount, vn_active);
1568a17d7ddSDave Chinner 
1578a17d7ddSDave Chinner 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
1588a17d7ddSDave Chinner }
1598a17d7ddSDave Chinner 
1601f2dcfe8SDave Chinner void
1611f2dcfe8SDave Chinner xfs_inode_free(
1621f2dcfe8SDave Chinner 	struct xfs_inode	*ip)
1631f2dcfe8SDave Chinner {
164718ecc50SDave Chinner 	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
16598efe8afSBrian Foster 
16633479e05SDave Chinner 	/*
16733479e05SDave Chinner 	 * Because we use RCU freeing we need to ensure the inode always
16833479e05SDave Chinner 	 * appears to be reclaimed with an invalid inode number when in the
16933479e05SDave Chinner 	 * free state. The ip->i_flags_lock provides the barrier against lookup
17033479e05SDave Chinner 	 * races.
17133479e05SDave Chinner 	 */
17233479e05SDave Chinner 	spin_lock(&ip->i_flags_lock);
17333479e05SDave Chinner 	ip->i_flags = XFS_IRECLAIM;
17433479e05SDave Chinner 	ip->i_ino = 0;
17533479e05SDave Chinner 	spin_unlock(&ip->i_flags_lock);
17633479e05SDave Chinner 
1778a17d7ddSDave Chinner 	__xfs_inode_free(ip);
17833479e05SDave Chinner }
17933479e05SDave Chinner 
18033479e05SDave Chinner /*
18102511a5aSDave Chinner  * Queue background inode reclaim work if there are reclaimable inodes and there
18202511a5aSDave Chinner  * isn't reclaim work already scheduled or in progress.
183ad438c40SDave Chinner  */
184ad438c40SDave Chinner static void
185ad438c40SDave Chinner xfs_reclaim_work_queue(
186ad438c40SDave Chinner 	struct xfs_mount        *mp)
187ad438c40SDave Chinner {
188ad438c40SDave Chinner 
189ad438c40SDave Chinner 	rcu_read_lock();
190ad438c40SDave Chinner 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
191ad438c40SDave Chinner 		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
192ad438c40SDave Chinner 			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
193ad438c40SDave Chinner 	}
194ad438c40SDave Chinner 	rcu_read_unlock();
195ad438c40SDave Chinner }
196ad438c40SDave Chinner 
197c076ae7aSDarrick J. Wong /*
198c076ae7aSDarrick J. Wong  * Background scanning to trim preallocated space. This is queued based on the
199c076ae7aSDarrick J. Wong  * 'speculative_prealloc_lifetime' tunable (5m by default).
200c076ae7aSDarrick J. Wong  */
201c076ae7aSDarrick J. Wong static inline void
202c076ae7aSDarrick J. Wong xfs_blockgc_queue(
203ad438c40SDave Chinner 	struct xfs_perag	*pag)
204ad438c40SDave Chinner {
2056f649091SDarrick J. Wong 	struct xfs_mount	*mp = pag->pag_mount;
2066f649091SDarrick J. Wong 
2076f649091SDarrick J. Wong 	if (!xfs_is_blockgc_enabled(mp))
2086f649091SDarrick J. Wong 		return;
2096f649091SDarrick J. Wong 
210c076ae7aSDarrick J. Wong 	rcu_read_lock();
211c076ae7aSDarrick J. Wong 	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
212ab23a776SDave Chinner 		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
213c076ae7aSDarrick J. Wong 				   &pag->pag_blockgc_work,
214c076ae7aSDarrick J. Wong 				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
215c076ae7aSDarrick J. Wong 	rcu_read_unlock();
216c076ae7aSDarrick J. Wong }
217c076ae7aSDarrick J. Wong 
218c076ae7aSDarrick J. Wong /* Set a tag on both the AG incore inode tree and the AG radix tree. */
219c076ae7aSDarrick J. Wong static void
220c076ae7aSDarrick J. Wong xfs_perag_set_inode_tag(
221c076ae7aSDarrick J. Wong 	struct xfs_perag	*pag,
222c076ae7aSDarrick J. Wong 	xfs_agino_t		agino,
223c076ae7aSDarrick J. Wong 	unsigned int		tag)
224c076ae7aSDarrick J. Wong {
225ad438c40SDave Chinner 	struct xfs_mount	*mp = pag->pag_mount;
226c076ae7aSDarrick J. Wong 	bool			was_tagged;
227ad438c40SDave Chinner 
22895989c46SBrian Foster 	lockdep_assert_held(&pag->pag_ici_lock);
229c076ae7aSDarrick J. Wong 
230c076ae7aSDarrick J. Wong 	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
231c076ae7aSDarrick J. Wong 	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
232c076ae7aSDarrick J. Wong 
233c076ae7aSDarrick J. Wong 	if (tag == XFS_ICI_RECLAIM_TAG)
234c076ae7aSDarrick J. Wong 		pag->pag_ici_reclaimable++;
235c076ae7aSDarrick J. Wong 
236c076ae7aSDarrick J. Wong 	if (was_tagged)
237ad438c40SDave Chinner 		return;
238ad438c40SDave Chinner 
239c076ae7aSDarrick J. Wong 	/* propagate the tag up into the perag radix tree */
240ad438c40SDave Chinner 	spin_lock(&mp->m_perag_lock);
241c076ae7aSDarrick J. Wong 	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
242ad438c40SDave Chinner 	spin_unlock(&mp->m_perag_lock);
243ad438c40SDave Chinner 
244c076ae7aSDarrick J. Wong 	/* start background work */
245c076ae7aSDarrick J. Wong 	switch (tag) {
246c076ae7aSDarrick J. Wong 	case XFS_ICI_RECLAIM_TAG:
247ad438c40SDave Chinner 		xfs_reclaim_work_queue(mp);
248c076ae7aSDarrick J. Wong 		break;
249c076ae7aSDarrick J. Wong 	case XFS_ICI_BLOCKGC_TAG:
250c076ae7aSDarrick J. Wong 		xfs_blockgc_queue(pag);
251c076ae7aSDarrick J. Wong 		break;
252ad438c40SDave Chinner 	}
253ad438c40SDave Chinner 
254c076ae7aSDarrick J. Wong 	trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
255c076ae7aSDarrick J. Wong }
256c076ae7aSDarrick J. Wong 
257c076ae7aSDarrick J. Wong /* Clear a tag on both the AG incore inode tree and the AG radix tree. */
258ad438c40SDave Chinner static void
259c076ae7aSDarrick J. Wong xfs_perag_clear_inode_tag(
260c076ae7aSDarrick J. Wong 	struct xfs_perag	*pag,
261c076ae7aSDarrick J. Wong 	xfs_agino_t		agino,
262c076ae7aSDarrick J. Wong 	unsigned int		tag)
263ad438c40SDave Chinner {
264ad438c40SDave Chinner 	struct xfs_mount	*mp = pag->pag_mount;
265ad438c40SDave Chinner 
26695989c46SBrian Foster 	lockdep_assert_held(&pag->pag_ici_lock);
267c076ae7aSDarrick J. Wong 
268c076ae7aSDarrick J. Wong 	/*
269c076ae7aSDarrick J. Wong 	 * Reclaim can signal (with a null agino) that it cleared its own tag
270c076ae7aSDarrick J. Wong 	 * by removing the inode from the radix tree.
271c076ae7aSDarrick J. Wong 	 */
272c076ae7aSDarrick J. Wong 	if (agino != NULLAGINO)
273c076ae7aSDarrick J. Wong 		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
274c076ae7aSDarrick J. Wong 	else
275c076ae7aSDarrick J. Wong 		ASSERT(tag == XFS_ICI_RECLAIM_TAG);
276c076ae7aSDarrick J. Wong 
277c076ae7aSDarrick J. Wong 	if (tag == XFS_ICI_RECLAIM_TAG)
278c076ae7aSDarrick J. Wong 		pag->pag_ici_reclaimable--;
279c076ae7aSDarrick J. Wong 
280c076ae7aSDarrick J. Wong 	if (radix_tree_tagged(&pag->pag_ici_root, tag))
281ad438c40SDave Chinner 		return;
282ad438c40SDave Chinner 
283c076ae7aSDarrick J. Wong 	/* clear the tag from the perag radix tree */
284ad438c40SDave Chinner 	spin_lock(&mp->m_perag_lock);
285c076ae7aSDarrick J. Wong 	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
286ad438c40SDave Chinner 	spin_unlock(&mp->m_perag_lock);
287ad438c40SDave Chinner 
288c076ae7aSDarrick J. Wong 	trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
289c076ae7aSDarrick J. Wong }
290ad438c40SDave Chinner 
2917fdff526SDarrick J. Wong static inline void
292ae2c4ac2SBrian Foster xfs_inew_wait(
293ae2c4ac2SBrian Foster 	struct xfs_inode	*ip)
294ae2c4ac2SBrian Foster {
295ae2c4ac2SBrian Foster 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
296ae2c4ac2SBrian Foster 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
297ae2c4ac2SBrian Foster 
298ae2c4ac2SBrian Foster 	do {
29921417136SIngo Molnar 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
300ae2c4ac2SBrian Foster 		if (!xfs_iflags_test(ip, XFS_INEW))
301ae2c4ac2SBrian Foster 			break;
302ae2c4ac2SBrian Foster 		schedule();
303ae2c4ac2SBrian Foster 	} while (true);
30421417136SIngo Molnar 	finish_wait(wq, &wait.wq_entry);
305ae2c4ac2SBrian Foster }
306ae2c4ac2SBrian Foster 
307ad438c40SDave Chinner /*
30850997470SDave Chinner  * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
30950997470SDave Chinner  * part of the structure. This is made more complex by the fact we store
31050997470SDave Chinner  * information about the on-disk values in the VFS inode and so we can't just
31183e06f21SDave Chinner  * overwrite the values unconditionally. Hence we save the parameters we
31250997470SDave Chinner  * need to retain across reinitialisation, and rewrite them into the VFS inode
31383e06f21SDave Chinner  * after reinitialisation even if it fails.
31450997470SDave Chinner  */
31550997470SDave Chinner static int
31650997470SDave Chinner xfs_reinit_inode(
31750997470SDave Chinner 	struct xfs_mount	*mp,
31850997470SDave Chinner 	struct inode		*inode)
31950997470SDave Chinner {
32050997470SDave Chinner 	int			error;
32154d7b5c1SDave Chinner 	uint32_t		nlink = inode->i_nlink;
3229e9a2674SDave Chinner 	uint32_t		generation = inode->i_generation;
323f0e28280SJeff Layton 	uint64_t		version = inode_peek_iversion(inode);
324c19b3b05SDave Chinner 	umode_t			mode = inode->i_mode;
325acd1d715SAmir Goldstein 	dev_t			dev = inode->i_rdev;
3263d8f2821SChristoph Hellwig 	kuid_t			uid = inode->i_uid;
3273d8f2821SChristoph Hellwig 	kgid_t			gid = inode->i_gid;
32850997470SDave Chinner 
32950997470SDave Chinner 	error = inode_init_always(mp->m_super, inode);
33050997470SDave Chinner 
33154d7b5c1SDave Chinner 	set_nlink(inode, nlink);
3329e9a2674SDave Chinner 	inode->i_generation = generation;
333f0e28280SJeff Layton 	inode_set_iversion_queried(inode, version);
334c19b3b05SDave Chinner 	inode->i_mode = mode;
335acd1d715SAmir Goldstein 	inode->i_rdev = dev;
3363d8f2821SChristoph Hellwig 	inode->i_uid = uid;
3373d8f2821SChristoph Hellwig 	inode->i_gid = gid;
33850997470SDave Chinner 	return error;
33950997470SDave Chinner }
34050997470SDave Chinner 
34150997470SDave Chinner /*
342ff7bebebSDarrick J. Wong  * Carefully nudge an inode whose VFS state has been torn down back into a
343ff7bebebSDarrick J. Wong  * usable state.  Drops the i_flags_lock and the rcu read lock.
344ff7bebebSDarrick J. Wong  */
345ff7bebebSDarrick J. Wong static int
346ff7bebebSDarrick J. Wong xfs_iget_recycle(
347ff7bebebSDarrick J. Wong 	struct xfs_perag	*pag,
348ff7bebebSDarrick J. Wong 	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
349ff7bebebSDarrick J. Wong {
350ff7bebebSDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
351ff7bebebSDarrick J. Wong 	struct inode		*inode = VFS_I(ip);
352ff7bebebSDarrick J. Wong 	int			error;
353ff7bebebSDarrick J. Wong 
354ff7bebebSDarrick J. Wong 	trace_xfs_iget_recycle(ip);
355ff7bebebSDarrick J. Wong 
356ff7bebebSDarrick J. Wong 	/*
357ff7bebebSDarrick J. Wong 	 * We need to make it look like the inode is being reclaimed to prevent
358ff7bebebSDarrick J. Wong 	 * the actual reclaim workers from stomping over us while we recycle
359ff7bebebSDarrick J. Wong 	 * the inode.  We can't clear the radix tree tag yet as it requires
360ff7bebebSDarrick J. Wong 	 * pag_ici_lock to be held exclusive.
361ff7bebebSDarrick J. Wong 	 */
362ff7bebebSDarrick J. Wong 	ip->i_flags |= XFS_IRECLAIM;
363ff7bebebSDarrick J. Wong 
364ff7bebebSDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
365ff7bebebSDarrick J. Wong 	rcu_read_unlock();
366ff7bebebSDarrick J. Wong 
367ff7bebebSDarrick J. Wong 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
368ff7bebebSDarrick J. Wong 	error = xfs_reinit_inode(mp, inode);
369ff7bebebSDarrick J. Wong 	if (error) {
370ff7bebebSDarrick J. Wong 		bool	wake;
371ff7bebebSDarrick J. Wong 
372ff7bebebSDarrick J. Wong 		/*
373ff7bebebSDarrick J. Wong 		 * Re-initializing the inode failed, and we are in deep
374ff7bebebSDarrick J. Wong 		 * trouble.  Try to re-add it to the reclaim list.
375ff7bebebSDarrick J. Wong 		 */
376ff7bebebSDarrick J. Wong 		rcu_read_lock();
377ff7bebebSDarrick J. Wong 		spin_lock(&ip->i_flags_lock);
378ff7bebebSDarrick J. Wong 		wake = !!__xfs_iflags_test(ip, XFS_INEW);
379ff7bebebSDarrick J. Wong 		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
380ff7bebebSDarrick J. Wong 		if (wake)
381ff7bebebSDarrick J. Wong 			wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
382ff7bebebSDarrick J. Wong 		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
383ff7bebebSDarrick J. Wong 		spin_unlock(&ip->i_flags_lock);
384ff7bebebSDarrick J. Wong 		rcu_read_unlock();
385ff7bebebSDarrick J. Wong 
386ff7bebebSDarrick J. Wong 		trace_xfs_iget_recycle_fail(ip);
387ff7bebebSDarrick J. Wong 		return error;
388ff7bebebSDarrick J. Wong 	}
389ff7bebebSDarrick J. Wong 
390ff7bebebSDarrick J. Wong 	spin_lock(&pag->pag_ici_lock);
391ff7bebebSDarrick J. Wong 	spin_lock(&ip->i_flags_lock);
392ff7bebebSDarrick J. Wong 
393ff7bebebSDarrick J. Wong 	/*
394ff7bebebSDarrick J. Wong 	 * Clear the per-lifetime state in the inode as we are now effectively
395ff7bebebSDarrick J. Wong 	 * a new inode and need to return to the initial state before reuse
396ff7bebebSDarrick J. Wong 	 * occurs.
397ff7bebebSDarrick J. Wong 	 */
398ff7bebebSDarrick J. Wong 	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
399ff7bebebSDarrick J. Wong 	ip->i_flags |= XFS_INEW;
400ff7bebebSDarrick J. Wong 	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
401ff7bebebSDarrick J. Wong 			XFS_ICI_RECLAIM_TAG);
402ff7bebebSDarrick J. Wong 	inode->i_state = I_NEW;
403ff7bebebSDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
404ff7bebebSDarrick J. Wong 	spin_unlock(&pag->pag_ici_lock);
405ff7bebebSDarrick J. Wong 
406ff7bebebSDarrick J. Wong 	return 0;
407ff7bebebSDarrick J. Wong }
408ff7bebebSDarrick J. Wong 
409ff7bebebSDarrick J. Wong /*
410afca6c5bSDave Chinner  * If we are allocating a new inode, then check what was returned is
411afca6c5bSDave Chinner  * actually a free, empty inode. If we are not allocating an inode,
412afca6c5bSDave Chinner  * then check we didn't find a free inode.
413afca6c5bSDave Chinner  *
414afca6c5bSDave Chinner  * Returns:
415afca6c5bSDave Chinner  *	0		if the inode free state matches the lookup context
416afca6c5bSDave Chinner  *	-ENOENT		if the inode is free and we are not allocating
417afca6c5bSDave Chinner  *	-EFSCORRUPTED	if there is any state mismatch at all
418afca6c5bSDave Chinner  */
419afca6c5bSDave Chinner static int
420afca6c5bSDave Chinner xfs_iget_check_free_state(
421afca6c5bSDave Chinner 	struct xfs_inode	*ip,
422afca6c5bSDave Chinner 	int			flags)
423afca6c5bSDave Chinner {
424afca6c5bSDave Chinner 	if (flags & XFS_IGET_CREATE) {
425afca6c5bSDave Chinner 		/* should be a free inode */
426afca6c5bSDave Chinner 		if (VFS_I(ip)->i_mode != 0) {
427afca6c5bSDave Chinner 			xfs_warn(ip->i_mount,
428afca6c5bSDave Chinner "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
429afca6c5bSDave Chinner 				ip->i_ino, VFS_I(ip)->i_mode);
430afca6c5bSDave Chinner 			return -EFSCORRUPTED;
431afca6c5bSDave Chinner 		}
432afca6c5bSDave Chinner 
4336e73a545SChristoph Hellwig 		if (ip->i_nblocks != 0) {
434afca6c5bSDave Chinner 			xfs_warn(ip->i_mount,
435afca6c5bSDave Chinner "Corruption detected! Free inode 0x%llx has blocks allocated!",
436afca6c5bSDave Chinner 				ip->i_ino);
437afca6c5bSDave Chinner 			return -EFSCORRUPTED;
438afca6c5bSDave Chinner 		}
439afca6c5bSDave Chinner 		return 0;
440afca6c5bSDave Chinner 	}
441afca6c5bSDave Chinner 
442afca6c5bSDave Chinner 	/* should be an allocated inode */
443afca6c5bSDave Chinner 	if (VFS_I(ip)->i_mode == 0)
444afca6c5bSDave Chinner 		return -ENOENT;
445afca6c5bSDave Chinner 
446afca6c5bSDave Chinner 	return 0;
447afca6c5bSDave Chinner }
448afca6c5bSDave Chinner 
449ab23a776SDave Chinner /* Make all pending inactivation work start immediately. */
450ab23a776SDave Chinner static void
451ab23a776SDave Chinner xfs_inodegc_queue_all(
452ab23a776SDave Chinner 	struct xfs_mount	*mp)
453ab23a776SDave Chinner {
454ab23a776SDave Chinner 	struct xfs_inodegc	*gc;
455ab23a776SDave Chinner 	int			cpu;
456ab23a776SDave Chinner 
457ab23a776SDave Chinner 	for_each_online_cpu(cpu) {
458ab23a776SDave Chinner 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
459ab23a776SDave Chinner 		if (!llist_empty(&gc->list))
460ab23a776SDave Chinner 			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
461ab23a776SDave Chinner 	}
462ab23a776SDave Chinner }
463ab23a776SDave Chinner 
464afca6c5bSDave Chinner /*
46533479e05SDave Chinner  * Check the validity of the inode we just found it the cache
46633479e05SDave Chinner  */
46733479e05SDave Chinner static int
46833479e05SDave Chinner xfs_iget_cache_hit(
46933479e05SDave Chinner 	struct xfs_perag	*pag,
47033479e05SDave Chinner 	struct xfs_inode	*ip,
47133479e05SDave Chinner 	xfs_ino_t		ino,
47233479e05SDave Chinner 	int			flags,
47333479e05SDave Chinner 	int			lock_flags) __releases(RCU)
47433479e05SDave Chinner {
47533479e05SDave Chinner 	struct inode		*inode = VFS_I(ip);
47633479e05SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
47733479e05SDave Chinner 	int			error;
47833479e05SDave Chinner 
47933479e05SDave Chinner 	/*
48033479e05SDave Chinner 	 * check for re-use of an inode within an RCU grace period due to the
48133479e05SDave Chinner 	 * radix tree nodes not being updated yet. We monitor for this by
48233479e05SDave Chinner 	 * setting the inode number to zero before freeing the inode structure.
48333479e05SDave Chinner 	 * If the inode has been reallocated and set up, then the inode number
48433479e05SDave Chinner 	 * will not match, so check for that, too.
48533479e05SDave Chinner 	 */
48633479e05SDave Chinner 	spin_lock(&ip->i_flags_lock);
48777b4d286SDarrick J. Wong 	if (ip->i_ino != ino)
48877b4d286SDarrick J. Wong 		goto out_skip;
48933479e05SDave Chinner 
49033479e05SDave Chinner 	/*
49133479e05SDave Chinner 	 * If we are racing with another cache hit that is currently
49233479e05SDave Chinner 	 * instantiating this inode or currently recycling it out of
493ff7bebebSDarrick J. Wong 	 * reclaimable state, wait for the initialisation to complete
49433479e05SDave Chinner 	 * before continuing.
49533479e05SDave Chinner 	 *
496ab23a776SDave Chinner 	 * If we're racing with the inactivation worker we also want to wait.
497ab23a776SDave Chinner 	 * If we're creating a new file, it's possible that the worker
498ab23a776SDave Chinner 	 * previously marked the inode as free on disk but hasn't finished
499ab23a776SDave Chinner 	 * updating the incore state yet.  The AGI buffer will be dirty and
500ab23a776SDave Chinner 	 * locked to the icreate transaction, so a synchronous push of the
501ab23a776SDave Chinner 	 * inodegc workers would result in deadlock.  For a regular iget, the
502ab23a776SDave Chinner 	 * worker is running already, so we might as well wait.
503ab23a776SDave Chinner 	 *
50433479e05SDave Chinner 	 * XXX(hch): eventually we should do something equivalent to
50533479e05SDave Chinner 	 *	     wait_on_inode to wait for these flags to be cleared
50633479e05SDave Chinner 	 *	     instead of polling for it.
50733479e05SDave Chinner 	 */
508ab23a776SDave Chinner 	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
50977b4d286SDarrick J. Wong 		goto out_skip;
51033479e05SDave Chinner 
511ab23a776SDave Chinner 	if (ip->i_flags & XFS_NEED_INACTIVE) {
512ab23a776SDave Chinner 		/* Unlinked inodes cannot be re-grabbed. */
513ab23a776SDave Chinner 		if (VFS_I(ip)->i_nlink == 0) {
514ab23a776SDave Chinner 			error = -ENOENT;
515ab23a776SDave Chinner 			goto out_error;
516ab23a776SDave Chinner 		}
517ab23a776SDave Chinner 		goto out_inodegc_flush;
518ab23a776SDave Chinner 	}
519ab23a776SDave Chinner 
52033479e05SDave Chinner 	/*
521afca6c5bSDave Chinner 	 * Check the inode free state is valid. This also detects lookup
522afca6c5bSDave Chinner 	 * racing with unlinks.
52333479e05SDave Chinner 	 */
524afca6c5bSDave Chinner 	error = xfs_iget_check_free_state(ip, flags);
525afca6c5bSDave Chinner 	if (error)
52633479e05SDave Chinner 		goto out_error;
52733479e05SDave Chinner 
52877b4d286SDarrick J. Wong 	/* Skip inodes that have no vfs state. */
52977b4d286SDarrick J. Wong 	if ((flags & XFS_IGET_INCORE) &&
53077b4d286SDarrick J. Wong 	    (ip->i_flags & XFS_IRECLAIMABLE))
53177b4d286SDarrick J. Wong 		goto out_skip;
532378f681cSDarrick J. Wong 
53377b4d286SDarrick J. Wong 	/* The inode fits the selection criteria; process it. */
53477b4d286SDarrick J. Wong 	if (ip->i_flags & XFS_IRECLAIMABLE) {
535ff7bebebSDarrick J. Wong 		/* Drops i_flags_lock and RCU read lock. */
536ff7bebebSDarrick J. Wong 		error = xfs_iget_recycle(pag, ip);
537ff7bebebSDarrick J. Wong 		if (error)
538ff7bebebSDarrick J. Wong 			return error;
53933479e05SDave Chinner 	} else {
54033479e05SDave Chinner 		/* If the VFS inode is being torn down, pause and try again. */
54177b4d286SDarrick J. Wong 		if (!igrab(inode))
54277b4d286SDarrick J. Wong 			goto out_skip;
54333479e05SDave Chinner 
54433479e05SDave Chinner 		/* We've got a live one. */
54533479e05SDave Chinner 		spin_unlock(&ip->i_flags_lock);
54633479e05SDave Chinner 		rcu_read_unlock();
54733479e05SDave Chinner 		trace_xfs_iget_hit(ip);
54833479e05SDave Chinner 	}
54933479e05SDave Chinner 
55033479e05SDave Chinner 	if (lock_flags != 0)
55133479e05SDave Chinner 		xfs_ilock(ip, lock_flags);
55233479e05SDave Chinner 
553378f681cSDarrick J. Wong 	if (!(flags & XFS_IGET_INCORE))
554dae2f8edSIra Weiny 		xfs_iflags_clear(ip, XFS_ISTALE);
555ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_ig_found);
55633479e05SDave Chinner 
55733479e05SDave Chinner 	return 0;
55833479e05SDave Chinner 
55977b4d286SDarrick J. Wong out_skip:
56077b4d286SDarrick J. Wong 	trace_xfs_iget_skip(ip);
56177b4d286SDarrick J. Wong 	XFS_STATS_INC(mp, xs_ig_frecycle);
56277b4d286SDarrick J. Wong 	error = -EAGAIN;
56333479e05SDave Chinner out_error:
56433479e05SDave Chinner 	spin_unlock(&ip->i_flags_lock);
56533479e05SDave Chinner 	rcu_read_unlock();
56633479e05SDave Chinner 	return error;
567ab23a776SDave Chinner 
568ab23a776SDave Chinner out_inodegc_flush:
569ab23a776SDave Chinner 	spin_unlock(&ip->i_flags_lock);
570ab23a776SDave Chinner 	rcu_read_unlock();
571ab23a776SDave Chinner 	/*
572ab23a776SDave Chinner 	 * Do not wait for the workers, because the caller could hold an AGI
573ab23a776SDave Chinner 	 * buffer lock.  We're just going to sleep in a loop anyway.
574ab23a776SDave Chinner 	 */
575ab23a776SDave Chinner 	if (xfs_is_inodegc_enabled(mp))
576ab23a776SDave Chinner 		xfs_inodegc_queue_all(mp);
577ab23a776SDave Chinner 	return -EAGAIN;
57833479e05SDave Chinner }
57933479e05SDave Chinner 
58033479e05SDave Chinner static int
58133479e05SDave Chinner xfs_iget_cache_miss(
58233479e05SDave Chinner 	struct xfs_mount	*mp,
58333479e05SDave Chinner 	struct xfs_perag	*pag,
58433479e05SDave Chinner 	xfs_trans_t		*tp,
58533479e05SDave Chinner 	xfs_ino_t		ino,
58633479e05SDave Chinner 	struct xfs_inode	**ipp,
58733479e05SDave Chinner 	int			flags,
58833479e05SDave Chinner 	int			lock_flags)
58933479e05SDave Chinner {
59033479e05SDave Chinner 	struct xfs_inode	*ip;
59133479e05SDave Chinner 	int			error;
59233479e05SDave Chinner 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
59333479e05SDave Chinner 	int			iflags;
59433479e05SDave Chinner 
59533479e05SDave Chinner 	ip = xfs_inode_alloc(mp, ino);
59633479e05SDave Chinner 	if (!ip)
5972451337dSDave Chinner 		return -ENOMEM;
59833479e05SDave Chinner 
599bb8a66afSChristoph Hellwig 	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
60033479e05SDave Chinner 	if (error)
60133479e05SDave Chinner 		goto out_destroy;
60233479e05SDave Chinner 
603bb8a66afSChristoph Hellwig 	/*
604bb8a66afSChristoph Hellwig 	 * For version 5 superblocks, if we are initialising a new inode and we
6050560f31aSDave Chinner 	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
606bb8a66afSChristoph Hellwig 	 * simply build the new inode core with a random generation number.
607bb8a66afSChristoph Hellwig 	 *
608bb8a66afSChristoph Hellwig 	 * For version 4 (and older) superblocks, log recovery is dependent on
609965e0a1aSChristoph Hellwig 	 * the i_flushiter field being initialised from the current on-disk
610bb8a66afSChristoph Hellwig 	 * value and hence we must also read the inode off disk even when
611bb8a66afSChristoph Hellwig 	 * initializing new inodes.
612bb8a66afSChristoph Hellwig 	 */
61338c26bfdSDave Chinner 	if (xfs_has_v3inodes(mp) &&
6140560f31aSDave Chinner 	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
615bb8a66afSChristoph Hellwig 		VFS_I(ip)->i_generation = prandom_u32();
616bb8a66afSChristoph Hellwig 	} else {
617bb8a66afSChristoph Hellwig 		struct xfs_buf		*bp;
618bb8a66afSChristoph Hellwig 
619af9dcddeSChristoph Hellwig 		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
620bb8a66afSChristoph Hellwig 		if (error)
621bb8a66afSChristoph Hellwig 			goto out_destroy;
622bb8a66afSChristoph Hellwig 
623af9dcddeSChristoph Hellwig 		error = xfs_inode_from_disk(ip,
624af9dcddeSChristoph Hellwig 				xfs_buf_offset(bp, ip->i_imap.im_boffset));
625bb8a66afSChristoph Hellwig 		if (!error)
626bb8a66afSChristoph Hellwig 			xfs_buf_set_ref(bp, XFS_INO_REF);
627bb8a66afSChristoph Hellwig 		xfs_trans_brelse(tp, bp);
628bb8a66afSChristoph Hellwig 
629bb8a66afSChristoph Hellwig 		if (error)
630bb8a66afSChristoph Hellwig 			goto out_destroy;
631bb8a66afSChristoph Hellwig 	}
632bb8a66afSChristoph Hellwig 
63333479e05SDave Chinner 	trace_xfs_iget_miss(ip);
63433479e05SDave Chinner 
635ee457001SDave Chinner 	/*
636afca6c5bSDave Chinner 	 * Check the inode free state is valid. This also detects lookup
637afca6c5bSDave Chinner 	 * racing with unlinks.
638ee457001SDave Chinner 	 */
639afca6c5bSDave Chinner 	error = xfs_iget_check_free_state(ip, flags);
640afca6c5bSDave Chinner 	if (error)
641ee457001SDave Chinner 		goto out_destroy;
64233479e05SDave Chinner 
64333479e05SDave Chinner 	/*
64433479e05SDave Chinner 	 * Preload the radix tree so we can insert safely under the
64533479e05SDave Chinner 	 * write spinlock. Note that we cannot sleep inside the preload
64633479e05SDave Chinner 	 * region. Since we can be called from transaction context, don't
64733479e05SDave Chinner 	 * recurse into the file system.
64833479e05SDave Chinner 	 */
64933479e05SDave Chinner 	if (radix_tree_preload(GFP_NOFS)) {
6502451337dSDave Chinner 		error = -EAGAIN;
65133479e05SDave Chinner 		goto out_destroy;
65233479e05SDave Chinner 	}
65333479e05SDave Chinner 
65433479e05SDave Chinner 	/*
65533479e05SDave Chinner 	 * Because the inode hasn't been added to the radix-tree yet it can't
65633479e05SDave Chinner 	 * be found by another thread, so we can do the non-sleeping lock here.
65733479e05SDave Chinner 	 */
65833479e05SDave Chinner 	if (lock_flags) {
65933479e05SDave Chinner 		if (!xfs_ilock_nowait(ip, lock_flags))
66033479e05SDave Chinner 			BUG();
66133479e05SDave Chinner 	}
66233479e05SDave Chinner 
66333479e05SDave Chinner 	/*
66433479e05SDave Chinner 	 * These values must be set before inserting the inode into the radix
66533479e05SDave Chinner 	 * tree as the moment it is inserted a concurrent lookup (allowed by the
66633479e05SDave Chinner 	 * RCU locking mechanism) can find it and that lookup must see that this
66733479e05SDave Chinner 	 * is an inode currently under construction (i.e. that XFS_INEW is set).
66833479e05SDave Chinner 	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
66933479e05SDave Chinner 	 * memory barrier that ensures this detection works correctly at lookup
67033479e05SDave Chinner 	 * time.
67133479e05SDave Chinner 	 */
67233479e05SDave Chinner 	iflags = XFS_INEW;
67333479e05SDave Chinner 	if (flags & XFS_IGET_DONTCACHE)
6742c567af4SIra Weiny 		d_mark_dontcache(VFS_I(ip));
675113a5683SChandra Seetharaman 	ip->i_udquot = NULL;
676113a5683SChandra Seetharaman 	ip->i_gdquot = NULL;
67792f8ff73SChandra Seetharaman 	ip->i_pdquot = NULL;
67833479e05SDave Chinner 	xfs_iflags_set(ip, iflags);
67933479e05SDave Chinner 
68033479e05SDave Chinner 	/* insert the new inode */
68133479e05SDave Chinner 	spin_lock(&pag->pag_ici_lock);
68233479e05SDave Chinner 	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
68333479e05SDave Chinner 	if (unlikely(error)) {
68433479e05SDave Chinner 		WARN_ON(error != -EEXIST);
685ff6d6af2SBill O'Donnell 		XFS_STATS_INC(mp, xs_ig_dup);
6862451337dSDave Chinner 		error = -EAGAIN;
68733479e05SDave Chinner 		goto out_preload_end;
68833479e05SDave Chinner 	}
68933479e05SDave Chinner 	spin_unlock(&pag->pag_ici_lock);
69033479e05SDave Chinner 	radix_tree_preload_end();
69133479e05SDave Chinner 
69233479e05SDave Chinner 	*ipp = ip;
69333479e05SDave Chinner 	return 0;
69433479e05SDave Chinner 
69533479e05SDave Chinner out_preload_end:
69633479e05SDave Chinner 	spin_unlock(&pag->pag_ici_lock);
69733479e05SDave Chinner 	radix_tree_preload_end();
69833479e05SDave Chinner 	if (lock_flags)
69933479e05SDave Chinner 		xfs_iunlock(ip, lock_flags);
70033479e05SDave Chinner out_destroy:
70133479e05SDave Chinner 	__destroy_inode(VFS_I(ip));
70233479e05SDave Chinner 	xfs_inode_free(ip);
70333479e05SDave Chinner 	return error;
70433479e05SDave Chinner }
70533479e05SDave Chinner 
70633479e05SDave Chinner /*
70702511a5aSDave Chinner  * Look up an inode by number in the given file system.  The inode is looked up
70802511a5aSDave Chinner  * in the cache held in each AG.  If the inode is found in the cache, initialise
70902511a5aSDave Chinner  * the vfs inode if necessary.
71033479e05SDave Chinner  *
71102511a5aSDave Chinner  * If it is not in core, read it in from the file system's device, add it to the
71202511a5aSDave Chinner  * cache and initialise the vfs inode.
71333479e05SDave Chinner  *
71433479e05SDave Chinner  * The inode is locked according to the value of the lock_flags parameter.
71502511a5aSDave Chinner  * Inode lookup is only done during metadata operations and not as part of the
71602511a5aSDave Chinner  * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
71733479e05SDave Chinner  */
71833479e05SDave Chinner int
71933479e05SDave Chinner xfs_iget(
72002511a5aSDave Chinner 	struct xfs_mount	*mp,
72102511a5aSDave Chinner 	struct xfs_trans	*tp,
72233479e05SDave Chinner 	xfs_ino_t		ino,
72333479e05SDave Chinner 	uint			flags,
72433479e05SDave Chinner 	uint			lock_flags,
72502511a5aSDave Chinner 	struct xfs_inode	**ipp)
72633479e05SDave Chinner {
72702511a5aSDave Chinner 	struct xfs_inode	*ip;
72802511a5aSDave Chinner 	struct xfs_perag	*pag;
72933479e05SDave Chinner 	xfs_agino_t		agino;
73002511a5aSDave Chinner 	int			error;
73133479e05SDave Chinner 
73233479e05SDave Chinner 	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
73333479e05SDave Chinner 
73433479e05SDave Chinner 	/* reject inode numbers outside existing AGs */
73533479e05SDave Chinner 	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
7362451337dSDave Chinner 		return -EINVAL;
73733479e05SDave Chinner 
738ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_ig_attempts);
7398774cf8bSLucas Stach 
74033479e05SDave Chinner 	/* get the perag structure and ensure that it's inode capable */
74133479e05SDave Chinner 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
74233479e05SDave Chinner 	agino = XFS_INO_TO_AGINO(mp, ino);
74333479e05SDave Chinner 
74433479e05SDave Chinner again:
74533479e05SDave Chinner 	error = 0;
74633479e05SDave Chinner 	rcu_read_lock();
74733479e05SDave Chinner 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
74833479e05SDave Chinner 
74933479e05SDave Chinner 	if (ip) {
75033479e05SDave Chinner 		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
75133479e05SDave Chinner 		if (error)
75233479e05SDave Chinner 			goto out_error_or_again;
75333479e05SDave Chinner 	} else {
75433479e05SDave Chinner 		rcu_read_unlock();
755378f681cSDarrick J. Wong 		if (flags & XFS_IGET_INCORE) {
756ed438b47SDarrick J. Wong 			error = -ENODATA;
757378f681cSDarrick J. Wong 			goto out_error_or_again;
758378f681cSDarrick J. Wong 		}
759ff6d6af2SBill O'Donnell 		XFS_STATS_INC(mp, xs_ig_missed);
76033479e05SDave Chinner 
76133479e05SDave Chinner 		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
76233479e05SDave Chinner 							flags, lock_flags);
76333479e05SDave Chinner 		if (error)
76433479e05SDave Chinner 			goto out_error_or_again;
76533479e05SDave Chinner 	}
76633479e05SDave Chinner 	xfs_perag_put(pag);
76733479e05SDave Chinner 
76833479e05SDave Chinner 	*ipp = ip;
76933479e05SDave Chinner 
77033479e05SDave Chinner 	/*
77158c90473SDave Chinner 	 * If we have a real type for an on-disk inode, we can setup the inode
77233479e05SDave Chinner 	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
77333479e05SDave Chinner 	 */
774c19b3b05SDave Chinner 	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
77558c90473SDave Chinner 		xfs_setup_existing_inode(ip);
77633479e05SDave Chinner 	return 0;
77733479e05SDave Chinner 
77833479e05SDave Chinner out_error_or_again:
779378f681cSDarrick J. Wong 	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
78033479e05SDave Chinner 		delay(1);
78133479e05SDave Chinner 		goto again;
78233479e05SDave Chinner 	}
78333479e05SDave Chinner 	xfs_perag_put(pag);
78433479e05SDave Chinner 	return error;
78533479e05SDave Chinner }
78633479e05SDave Chinner 
7876d8b79cfSDave Chinner /*
788378f681cSDarrick J. Wong  * "Is this a cached inode that's also allocated?"
789378f681cSDarrick J. Wong  *
790378f681cSDarrick J. Wong  * Look up an inode by number in the given file system.  If the inode is
791378f681cSDarrick J. Wong  * in cache and isn't in purgatory, return 1 if the inode is allocated
792378f681cSDarrick J. Wong  * and 0 if it is not.  For all other cases (not in cache, being torn
793378f681cSDarrick J. Wong  * down, etc.), return a negative error code.
794378f681cSDarrick J. Wong  *
795378f681cSDarrick J. Wong  * The caller has to prevent inode allocation and freeing activity,
796378f681cSDarrick J. Wong  * presumably by locking the AGI buffer.   This is to ensure that an
797378f681cSDarrick J. Wong  * inode cannot transition from allocated to freed until the caller is
798378f681cSDarrick J. Wong  * ready to allow that.  If the inode is in an intermediate state (new,
799378f681cSDarrick J. Wong  * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
800378f681cSDarrick J. Wong  * inode is not in the cache, -ENOENT will be returned.  The caller must
801378f681cSDarrick J. Wong  * deal with these scenarios appropriately.
802378f681cSDarrick J. Wong  *
803378f681cSDarrick J. Wong  * This is a specialized use case for the online scrubber; if you're
804378f681cSDarrick J. Wong  * reading this, you probably want xfs_iget.
805378f681cSDarrick J. Wong  */
806378f681cSDarrick J. Wong int
807378f681cSDarrick J. Wong xfs_icache_inode_is_allocated(
808378f681cSDarrick J. Wong 	struct xfs_mount	*mp,
809378f681cSDarrick J. Wong 	struct xfs_trans	*tp,
810378f681cSDarrick J. Wong 	xfs_ino_t		ino,
811378f681cSDarrick J. Wong 	bool			*inuse)
812378f681cSDarrick J. Wong {
813378f681cSDarrick J. Wong 	struct xfs_inode	*ip;
814378f681cSDarrick J. Wong 	int			error;
815378f681cSDarrick J. Wong 
816378f681cSDarrick J. Wong 	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
817378f681cSDarrick J. Wong 	if (error)
818378f681cSDarrick J. Wong 		return error;
819378f681cSDarrick J. Wong 
820378f681cSDarrick J. Wong 	*inuse = !!(VFS_I(ip)->i_mode);
82144a8736bSDarrick J. Wong 	xfs_irele(ip);
822378f681cSDarrick J. Wong 	return 0;
823378f681cSDarrick J. Wong }
824378f681cSDarrick J. Wong 
825579b62faSBrian Foster /*
8266d8b79cfSDave Chinner  * Grab the inode for reclaim exclusively.
82750718b8dSDave Chinner  *
82850718b8dSDave Chinner  * We have found this inode via a lookup under RCU, so the inode may have
82950718b8dSDave Chinner  * already been freed, or it may be in the process of being recycled by
83050718b8dSDave Chinner  * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
83150718b8dSDave Chinner  * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
83250718b8dSDave Chinner  * will not be set. Hence we need to check for both these flag conditions to
83350718b8dSDave Chinner  * avoid inodes that are no longer reclaim candidates.
83450718b8dSDave Chinner  *
83550718b8dSDave Chinner  * Note: checking for other state flags here, under the i_flags_lock or not, is
83650718b8dSDave Chinner  * racy and should be avoided. Those races should be resolved only after we have
83750718b8dSDave Chinner  * ensured that we are able to reclaim this inode and the world can see that we
83850718b8dSDave Chinner  * are going to reclaim it.
83950718b8dSDave Chinner  *
84050718b8dSDave Chinner  * Return true if we grabbed it, false otherwise.
8416d8b79cfSDave Chinner  */
84250718b8dSDave Chinner static bool
843f1bc5c56SDarrick J. Wong xfs_reclaim_igrab(
8449492750aSDarrick J. Wong 	struct xfs_inode	*ip,
845b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
8466d8b79cfSDave Chinner {
8476d8b79cfSDave Chinner 	ASSERT(rcu_read_lock_held());
8486d8b79cfSDave Chinner 
8496d8b79cfSDave Chinner 	spin_lock(&ip->i_flags_lock);
8506d8b79cfSDave Chinner 	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
8516d8b79cfSDave Chinner 	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
8526d8b79cfSDave Chinner 		/* not a reclaim candidate. */
8536d8b79cfSDave Chinner 		spin_unlock(&ip->i_flags_lock);
85450718b8dSDave Chinner 		return false;
8556d8b79cfSDave Chinner 	}
8569492750aSDarrick J. Wong 
8579492750aSDarrick J. Wong 	/* Don't reclaim a sick inode unless the caller asked for it. */
8589492750aSDarrick J. Wong 	if (ip->i_sick &&
859b26b2bf1SDarrick J. Wong 	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
8609492750aSDarrick J. Wong 		spin_unlock(&ip->i_flags_lock);
8619492750aSDarrick J. Wong 		return false;
8629492750aSDarrick J. Wong 	}
8639492750aSDarrick J. Wong 
8646d8b79cfSDave Chinner 	__xfs_iflags_set(ip, XFS_IRECLAIM);
8656d8b79cfSDave Chinner 	spin_unlock(&ip->i_flags_lock);
86650718b8dSDave Chinner 	return true;
8676d8b79cfSDave Chinner }
8686d8b79cfSDave Chinner 
8696d8b79cfSDave Chinner /*
87002511a5aSDave Chinner  * Inode reclaim is non-blocking, so the default action if progress cannot be
87102511a5aSDave Chinner  * made is to "requeue" the inode for reclaim by unlocking it and clearing the
87202511a5aSDave Chinner  * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
87302511a5aSDave Chinner  * blocking anymore and hence we can wait for the inode to be able to reclaim
87402511a5aSDave Chinner  * it.
8756d8b79cfSDave Chinner  *
87602511a5aSDave Chinner  * We do no IO here - if callers require inodes to be cleaned they must push the
87702511a5aSDave Chinner  * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
87802511a5aSDave Chinner  * done in the background in a non-blocking manner, and enables memory reclaim
87902511a5aSDave Chinner  * to make progress without blocking.
8806d8b79cfSDave Chinner  */
8814d0bab3aSDave Chinner static void
8826d8b79cfSDave Chinner xfs_reclaim_inode(
8836d8b79cfSDave Chinner 	struct xfs_inode	*ip,
88450718b8dSDave Chinner 	struct xfs_perag	*pag)
8856d8b79cfSDave Chinner {
8868a17d7ddSDave Chinner 	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
8876d8b79cfSDave Chinner 
8889552e14dSDave Chinner 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
8896d8b79cfSDave Chinner 		goto out;
890718ecc50SDave Chinner 	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
8919552e14dSDave Chinner 		goto out_iunlock;
8926d8b79cfSDave Chinner 
8936d8b79cfSDave Chinner 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
8946d8b79cfSDave Chinner 		xfs_iunpin_wait(ip);
89588fc1879SBrian Foster 		xfs_iflush_abort(ip);
8966d8b79cfSDave Chinner 		goto reclaim;
8976d8b79cfSDave Chinner 	}
898617825feSDave Chinner 	if (xfs_ipincount(ip))
899718ecc50SDave Chinner 		goto out_clear_flush;
900617825feSDave Chinner 	if (!xfs_inode_clean(ip))
901718ecc50SDave Chinner 		goto out_clear_flush;
902617825feSDave Chinner 
903718ecc50SDave Chinner 	xfs_iflags_clear(ip, XFS_IFLUSHING);
9046d8b79cfSDave Chinner reclaim:
905ab23a776SDave Chinner 	trace_xfs_inode_reclaiming(ip);
90698efe8afSBrian Foster 
9078a17d7ddSDave Chinner 	/*
9088a17d7ddSDave Chinner 	 * Because we use RCU freeing we need to ensure the inode always appears
9098a17d7ddSDave Chinner 	 * to be reclaimed with an invalid inode number when in the free state.
91098efe8afSBrian Foster 	 * We do this as early as possible under the ILOCK so that
911f2e9ad21SOmar Sandoval 	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
912f2e9ad21SOmar Sandoval 	 * detect races with us here. By doing this, we guarantee that once
913f2e9ad21SOmar Sandoval 	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
914f2e9ad21SOmar Sandoval 	 * it will see either a valid inode that will serialise correctly, or it
915f2e9ad21SOmar Sandoval 	 * will see an invalid inode that it can skip.
9168a17d7ddSDave Chinner 	 */
9178a17d7ddSDave Chinner 	spin_lock(&ip->i_flags_lock);
9188a17d7ddSDave Chinner 	ip->i_flags = XFS_IRECLAIM;
9198a17d7ddSDave Chinner 	ip->i_ino = 0;
920255794c7SDarrick J. Wong 	ip->i_sick = 0;
921255794c7SDarrick J. Wong 	ip->i_checked = 0;
9228a17d7ddSDave Chinner 	spin_unlock(&ip->i_flags_lock);
9238a17d7ddSDave Chinner 
9246d8b79cfSDave Chinner 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
9256d8b79cfSDave Chinner 
926ff6d6af2SBill O'Donnell 	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
9276d8b79cfSDave Chinner 	/*
9286d8b79cfSDave Chinner 	 * Remove the inode from the per-AG radix tree.
9296d8b79cfSDave Chinner 	 *
9306d8b79cfSDave Chinner 	 * Because radix_tree_delete won't complain even if the item was never
9316d8b79cfSDave Chinner 	 * added to the tree assert that it's been there before to catch
9326d8b79cfSDave Chinner 	 * problems with the inode life time early on.
9336d8b79cfSDave Chinner 	 */
9346d8b79cfSDave Chinner 	spin_lock(&pag->pag_ici_lock);
9356d8b79cfSDave Chinner 	if (!radix_tree_delete(&pag->pag_ici_root,
9368a17d7ddSDave Chinner 				XFS_INO_TO_AGINO(ip->i_mount, ino)))
9376d8b79cfSDave Chinner 		ASSERT(0);
938c076ae7aSDarrick J. Wong 	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
9396d8b79cfSDave Chinner 	spin_unlock(&pag->pag_ici_lock);
9406d8b79cfSDave Chinner 
9416d8b79cfSDave Chinner 	/*
9426d8b79cfSDave Chinner 	 * Here we do an (almost) spurious inode lock in order to coordinate
9436d8b79cfSDave Chinner 	 * with inode cache radix tree lookups.  This is because the lookup
9446d8b79cfSDave Chinner 	 * can reference the inodes in the cache without taking references.
9456d8b79cfSDave Chinner 	 *
9466d8b79cfSDave Chinner 	 * We make that OK here by ensuring that we wait until the inode is
9476d8b79cfSDave Chinner 	 * unlocked after the lookup before we go ahead and free it.
9486d8b79cfSDave Chinner 	 */
9496d8b79cfSDave Chinner 	xfs_ilock(ip, XFS_ILOCK_EXCL);
9503ea06d73SDarrick J. Wong 	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
9516d8b79cfSDave Chinner 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
95296355d5aSDave Chinner 	ASSERT(xfs_inode_clean(ip));
9536d8b79cfSDave Chinner 
9548a17d7ddSDave Chinner 	__xfs_inode_free(ip);
9554d0bab3aSDave Chinner 	return;
9566d8b79cfSDave Chinner 
957718ecc50SDave Chinner out_clear_flush:
958718ecc50SDave Chinner 	xfs_iflags_clear(ip, XFS_IFLUSHING);
9599552e14dSDave Chinner out_iunlock:
9606d8b79cfSDave Chinner 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
9619552e14dSDave Chinner out:
962617825feSDave Chinner 	xfs_iflags_clear(ip, XFS_IRECLAIM);
9636d8b79cfSDave Chinner }
9646d8b79cfSDave Chinner 
9659492750aSDarrick J. Wong /* Reclaim sick inodes if we're unmounting or the fs went down. */
9669492750aSDarrick J. Wong static inline bool
9679492750aSDarrick J. Wong xfs_want_reclaim_sick(
9689492750aSDarrick J. Wong 	struct xfs_mount	*mp)
9699492750aSDarrick J. Wong {
970*2e973b2cSDave Chinner 	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
9719492750aSDarrick J. Wong 	       XFS_FORCED_SHUTDOWN(mp);
9729492750aSDarrick J. Wong }
9739492750aSDarrick J. Wong 
9744d0bab3aSDave Chinner void
9756d8b79cfSDave Chinner xfs_reclaim_inodes(
9764d0bab3aSDave Chinner 	struct xfs_mount	*mp)
9776d8b79cfSDave Chinner {
978b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	icw = {
979b26b2bf1SDarrick J. Wong 		.icw_flags	= 0,
9809492750aSDarrick J. Wong 	};
9819492750aSDarrick J. Wong 
9829492750aSDarrick J. Wong 	if (xfs_want_reclaim_sick(mp))
983b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
9849492750aSDarrick J. Wong 
9854d0bab3aSDave Chinner 	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
986617825feSDave Chinner 		xfs_ail_push_all_sync(mp->m_ail);
987b26b2bf1SDarrick J. Wong 		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
9880f4ec0f1SZheng Bin 	}
9896d8b79cfSDave Chinner }
9906d8b79cfSDave Chinner 
9916d8b79cfSDave Chinner /*
99202511a5aSDave Chinner  * The shrinker infrastructure determines how many inodes we should scan for
99302511a5aSDave Chinner  * reclaim. We want as many clean inodes ready to reclaim as possible, so we
99402511a5aSDave Chinner  * push the AIL here. We also want to proactively free up memory if we can to
99502511a5aSDave Chinner  * minimise the amount of work memory reclaim has to do so we kick the
99602511a5aSDave Chinner  * background reclaim if it isn't already scheduled.
9976d8b79cfSDave Chinner  */
9980a234c6dSDave Chinner long
9996d8b79cfSDave Chinner xfs_reclaim_inodes_nr(
10006d8b79cfSDave Chinner 	struct xfs_mount	*mp,
100110be350bSDarrick J. Wong 	unsigned long		nr_to_scan)
10026d8b79cfSDave Chinner {
1003b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	icw = {
1004b26b2bf1SDarrick J. Wong 		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
100510be350bSDarrick J. Wong 		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
1006f1bc5c56SDarrick J. Wong 	};
1007f1bc5c56SDarrick J. Wong 
10089492750aSDarrick J. Wong 	if (xfs_want_reclaim_sick(mp))
1009b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
10109492750aSDarrick J. Wong 
10116d8b79cfSDave Chinner 	/* kick background reclaimer and push the AIL */
10126d8b79cfSDave Chinner 	xfs_reclaim_work_queue(mp);
10136d8b79cfSDave Chinner 	xfs_ail_push_all(mp->m_ail);
10146d8b79cfSDave Chinner 
1015b26b2bf1SDarrick J. Wong 	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
1016617825feSDave Chinner 	return 0;
10176d8b79cfSDave Chinner }
10186d8b79cfSDave Chinner 
10196d8b79cfSDave Chinner /*
10206d8b79cfSDave Chinner  * Return the number of reclaimable inodes in the filesystem for
10216d8b79cfSDave Chinner  * the shrinker to determine how much to reclaim.
10226d8b79cfSDave Chinner  */
102310be350bSDarrick J. Wong long
10246d8b79cfSDave Chinner xfs_reclaim_inodes_count(
10256d8b79cfSDave Chinner 	struct xfs_mount	*mp)
10266d8b79cfSDave Chinner {
10276d8b79cfSDave Chinner 	struct xfs_perag	*pag;
10286d8b79cfSDave Chinner 	xfs_agnumber_t		ag = 0;
102910be350bSDarrick J. Wong 	long			reclaimable = 0;
10306d8b79cfSDave Chinner 
10316d8b79cfSDave Chinner 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
10326d8b79cfSDave Chinner 		ag = pag->pag_agno + 1;
10336d8b79cfSDave Chinner 		reclaimable += pag->pag_ici_reclaimable;
10346d8b79cfSDave Chinner 		xfs_perag_put(pag);
10356d8b79cfSDave Chinner 	}
10366d8b79cfSDave Chinner 	return reclaimable;
10376d8b79cfSDave Chinner }
10386d8b79cfSDave Chinner 
103939b1cfd7SDarrick J. Wong STATIC bool
1040b26b2bf1SDarrick J. Wong xfs_icwalk_match_id(
10413e3f9f58SBrian Foster 	struct xfs_inode	*ip,
1042b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
10433e3f9f58SBrian Foster {
1044b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1045b26b2bf1SDarrick J. Wong 	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
104639b1cfd7SDarrick J. Wong 		return false;
10471b556048SBrian Foster 
1048b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1049b26b2bf1SDarrick J. Wong 	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
105039b1cfd7SDarrick J. Wong 		return false;
10511b556048SBrian Foster 
1052b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1053b26b2bf1SDarrick J. Wong 	    ip->i_projid != icw->icw_prid)
105439b1cfd7SDarrick J. Wong 		return false;
10551b556048SBrian Foster 
105639b1cfd7SDarrick J. Wong 	return true;
10573e3f9f58SBrian Foster }
10583e3f9f58SBrian Foster 
1059f4526397SBrian Foster /*
1060f4526397SBrian Foster  * A union-based inode filtering algorithm. Process the inode if any of the
1061f4526397SBrian Foster  * criteria match. This is for global/internal scans only.
1062f4526397SBrian Foster  */
106339b1cfd7SDarrick J. Wong STATIC bool
1064b26b2bf1SDarrick J. Wong xfs_icwalk_match_id_union(
1065f4526397SBrian Foster 	struct xfs_inode	*ip,
1066b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1067f4526397SBrian Foster {
1068b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1069b26b2bf1SDarrick J. Wong 	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
107039b1cfd7SDarrick J. Wong 		return true;
1071f4526397SBrian Foster 
1072b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1073b26b2bf1SDarrick J. Wong 	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
107439b1cfd7SDarrick J. Wong 		return true;
1075f4526397SBrian Foster 
1076b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1077b26b2bf1SDarrick J. Wong 	    ip->i_projid == icw->icw_prid)
107839b1cfd7SDarrick J. Wong 		return true;
1079f4526397SBrian Foster 
108039b1cfd7SDarrick J. Wong 	return false;
1081f4526397SBrian Foster }
1082f4526397SBrian Foster 
1083a91bf992SDarrick J. Wong /*
1084a91bf992SDarrick J. Wong  * Is this inode @ip eligible for eof/cow block reclamation, given some
1085b26b2bf1SDarrick J. Wong  * filtering parameters @icw?  The inode is eligible if @icw is null or
1086a91bf992SDarrick J. Wong  * if the predicate functions match.
1087a91bf992SDarrick J. Wong  */
1088a91bf992SDarrick J. Wong static bool
1089b26b2bf1SDarrick J. Wong xfs_icwalk_match(
1090a91bf992SDarrick J. Wong 	struct xfs_inode	*ip,
1091b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1092a91bf992SDarrick J. Wong {
109339b1cfd7SDarrick J. Wong 	bool			match;
1094a91bf992SDarrick J. Wong 
1095b26b2bf1SDarrick J. Wong 	if (!icw)
1096a91bf992SDarrick J. Wong 		return true;
1097a91bf992SDarrick J. Wong 
1098b26b2bf1SDarrick J. Wong 	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
1099b26b2bf1SDarrick J. Wong 		match = xfs_icwalk_match_id_union(ip, icw);
1100a91bf992SDarrick J. Wong 	else
1101b26b2bf1SDarrick J. Wong 		match = xfs_icwalk_match_id(ip, icw);
1102a91bf992SDarrick J. Wong 	if (!match)
1103a91bf992SDarrick J. Wong 		return false;
1104a91bf992SDarrick J. Wong 
1105a91bf992SDarrick J. Wong 	/* skip the inode if the file size is too small */
1106b26b2bf1SDarrick J. Wong 	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
1107b26b2bf1SDarrick J. Wong 	    XFS_ISIZE(ip) < icw->icw_min_file_size)
1108a91bf992SDarrick J. Wong 		return false;
1109a91bf992SDarrick J. Wong 
1110a91bf992SDarrick J. Wong 	return true;
1111a91bf992SDarrick J. Wong }
1112a91bf992SDarrick J. Wong 
11134d0bab3aSDave Chinner /*
11144d0bab3aSDave Chinner  * This is a fast pass over the inode cache to try to get reclaim moving on as
11154d0bab3aSDave Chinner  * many inodes as possible in a short period of time. It kicks itself every few
11164d0bab3aSDave Chinner  * seconds, as well as being kicked by the inode cache shrinker when memory
111702511a5aSDave Chinner  * goes low.
11184d0bab3aSDave Chinner  */
11194d0bab3aSDave Chinner void
11204d0bab3aSDave Chinner xfs_reclaim_worker(
11214d0bab3aSDave Chinner 	struct work_struct *work)
11224d0bab3aSDave Chinner {
11234d0bab3aSDave Chinner 	struct xfs_mount *mp = container_of(to_delayed_work(work),
11244d0bab3aSDave Chinner 					struct xfs_mount, m_reclaim_work);
11254d0bab3aSDave Chinner 
1126f1bc5c56SDarrick J. Wong 	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
11274d0bab3aSDave Chinner 	xfs_reclaim_work_queue(mp);
11284d0bab3aSDave Chinner }
11294d0bab3aSDave Chinner 
11303e3f9f58SBrian Foster STATIC int
113141176a68SBrian Foster xfs_inode_free_eofblocks(
113241176a68SBrian Foster 	struct xfs_inode	*ip,
1133b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw,
11340fa4a10aSDarrick J. Wong 	unsigned int		*lockflags)
113541176a68SBrian Foster {
1136390600f8SDarrick J. Wong 	bool			wait;
1137390600f8SDarrick J. Wong 
1138b26b2bf1SDarrick J. Wong 	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
11395400da7dSBrian Foster 
1140ce2d3bbeSDarrick J. Wong 	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1141ce2d3bbeSDarrick J. Wong 		return 0;
1142ce2d3bbeSDarrick J. Wong 
114341176a68SBrian Foster 	/*
114441176a68SBrian Foster 	 * If the mapping is dirty the operation can block and wait for some
114541176a68SBrian Foster 	 * time. Unless we are waiting, skip it.
114641176a68SBrian Foster 	 */
1147390600f8SDarrick J. Wong 	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
114841176a68SBrian Foster 		return 0;
114941176a68SBrian Foster 
1150b26b2bf1SDarrick J. Wong 	if (!xfs_icwalk_match(ip, icw))
11513e3f9f58SBrian Foster 		return 0;
11523e3f9f58SBrian Foster 
1153a36b9261SBrian Foster 	/*
1154a36b9261SBrian Foster 	 * If the caller is waiting, return -EAGAIN to keep the background
1155a36b9261SBrian Foster 	 * scanner moving and revisit the inode in a subsequent pass.
1156a36b9261SBrian Foster 	 */
1157c3155097SBrian Foster 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1158390600f8SDarrick J. Wong 		if (wait)
1159390600f8SDarrick J. Wong 			return -EAGAIN;
1160390600f8SDarrick J. Wong 		return 0;
1161a36b9261SBrian Foster 	}
11620fa4a10aSDarrick J. Wong 	*lockflags |= XFS_IOLOCK_EXCL;
1163390600f8SDarrick J. Wong 
11642b156ff8SDarrick J. Wong 	if (xfs_can_free_eofblocks(ip, false))
11650fa4a10aSDarrick J. Wong 		return xfs_free_eofblocks(ip);
11662b156ff8SDarrick J. Wong 
11672b156ff8SDarrick J. Wong 	/* inode could be preallocated or append-only */
11682b156ff8SDarrick J. Wong 	trace_xfs_inode_free_eofblocks_invalid(ip);
11692b156ff8SDarrick J. Wong 	xfs_inode_clear_eofblocks_tag(ip);
11702b156ff8SDarrick J. Wong 	return 0;
117141176a68SBrian Foster }
117241176a68SBrian Foster 
117383104d44SDarrick J. Wong static void
1174ce2d3bbeSDarrick J. Wong xfs_blockgc_set_iflag(
1175ce2d3bbeSDarrick J. Wong 	struct xfs_inode	*ip,
1176ce2d3bbeSDarrick J. Wong 	unsigned long		iflag)
117727b52867SBrian Foster {
117827b52867SBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
117927b52867SBrian Foster 	struct xfs_perag	*pag;
118027b52867SBrian Foster 
1181ce2d3bbeSDarrick J. Wong 	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1182ce2d3bbeSDarrick J. Wong 
118385a6e764SChristoph Hellwig 	/*
118485a6e764SChristoph Hellwig 	 * Don't bother locking the AG and looking up in the radix trees
118585a6e764SChristoph Hellwig 	 * if we already know that we have the tag set.
118685a6e764SChristoph Hellwig 	 */
1187ce2d3bbeSDarrick J. Wong 	if (ip->i_flags & iflag)
118885a6e764SChristoph Hellwig 		return;
118985a6e764SChristoph Hellwig 	spin_lock(&ip->i_flags_lock);
1190ce2d3bbeSDarrick J. Wong 	ip->i_flags |= iflag;
119185a6e764SChristoph Hellwig 	spin_unlock(&ip->i_flags_lock);
119285a6e764SChristoph Hellwig 
119327b52867SBrian Foster 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
119427b52867SBrian Foster 	spin_lock(&pag->pag_ici_lock);
119527b52867SBrian Foster 
1196c076ae7aSDarrick J. Wong 	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1197ce2d3bbeSDarrick J. Wong 			XFS_ICI_BLOCKGC_TAG);
119827b52867SBrian Foster 
119927b52867SBrian Foster 	spin_unlock(&pag->pag_ici_lock);
120027b52867SBrian Foster 	xfs_perag_put(pag);
120127b52867SBrian Foster }
120227b52867SBrian Foster 
120327b52867SBrian Foster void
120483104d44SDarrick J. Wong xfs_inode_set_eofblocks_tag(
120527b52867SBrian Foster 	xfs_inode_t	*ip)
120627b52867SBrian Foster {
120783104d44SDarrick J. Wong 	trace_xfs_inode_set_eofblocks_tag(ip);
12089669f51dSDarrick J. Wong 	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
120983104d44SDarrick J. Wong }
121083104d44SDarrick J. Wong 
121183104d44SDarrick J. Wong static void
1212ce2d3bbeSDarrick J. Wong xfs_blockgc_clear_iflag(
1213ce2d3bbeSDarrick J. Wong 	struct xfs_inode	*ip,
1214ce2d3bbeSDarrick J. Wong 	unsigned long		iflag)
121583104d44SDarrick J. Wong {
121627b52867SBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
121727b52867SBrian Foster 	struct xfs_perag	*pag;
1218ce2d3bbeSDarrick J. Wong 	bool			clear_tag;
1219ce2d3bbeSDarrick J. Wong 
1220ce2d3bbeSDarrick J. Wong 	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
122127b52867SBrian Foster 
122285a6e764SChristoph Hellwig 	spin_lock(&ip->i_flags_lock);
1223ce2d3bbeSDarrick J. Wong 	ip->i_flags &= ~iflag;
1224ce2d3bbeSDarrick J. Wong 	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
122585a6e764SChristoph Hellwig 	spin_unlock(&ip->i_flags_lock);
122685a6e764SChristoph Hellwig 
1227ce2d3bbeSDarrick J. Wong 	if (!clear_tag)
1228ce2d3bbeSDarrick J. Wong 		return;
1229ce2d3bbeSDarrick J. Wong 
123027b52867SBrian Foster 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
123127b52867SBrian Foster 	spin_lock(&pag->pag_ici_lock);
123227b52867SBrian Foster 
1233c076ae7aSDarrick J. Wong 	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1234ce2d3bbeSDarrick J. Wong 			XFS_ICI_BLOCKGC_TAG);
123527b52867SBrian Foster 
123627b52867SBrian Foster 	spin_unlock(&pag->pag_ici_lock);
123727b52867SBrian Foster 	xfs_perag_put(pag);
123827b52867SBrian Foster }
123927b52867SBrian Foster 
124083104d44SDarrick J. Wong void
124183104d44SDarrick J. Wong xfs_inode_clear_eofblocks_tag(
124283104d44SDarrick J. Wong 	xfs_inode_t	*ip)
124383104d44SDarrick J. Wong {
124483104d44SDarrick J. Wong 	trace_xfs_inode_clear_eofblocks_tag(ip);
1245ce2d3bbeSDarrick J. Wong 	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
124683104d44SDarrick J. Wong }
124783104d44SDarrick J. Wong 
124883104d44SDarrick J. Wong /*
1249be78ff0eSDarrick J. Wong  * Set ourselves up to free CoW blocks from this file.  If it's already clean
1250be78ff0eSDarrick J. Wong  * then we can bail out quickly, but otherwise we must back off if the file
1251be78ff0eSDarrick J. Wong  * is undergoing some kind of write.
1252be78ff0eSDarrick J. Wong  */
1253be78ff0eSDarrick J. Wong static bool
1254be78ff0eSDarrick J. Wong xfs_prep_free_cowblocks(
125551d62690SChristoph Hellwig 	struct xfs_inode	*ip)
1256be78ff0eSDarrick J. Wong {
1257be78ff0eSDarrick J. Wong 	/*
1258be78ff0eSDarrick J. Wong 	 * Just clear the tag if we have an empty cow fork or none at all. It's
1259be78ff0eSDarrick J. Wong 	 * possible the inode was fully unshared since it was originally tagged.
1260be78ff0eSDarrick J. Wong 	 */
126151d62690SChristoph Hellwig 	if (!xfs_inode_has_cow_data(ip)) {
1262be78ff0eSDarrick J. Wong 		trace_xfs_inode_free_cowblocks_invalid(ip);
1263be78ff0eSDarrick J. Wong 		xfs_inode_clear_cowblocks_tag(ip);
1264be78ff0eSDarrick J. Wong 		return false;
1265be78ff0eSDarrick J. Wong 	}
1266be78ff0eSDarrick J. Wong 
1267be78ff0eSDarrick J. Wong 	/*
1268be78ff0eSDarrick J. Wong 	 * If the mapping is dirty or under writeback we cannot touch the
1269be78ff0eSDarrick J. Wong 	 * CoW fork.  Leave it alone if we're in the midst of a directio.
1270be78ff0eSDarrick J. Wong 	 */
1271be78ff0eSDarrick J. Wong 	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1272be78ff0eSDarrick J. Wong 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1273be78ff0eSDarrick J. Wong 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1274be78ff0eSDarrick J. Wong 	    atomic_read(&VFS_I(ip)->i_dio_count))
1275be78ff0eSDarrick J. Wong 		return false;
1276be78ff0eSDarrick J. Wong 
1277be78ff0eSDarrick J. Wong 	return true;
1278be78ff0eSDarrick J. Wong }
1279be78ff0eSDarrick J. Wong 
1280be78ff0eSDarrick J. Wong /*
128183104d44SDarrick J. Wong  * Automatic CoW Reservation Freeing
128283104d44SDarrick J. Wong  *
128383104d44SDarrick J. Wong  * These functions automatically garbage collect leftover CoW reservations
128483104d44SDarrick J. Wong  * that were made on behalf of a cowextsize hint when we start to run out
128583104d44SDarrick J. Wong  * of quota or when the reservations sit around for too long.  If the file
128683104d44SDarrick J. Wong  * has dirty pages or is undergoing writeback, its CoW reservations will
128783104d44SDarrick J. Wong  * be retained.
128883104d44SDarrick J. Wong  *
128983104d44SDarrick J. Wong  * The actual garbage collection piggybacks off the same code that runs
129083104d44SDarrick J. Wong  * the speculative EOF preallocation garbage collector.
129183104d44SDarrick J. Wong  */
129283104d44SDarrick J. Wong STATIC int
129383104d44SDarrick J. Wong xfs_inode_free_cowblocks(
129483104d44SDarrick J. Wong 	struct xfs_inode	*ip,
1295b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw,
12960fa4a10aSDarrick J. Wong 	unsigned int		*lockflags)
129783104d44SDarrick J. Wong {
1298f41a0716SDarrick J. Wong 	bool			wait;
1299be78ff0eSDarrick J. Wong 	int			ret = 0;
130083104d44SDarrick J. Wong 
1301b26b2bf1SDarrick J. Wong 	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1302f41a0716SDarrick J. Wong 
1303ce2d3bbeSDarrick J. Wong 	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1304ce2d3bbeSDarrick J. Wong 		return 0;
1305ce2d3bbeSDarrick J. Wong 
130651d62690SChristoph Hellwig 	if (!xfs_prep_free_cowblocks(ip))
130783104d44SDarrick J. Wong 		return 0;
130883104d44SDarrick J. Wong 
1309b26b2bf1SDarrick J. Wong 	if (!xfs_icwalk_match(ip, icw))
131083104d44SDarrick J. Wong 		return 0;
131183104d44SDarrick J. Wong 
1312f41a0716SDarrick J. Wong 	/*
1313f41a0716SDarrick J. Wong 	 * If the caller is waiting, return -EAGAIN to keep the background
1314f41a0716SDarrick J. Wong 	 * scanner moving and revisit the inode in a subsequent pass.
1315f41a0716SDarrick J. Wong 	 */
13160fa4a10aSDarrick J. Wong 	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
13170fa4a10aSDarrick J. Wong 	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1318f41a0716SDarrick J. Wong 		if (wait)
1319f41a0716SDarrick J. Wong 			return -EAGAIN;
1320f41a0716SDarrick J. Wong 		return 0;
1321f41a0716SDarrick J. Wong 	}
13220fa4a10aSDarrick J. Wong 	*lockflags |= XFS_IOLOCK_EXCL;
13230fa4a10aSDarrick J. Wong 
1324f41a0716SDarrick J. Wong 	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1325f41a0716SDarrick J. Wong 		if (wait)
13260fa4a10aSDarrick J. Wong 			return -EAGAIN;
13270fa4a10aSDarrick J. Wong 		return 0;
1328f41a0716SDarrick J. Wong 	}
13290fa4a10aSDarrick J. Wong 	*lockflags |= XFS_MMAPLOCK_EXCL;
133083104d44SDarrick J. Wong 
1331be78ff0eSDarrick J. Wong 	/*
1332be78ff0eSDarrick J. Wong 	 * Check again, nobody else should be able to dirty blocks or change
1333be78ff0eSDarrick J. Wong 	 * the reflink iflag now that we have the first two locks held.
1334be78ff0eSDarrick J. Wong 	 */
133551d62690SChristoph Hellwig 	if (xfs_prep_free_cowblocks(ip))
13363802a345SChristoph Hellwig 		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
133783104d44SDarrick J. Wong 	return ret;
133883104d44SDarrick J. Wong }
133983104d44SDarrick J. Wong 
134083104d44SDarrick J. Wong void
134183104d44SDarrick J. Wong xfs_inode_set_cowblocks_tag(
134283104d44SDarrick J. Wong 	xfs_inode_t	*ip)
134383104d44SDarrick J. Wong {
13447b7381f0SBrian Foster 	trace_xfs_inode_set_cowblocks_tag(ip);
13459669f51dSDarrick J. Wong 	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
134683104d44SDarrick J. Wong }
134783104d44SDarrick J. Wong 
134883104d44SDarrick J. Wong void
134983104d44SDarrick J. Wong xfs_inode_clear_cowblocks_tag(
135083104d44SDarrick J. Wong 	xfs_inode_t	*ip)
135183104d44SDarrick J. Wong {
13527b7381f0SBrian Foster 	trace_xfs_inode_clear_cowblocks_tag(ip);
1353ce2d3bbeSDarrick J. Wong 	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
135483104d44SDarrick J. Wong }
1355d6b636ebSDarrick J. Wong 
1356d6b636ebSDarrick J. Wong /* Disable post-EOF and CoW block auto-reclamation. */
1357d6b636ebSDarrick J. Wong void
1358c9a6526fSDarrick J. Wong xfs_blockgc_stop(
1359d6b636ebSDarrick J. Wong 	struct xfs_mount	*mp)
1360d6b636ebSDarrick J. Wong {
1361894ecacfSDarrick J. Wong 	struct xfs_perag	*pag;
1362894ecacfSDarrick J. Wong 	xfs_agnumber_t		agno;
1363894ecacfSDarrick J. Wong 
13646f649091SDarrick J. Wong 	if (!xfs_clear_blockgc_enabled(mp))
13656f649091SDarrick J. Wong 		return;
13666f649091SDarrick J. Wong 
13676f649091SDarrick J. Wong 	for_each_perag(mp, agno, pag)
1368894ecacfSDarrick J. Wong 		cancel_delayed_work_sync(&pag->pag_blockgc_work);
13696f649091SDarrick J. Wong 	trace_xfs_blockgc_stop(mp, __return_address);
1370d6b636ebSDarrick J. Wong }
1371d6b636ebSDarrick J. Wong 
1372d6b636ebSDarrick J. Wong /* Enable post-EOF and CoW block auto-reclamation. */
1373d6b636ebSDarrick J. Wong void
1374c9a6526fSDarrick J. Wong xfs_blockgc_start(
1375d6b636ebSDarrick J. Wong 	struct xfs_mount	*mp)
1376d6b636ebSDarrick J. Wong {
1377894ecacfSDarrick J. Wong 	struct xfs_perag	*pag;
1378894ecacfSDarrick J. Wong 	xfs_agnumber_t		agno;
1379894ecacfSDarrick J. Wong 
13806f649091SDarrick J. Wong 	if (xfs_set_blockgc_enabled(mp))
13816f649091SDarrick J. Wong 		return;
13826f649091SDarrick J. Wong 
13836f649091SDarrick J. Wong 	trace_xfs_blockgc_start(mp, __return_address);
1384894ecacfSDarrick J. Wong 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1385894ecacfSDarrick J. Wong 		xfs_blockgc_queue(pag);
1386d6b636ebSDarrick J. Wong }
13873d4feec0SDarrick J. Wong 
1388d20d5edcSDarrick J. Wong /* Don't try to run block gc on an inode that's in any of these states. */
1389d20d5edcSDarrick J. Wong #define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
1390ab23a776SDave Chinner 					 XFS_NEED_INACTIVE | \
1391ab23a776SDave Chinner 					 XFS_INACTIVATING | \
1392d20d5edcSDarrick J. Wong 					 XFS_IRECLAIMABLE | \
1393d20d5edcSDarrick J. Wong 					 XFS_IRECLAIM)
1394df600197SDarrick J. Wong /*
1395b9baaef4SDarrick J. Wong  * Decide if the given @ip is eligible for garbage collection of speculative
1396b9baaef4SDarrick J. Wong  * preallocations, and grab it if so.  Returns true if it's ready to go or
1397b9baaef4SDarrick J. Wong  * false if we should just ignore it.
1398df600197SDarrick J. Wong  */
1399df600197SDarrick J. Wong static bool
1400b9baaef4SDarrick J. Wong xfs_blockgc_igrab(
14017fdff526SDarrick J. Wong 	struct xfs_inode	*ip)
1402df600197SDarrick J. Wong {
1403df600197SDarrick J. Wong 	struct inode		*inode = VFS_I(ip);
1404df600197SDarrick J. Wong 
1405df600197SDarrick J. Wong 	ASSERT(rcu_read_lock_held());
1406df600197SDarrick J. Wong 
1407df600197SDarrick J. Wong 	/* Check for stale RCU freed inode */
1408df600197SDarrick J. Wong 	spin_lock(&ip->i_flags_lock);
1409df600197SDarrick J. Wong 	if (!ip->i_ino)
1410df600197SDarrick J. Wong 		goto out_unlock_noent;
1411df600197SDarrick J. Wong 
1412d20d5edcSDarrick J. Wong 	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1413df600197SDarrick J. Wong 		goto out_unlock_noent;
1414df600197SDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
1415df600197SDarrick J. Wong 
1416df600197SDarrick J. Wong 	/* nothing to sync during shutdown */
1417df600197SDarrick J. Wong 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
1418df600197SDarrick J. Wong 		return false;
1419df600197SDarrick J. Wong 
1420df600197SDarrick J. Wong 	/* If we can't grab the inode, it must on it's way to reclaim. */
1421df600197SDarrick J. Wong 	if (!igrab(inode))
1422df600197SDarrick J. Wong 		return false;
1423df600197SDarrick J. Wong 
1424df600197SDarrick J. Wong 	/* inode is valid */
1425df600197SDarrick J. Wong 	return true;
1426df600197SDarrick J. Wong 
1427df600197SDarrick J. Wong out_unlock_noent:
1428df600197SDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
1429df600197SDarrick J. Wong 	return false;
1430df600197SDarrick J. Wong }
1431df600197SDarrick J. Wong 
143241956753SDarrick J. Wong /* Scan one incore inode for block preallocations that we can remove. */
143341956753SDarrick J. Wong static int
143441956753SDarrick J. Wong xfs_blockgc_scan_inode(
143541956753SDarrick J. Wong 	struct xfs_inode	*ip,
1436b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
143785c5b270SDarrick J. Wong {
14380fa4a10aSDarrick J. Wong 	unsigned int		lockflags = 0;
143985c5b270SDarrick J. Wong 	int			error;
144085c5b270SDarrick J. Wong 
1441b26b2bf1SDarrick J. Wong 	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
144285c5b270SDarrick J. Wong 	if (error)
14430fa4a10aSDarrick J. Wong 		goto unlock;
144485c5b270SDarrick J. Wong 
1445b26b2bf1SDarrick J. Wong 	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
14460fa4a10aSDarrick J. Wong unlock:
14470fa4a10aSDarrick J. Wong 	if (lockflags)
14480fa4a10aSDarrick J. Wong 		xfs_iunlock(ip, lockflags);
1449594ab00bSDarrick J. Wong 	xfs_irele(ip);
145085c5b270SDarrick J. Wong 	return error;
145185c5b270SDarrick J. Wong }
145285c5b270SDarrick J. Wong 
14539669f51dSDarrick J. Wong /* Background worker that trims preallocated space. */
14549669f51dSDarrick J. Wong void
14559669f51dSDarrick J. Wong xfs_blockgc_worker(
14569669f51dSDarrick J. Wong 	struct work_struct	*work)
14579669f51dSDarrick J. Wong {
1458894ecacfSDarrick J. Wong 	struct xfs_perag	*pag = container_of(to_delayed_work(work),
1459894ecacfSDarrick J. Wong 					struct xfs_perag, pag_blockgc_work);
1460894ecacfSDarrick J. Wong 	struct xfs_mount	*mp = pag->pag_mount;
14619669f51dSDarrick J. Wong 	int			error;
14629669f51dSDarrick J. Wong 
14636f649091SDarrick J. Wong 	trace_xfs_blockgc_worker(mp, __return_address);
14646f649091SDarrick J. Wong 
1465f427cf5cSDarrick J. Wong 	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
14669669f51dSDarrick J. Wong 	if (error)
1467894ecacfSDarrick J. Wong 		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1468894ecacfSDarrick J. Wong 				pag->pag_agno, error);
1469894ecacfSDarrick J. Wong 	xfs_blockgc_queue(pag);
14709669f51dSDarrick J. Wong }
14719669f51dSDarrick J. Wong 
147285c5b270SDarrick J. Wong /*
14732eb66502SDarrick J. Wong  * Try to free space in the filesystem by purging inactive inodes, eofblocks
14742eb66502SDarrick J. Wong  * and cowblocks.
147585c5b270SDarrick J. Wong  */
147685c5b270SDarrick J. Wong int
147785c5b270SDarrick J. Wong xfs_blockgc_free_space(
147885c5b270SDarrick J. Wong 	struct xfs_mount	*mp,
1479b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
148085c5b270SDarrick J. Wong {
14812eb66502SDarrick J. Wong 	int			error;
14822eb66502SDarrick J. Wong 
1483b26b2bf1SDarrick J. Wong 	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
148485c5b270SDarrick J. Wong 
14852eb66502SDarrick J. Wong 	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
14862eb66502SDarrick J. Wong 	if (error)
14872eb66502SDarrick J. Wong 		return error;
14882eb66502SDarrick J. Wong 
14892eb66502SDarrick J. Wong 	xfs_inodegc_flush(mp);
14902eb66502SDarrick J. Wong 	return 0;
149185c5b270SDarrick J. Wong }
149285c5b270SDarrick J. Wong 
14933d4feec0SDarrick J. Wong /*
1494e8d04c2aSDarrick J. Wong  * Reclaim all the free space that we can by scheduling the background blockgc
1495e8d04c2aSDarrick J. Wong  * and inodegc workers immediately and waiting for them all to clear.
1496e8d04c2aSDarrick J. Wong  */
1497e8d04c2aSDarrick J. Wong void
1498e8d04c2aSDarrick J. Wong xfs_blockgc_flush_all(
1499e8d04c2aSDarrick J. Wong 	struct xfs_mount	*mp)
1500e8d04c2aSDarrick J. Wong {
1501e8d04c2aSDarrick J. Wong 	struct xfs_perag	*pag;
1502e8d04c2aSDarrick J. Wong 	xfs_agnumber_t		agno;
1503e8d04c2aSDarrick J. Wong 
1504e8d04c2aSDarrick J. Wong 	trace_xfs_blockgc_flush_all(mp, __return_address);
1505e8d04c2aSDarrick J. Wong 
1506e8d04c2aSDarrick J. Wong 	/*
1507e8d04c2aSDarrick J. Wong 	 * For each blockgc worker, move its queue time up to now.  If it
1508e8d04c2aSDarrick J. Wong 	 * wasn't queued, it will not be requeued.  Then flush whatever's
1509e8d04c2aSDarrick J. Wong 	 * left.
1510e8d04c2aSDarrick J. Wong 	 */
1511e8d04c2aSDarrick J. Wong 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1512e8d04c2aSDarrick J. Wong 		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
1513e8d04c2aSDarrick J. Wong 				&pag->pag_blockgc_work, 0);
1514e8d04c2aSDarrick J. Wong 
1515e8d04c2aSDarrick J. Wong 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1516e8d04c2aSDarrick J. Wong 		flush_delayed_work(&pag->pag_blockgc_work);
1517e8d04c2aSDarrick J. Wong 
1518e8d04c2aSDarrick J. Wong 	xfs_inodegc_flush(mp);
1519e8d04c2aSDarrick J. Wong }
1520e8d04c2aSDarrick J. Wong 
1521e8d04c2aSDarrick J. Wong /*
1522c237dd7cSDarrick J. Wong  * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
1523c237dd7cSDarrick J. Wong  * quota caused an allocation failure, so we make a best effort by including
1524c237dd7cSDarrick J. Wong  * each quota under low free space conditions (less than 1% free space) in the
1525c237dd7cSDarrick J. Wong  * scan.
1526111068f8SDarrick J. Wong  *
1527111068f8SDarrick J. Wong  * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
15282d53f66bSDarrick J. Wong  * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
1529111068f8SDarrick J. Wong  * MMAPLOCK.
15303d4feec0SDarrick J. Wong  */
1531111068f8SDarrick J. Wong int
1532c237dd7cSDarrick J. Wong xfs_blockgc_free_dquots(
1533c237dd7cSDarrick J. Wong 	struct xfs_mount	*mp,
1534c237dd7cSDarrick J. Wong 	struct xfs_dquot	*udqp,
1535c237dd7cSDarrick J. Wong 	struct xfs_dquot	*gdqp,
1536c237dd7cSDarrick J. Wong 	struct xfs_dquot	*pdqp,
15372d53f66bSDarrick J. Wong 	unsigned int		iwalk_flags)
15383d4feec0SDarrick J. Wong {
1539b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	icw = {0};
15403d4feec0SDarrick J. Wong 	bool			do_work = false;
15413d4feec0SDarrick J. Wong 
1542c237dd7cSDarrick J. Wong 	if (!udqp && !gdqp && !pdqp)
1543c237dd7cSDarrick J. Wong 		return 0;
1544c237dd7cSDarrick J. Wong 
15453d4feec0SDarrick J. Wong 	/*
1546111068f8SDarrick J. Wong 	 * Run a scan to free blocks using the union filter to cover all
1547111068f8SDarrick J. Wong 	 * applicable quotas in a single scan.
15483d4feec0SDarrick J. Wong 	 */
1549b26b2bf1SDarrick J. Wong 	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
15503d4feec0SDarrick J. Wong 
1551c237dd7cSDarrick J. Wong 	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1552b26b2bf1SDarrick J. Wong 		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1553b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
15543d4feec0SDarrick J. Wong 		do_work = true;
15553d4feec0SDarrick J. Wong 	}
15563d4feec0SDarrick J. Wong 
1557c237dd7cSDarrick J. Wong 	if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1558b26b2bf1SDarrick J. Wong 		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1559b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
15603d4feec0SDarrick J. Wong 		do_work = true;
15613d4feec0SDarrick J. Wong 	}
15623d4feec0SDarrick J. Wong 
1563c237dd7cSDarrick J. Wong 	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1564b26b2bf1SDarrick J. Wong 		icw.icw_prid = pdqp->q_id;
1565b26b2bf1SDarrick J. Wong 		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
15663d4feec0SDarrick J. Wong 		do_work = true;
15673d4feec0SDarrick J. Wong 	}
15683d4feec0SDarrick J. Wong 
15693d4feec0SDarrick J. Wong 	if (!do_work)
1570111068f8SDarrick J. Wong 		return 0;
15713d4feec0SDarrick J. Wong 
1572b26b2bf1SDarrick J. Wong 	return xfs_blockgc_free_space(mp, &icw);
1573c237dd7cSDarrick J. Wong }
1574c237dd7cSDarrick J. Wong 
1575c237dd7cSDarrick J. Wong /* Run cow/eofblocks scans on the quotas attached to the inode. */
1576c237dd7cSDarrick J. Wong int
1577c237dd7cSDarrick J. Wong xfs_blockgc_free_quota(
1578c237dd7cSDarrick J. Wong 	struct xfs_inode	*ip,
15792d53f66bSDarrick J. Wong 	unsigned int		iwalk_flags)
1580c237dd7cSDarrick J. Wong {
1581c237dd7cSDarrick J. Wong 	return xfs_blockgc_free_dquots(ip->i_mount,
1582c237dd7cSDarrick J. Wong 			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1583c237dd7cSDarrick J. Wong 			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
15842d53f66bSDarrick J. Wong 			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
15853d4feec0SDarrick J. Wong }
1586df600197SDarrick J. Wong 
1587df600197SDarrick J. Wong /* XFS Inode Cache Walking Code */
1588df600197SDarrick J. Wong 
1589df600197SDarrick J. Wong /*
1590f1bc5c56SDarrick J. Wong  * The inode lookup is done in batches to keep the amount of lock traffic and
1591f1bc5c56SDarrick J. Wong  * radix tree lookups to a minimum. The batch size is a trade off between
1592f1bc5c56SDarrick J. Wong  * lookup reduction and stack usage. This is in the reclaim path, so we can't
1593f1bc5c56SDarrick J. Wong  * be too greedy.
1594f1bc5c56SDarrick J. Wong  */
1595f1bc5c56SDarrick J. Wong #define XFS_LOOKUP_BATCH	32
1596f1bc5c56SDarrick J. Wong 
1597f1bc5c56SDarrick J. Wong 
1598f1bc5c56SDarrick J. Wong /*
1599b9baaef4SDarrick J. Wong  * Decide if we want to grab this inode in anticipation of doing work towards
1600594ab00bSDarrick J. Wong  * the goal.
1601b9baaef4SDarrick J. Wong  */
1602b9baaef4SDarrick J. Wong static inline bool
1603b9baaef4SDarrick J. Wong xfs_icwalk_igrab(
1604b9baaef4SDarrick J. Wong 	enum xfs_icwalk_goal	goal,
16059492750aSDarrick J. Wong 	struct xfs_inode	*ip,
1606b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1607b9baaef4SDarrick J. Wong {
1608b9baaef4SDarrick J. Wong 	switch (goal) {
1609b9baaef4SDarrick J. Wong 	case XFS_ICWALK_BLOCKGC:
16107fdff526SDarrick J. Wong 		return xfs_blockgc_igrab(ip);
1611f1bc5c56SDarrick J. Wong 	case XFS_ICWALK_RECLAIM:
1612b26b2bf1SDarrick J. Wong 		return xfs_reclaim_igrab(ip, icw);
1613b9baaef4SDarrick J. Wong 	default:
1614b9baaef4SDarrick J. Wong 		return false;
1615b9baaef4SDarrick J. Wong 	}
1616b9baaef4SDarrick J. Wong }
1617b9baaef4SDarrick J. Wong 
1618594ab00bSDarrick J. Wong /*
1619594ab00bSDarrick J. Wong  * Process an inode.  Each processing function must handle any state changes
1620594ab00bSDarrick J. Wong  * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
1621594ab00bSDarrick J. Wong  */
1622f427cf5cSDarrick J. Wong static inline int
1623f427cf5cSDarrick J. Wong xfs_icwalk_process_inode(
1624f427cf5cSDarrick J. Wong 	enum xfs_icwalk_goal	goal,
1625f427cf5cSDarrick J. Wong 	struct xfs_inode	*ip,
1626f1bc5c56SDarrick J. Wong 	struct xfs_perag	*pag,
1627b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1628f427cf5cSDarrick J. Wong {
1629594ab00bSDarrick J. Wong 	int			error = 0;
1630f427cf5cSDarrick J. Wong 
1631f427cf5cSDarrick J. Wong 	switch (goal) {
1632f427cf5cSDarrick J. Wong 	case XFS_ICWALK_BLOCKGC:
1633b26b2bf1SDarrick J. Wong 		error = xfs_blockgc_scan_inode(ip, icw);
1634f427cf5cSDarrick J. Wong 		break;
1635f1bc5c56SDarrick J. Wong 	case XFS_ICWALK_RECLAIM:
1636f1bc5c56SDarrick J. Wong 		xfs_reclaim_inode(ip, pag);
1637f1bc5c56SDarrick J. Wong 		break;
1638f427cf5cSDarrick J. Wong 	}
1639f427cf5cSDarrick J. Wong 	return error;
1640f427cf5cSDarrick J. Wong }
1641f427cf5cSDarrick J. Wong 
1642b9baaef4SDarrick J. Wong /*
1643f427cf5cSDarrick J. Wong  * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1644f427cf5cSDarrick J. Wong  * process them in some manner.
1645df600197SDarrick J. Wong  */
1646df600197SDarrick J. Wong static int
1647c1115c0cSDarrick J. Wong xfs_icwalk_ag(
1648df600197SDarrick J. Wong 	struct xfs_perag	*pag,
1649f427cf5cSDarrick J. Wong 	enum xfs_icwalk_goal	goal,
1650b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1651df600197SDarrick J. Wong {
1652df600197SDarrick J. Wong 	struct xfs_mount	*mp = pag->pag_mount;
1653df600197SDarrick J. Wong 	uint32_t		first_index;
1654df600197SDarrick J. Wong 	int			last_error = 0;
1655df600197SDarrick J. Wong 	int			skipped;
1656df600197SDarrick J. Wong 	bool			done;
1657df600197SDarrick J. Wong 	int			nr_found;
1658df600197SDarrick J. Wong 
1659df600197SDarrick J. Wong restart:
1660df600197SDarrick J. Wong 	done = false;
1661df600197SDarrick J. Wong 	skipped = 0;
1662f1bc5c56SDarrick J. Wong 	if (goal == XFS_ICWALK_RECLAIM)
1663f1bc5c56SDarrick J. Wong 		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1664f1bc5c56SDarrick J. Wong 	else
1665df600197SDarrick J. Wong 		first_index = 0;
1666df600197SDarrick J. Wong 	nr_found = 0;
1667df600197SDarrick J. Wong 	do {
1668df600197SDarrick J. Wong 		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1669df600197SDarrick J. Wong 		int		error = 0;
1670df600197SDarrick J. Wong 		int		i;
1671df600197SDarrick J. Wong 
1672df600197SDarrick J. Wong 		rcu_read_lock();
1673df600197SDarrick J. Wong 
1674a437b9b4SChristoph Hellwig 		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1675df600197SDarrick J. Wong 				(void **) batch, first_index,
1676a437b9b4SChristoph Hellwig 				XFS_LOOKUP_BATCH, goal);
1677df600197SDarrick J. Wong 		if (!nr_found) {
1678f1bc5c56SDarrick J. Wong 			done = true;
1679df600197SDarrick J. Wong 			rcu_read_unlock();
1680df600197SDarrick J. Wong 			break;
1681df600197SDarrick J. Wong 		}
1682df600197SDarrick J. Wong 
1683df600197SDarrick J. Wong 		/*
1684df600197SDarrick J. Wong 		 * Grab the inodes before we drop the lock. if we found
1685df600197SDarrick J. Wong 		 * nothing, nr == 0 and the loop will be skipped.
1686df600197SDarrick J. Wong 		 */
1687df600197SDarrick J. Wong 		for (i = 0; i < nr_found; i++) {
1688df600197SDarrick J. Wong 			struct xfs_inode *ip = batch[i];
1689df600197SDarrick J. Wong 
1690b26b2bf1SDarrick J. Wong 			if (done || !xfs_icwalk_igrab(goal, ip, icw))
1691df600197SDarrick J. Wong 				batch[i] = NULL;
1692df600197SDarrick J. Wong 
1693df600197SDarrick J. Wong 			/*
1694df600197SDarrick J. Wong 			 * Update the index for the next lookup. Catch
1695df600197SDarrick J. Wong 			 * overflows into the next AG range which can occur if
1696df600197SDarrick J. Wong 			 * we have inodes in the last block of the AG and we
1697df600197SDarrick J. Wong 			 * are currently pointing to the last inode.
1698df600197SDarrick J. Wong 			 *
1699df600197SDarrick J. Wong 			 * Because we may see inodes that are from the wrong AG
1700df600197SDarrick J. Wong 			 * due to RCU freeing and reallocation, only update the
1701df600197SDarrick J. Wong 			 * index if it lies in this AG. It was a race that lead
1702df600197SDarrick J. Wong 			 * us to see this inode, so another lookup from the
1703df600197SDarrick J. Wong 			 * same index will not find it again.
1704df600197SDarrick J. Wong 			 */
1705df600197SDarrick J. Wong 			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1706df600197SDarrick J. Wong 				continue;
1707df600197SDarrick J. Wong 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1708df600197SDarrick J. Wong 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1709df600197SDarrick J. Wong 				done = true;
1710df600197SDarrick J. Wong 		}
1711df600197SDarrick J. Wong 
1712df600197SDarrick J. Wong 		/* unlock now we've grabbed the inodes. */
1713df600197SDarrick J. Wong 		rcu_read_unlock();
1714df600197SDarrick J. Wong 
1715df600197SDarrick J. Wong 		for (i = 0; i < nr_found; i++) {
1716df600197SDarrick J. Wong 			if (!batch[i])
1717df600197SDarrick J. Wong 				continue;
1718f1bc5c56SDarrick J. Wong 			error = xfs_icwalk_process_inode(goal, batch[i], pag,
1719b26b2bf1SDarrick J. Wong 					icw);
1720df600197SDarrick J. Wong 			if (error == -EAGAIN) {
1721df600197SDarrick J. Wong 				skipped++;
1722df600197SDarrick J. Wong 				continue;
1723df600197SDarrick J. Wong 			}
1724df600197SDarrick J. Wong 			if (error && last_error != -EFSCORRUPTED)
1725df600197SDarrick J. Wong 				last_error = error;
1726df600197SDarrick J. Wong 		}
1727df600197SDarrick J. Wong 
1728df600197SDarrick J. Wong 		/* bail out if the filesystem is corrupted.  */
1729df600197SDarrick J. Wong 		if (error == -EFSCORRUPTED)
1730df600197SDarrick J. Wong 			break;
1731df600197SDarrick J. Wong 
1732df600197SDarrick J. Wong 		cond_resched();
1733df600197SDarrick J. Wong 
1734b26b2bf1SDarrick J. Wong 		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1735b26b2bf1SDarrick J. Wong 			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1736b26b2bf1SDarrick J. Wong 			if (icw->icw_scan_limit <= 0)
1737f1bc5c56SDarrick J. Wong 				break;
1738f1bc5c56SDarrick J. Wong 		}
1739df600197SDarrick J. Wong 	} while (nr_found && !done);
1740df600197SDarrick J. Wong 
1741f1bc5c56SDarrick J. Wong 	if (goal == XFS_ICWALK_RECLAIM) {
1742f1bc5c56SDarrick J. Wong 		if (done)
1743f1bc5c56SDarrick J. Wong 			first_index = 0;
1744f1bc5c56SDarrick J. Wong 		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1745f1bc5c56SDarrick J. Wong 	}
1746f1bc5c56SDarrick J. Wong 
1747df600197SDarrick J. Wong 	if (skipped) {
1748df600197SDarrick J. Wong 		delay(1);
1749df600197SDarrick J. Wong 		goto restart;
1750df600197SDarrick J. Wong 	}
1751df600197SDarrick J. Wong 	return last_error;
1752df600197SDarrick J. Wong }
1753df600197SDarrick J. Wong 
1754f427cf5cSDarrick J. Wong /* Walk all incore inodes to achieve a given goal. */
1755df600197SDarrick J. Wong static int
1756c1115c0cSDarrick J. Wong xfs_icwalk(
1757df600197SDarrick J. Wong 	struct xfs_mount	*mp,
1758f427cf5cSDarrick J. Wong 	enum xfs_icwalk_goal	goal,
1759b26b2bf1SDarrick J. Wong 	struct xfs_icwalk	*icw)
1760df600197SDarrick J. Wong {
1761df600197SDarrick J. Wong 	struct xfs_perag	*pag;
1762df600197SDarrick J. Wong 	int			error = 0;
1763df600197SDarrick J. Wong 	int			last_error = 0;
1764a437b9b4SChristoph Hellwig 	xfs_agnumber_t		agno;
1765df600197SDarrick J. Wong 
1766a437b9b4SChristoph Hellwig 	for_each_perag_tag(mp, agno, pag, goal) {
1767b26b2bf1SDarrick J. Wong 		error = xfs_icwalk_ag(pag, goal, icw);
1768df600197SDarrick J. Wong 		if (error) {
1769df600197SDarrick J. Wong 			last_error = error;
1770a437b9b4SChristoph Hellwig 			if (error == -EFSCORRUPTED) {
1771a437b9b4SChristoph Hellwig 				xfs_perag_put(pag);
1772df600197SDarrick J. Wong 				break;
1773df600197SDarrick J. Wong 			}
1774df600197SDarrick J. Wong 		}
1775a437b9b4SChristoph Hellwig 	}
1776df600197SDarrick J. Wong 	return last_error;
17772d53f66bSDarrick J. Wong 	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
1778df600197SDarrick J. Wong }
1779c6c2066dSDarrick J. Wong 
1780c6c2066dSDarrick J. Wong #ifdef DEBUG
1781c6c2066dSDarrick J. Wong static void
1782c6c2066dSDarrick J. Wong xfs_check_delalloc(
1783c6c2066dSDarrick J. Wong 	struct xfs_inode	*ip,
1784c6c2066dSDarrick J. Wong 	int			whichfork)
1785c6c2066dSDarrick J. Wong {
1786c6c2066dSDarrick J. Wong 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
1787c6c2066dSDarrick J. Wong 	struct xfs_bmbt_irec	got;
1788c6c2066dSDarrick J. Wong 	struct xfs_iext_cursor	icur;
1789c6c2066dSDarrick J. Wong 
1790c6c2066dSDarrick J. Wong 	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1791c6c2066dSDarrick J. Wong 		return;
1792c6c2066dSDarrick J. Wong 	do {
1793c6c2066dSDarrick J. Wong 		if (isnullstartblock(got.br_startblock)) {
1794c6c2066dSDarrick J. Wong 			xfs_warn(ip->i_mount,
1795c6c2066dSDarrick J. Wong 	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1796c6c2066dSDarrick J. Wong 				ip->i_ino,
1797c6c2066dSDarrick J. Wong 				whichfork == XFS_DATA_FORK ? "data" : "cow",
1798c6c2066dSDarrick J. Wong 				got.br_startoff, got.br_blockcount);
1799c6c2066dSDarrick J. Wong 		}
1800c6c2066dSDarrick J. Wong 	} while (xfs_iext_next_extent(ifp, &icur, &got));
1801c6c2066dSDarrick J. Wong }
1802c6c2066dSDarrick J. Wong #else
1803c6c2066dSDarrick J. Wong #define xfs_check_delalloc(ip, whichfork)	do { } while (0)
1804c6c2066dSDarrick J. Wong #endif
1805c6c2066dSDarrick J. Wong 
1806ab23a776SDave Chinner /* Schedule the inode for reclaim. */
1807ab23a776SDave Chinner static void
1808ab23a776SDave Chinner xfs_inodegc_set_reclaimable(
1809c6c2066dSDarrick J. Wong 	struct xfs_inode	*ip)
1810c6c2066dSDarrick J. Wong {
1811c6c2066dSDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
1812c6c2066dSDarrick J. Wong 	struct xfs_perag	*pag;
1813c6c2066dSDarrick J. Wong 
1814c6c2066dSDarrick J. Wong 	if (!XFS_FORCED_SHUTDOWN(mp) && ip->i_delayed_blks) {
1815c6c2066dSDarrick J. Wong 		xfs_check_delalloc(ip, XFS_DATA_FORK);
1816c6c2066dSDarrick J. Wong 		xfs_check_delalloc(ip, XFS_COW_FORK);
1817c6c2066dSDarrick J. Wong 		ASSERT(0);
1818c6c2066dSDarrick J. Wong 	}
1819c6c2066dSDarrick J. Wong 
1820c6c2066dSDarrick J. Wong 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1821c6c2066dSDarrick J. Wong 	spin_lock(&pag->pag_ici_lock);
1822c6c2066dSDarrick J. Wong 	spin_lock(&ip->i_flags_lock);
1823c6c2066dSDarrick J. Wong 
1824ab23a776SDave Chinner 	trace_xfs_inode_set_reclaimable(ip);
1825ab23a776SDave Chinner 	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1826ab23a776SDave Chinner 	ip->i_flags |= XFS_IRECLAIMABLE;
1827c6c2066dSDarrick J. Wong 	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1828c6c2066dSDarrick J. Wong 			XFS_ICI_RECLAIM_TAG);
1829c6c2066dSDarrick J. Wong 
1830c6c2066dSDarrick J. Wong 	spin_unlock(&ip->i_flags_lock);
1831c6c2066dSDarrick J. Wong 	spin_unlock(&pag->pag_ici_lock);
1832c6c2066dSDarrick J. Wong 	xfs_perag_put(pag);
1833c6c2066dSDarrick J. Wong }
1834ab23a776SDave Chinner 
1835ab23a776SDave Chinner /*
1836ab23a776SDave Chinner  * Free all speculative preallocations and possibly even the inode itself.
1837ab23a776SDave Chinner  * This is the last chance to make changes to an otherwise unreferenced file
1838ab23a776SDave Chinner  * before incore reclamation happens.
1839ab23a776SDave Chinner  */
1840ab23a776SDave Chinner static void
1841ab23a776SDave Chinner xfs_inodegc_inactivate(
1842ab23a776SDave Chinner 	struct xfs_inode	*ip)
1843ab23a776SDave Chinner {
1844ab23a776SDave Chinner 	trace_xfs_inode_inactivating(ip);
1845ab23a776SDave Chinner 	xfs_inactive(ip);
1846ab23a776SDave Chinner 	xfs_inodegc_set_reclaimable(ip);
1847ab23a776SDave Chinner }
1848ab23a776SDave Chinner 
1849ab23a776SDave Chinner void
1850ab23a776SDave Chinner xfs_inodegc_worker(
1851ab23a776SDave Chinner 	struct work_struct	*work)
1852ab23a776SDave Chinner {
1853ab23a776SDave Chinner 	struct xfs_inodegc	*gc = container_of(work, struct xfs_inodegc,
1854ab23a776SDave Chinner 							work);
1855ab23a776SDave Chinner 	struct llist_node	*node = llist_del_all(&gc->list);
1856ab23a776SDave Chinner 	struct xfs_inode	*ip, *n;
1857ab23a776SDave Chinner 
1858ab23a776SDave Chinner 	WRITE_ONCE(gc->items, 0);
1859ab23a776SDave Chinner 
1860ab23a776SDave Chinner 	if (!node)
1861ab23a776SDave Chinner 		return;
1862ab23a776SDave Chinner 
1863ab23a776SDave Chinner 	ip = llist_entry(node, struct xfs_inode, i_gclist);
186440b1de00SDarrick J. Wong 	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
1865ab23a776SDave Chinner 
186640b1de00SDarrick J. Wong 	WRITE_ONCE(gc->shrinker_hits, 0);
1867ab23a776SDave Chinner 	llist_for_each_entry_safe(ip, n, node, i_gclist) {
1868ab23a776SDave Chinner 		xfs_iflags_set(ip, XFS_INACTIVATING);
1869ab23a776SDave Chinner 		xfs_inodegc_inactivate(ip);
1870ab23a776SDave Chinner 	}
1871ab23a776SDave Chinner }
1872ab23a776SDave Chinner 
1873ab23a776SDave Chinner /*
1874ab23a776SDave Chinner  * Force all currently queued inode inactivation work to run immediately, and
1875ab23a776SDave Chinner  * wait for the work to finish. Two pass - queue all the work first pass, wait
1876ab23a776SDave Chinner  * for it in a second pass.
1877ab23a776SDave Chinner  */
1878ab23a776SDave Chinner void
1879ab23a776SDave Chinner xfs_inodegc_flush(
1880ab23a776SDave Chinner 	struct xfs_mount	*mp)
1881ab23a776SDave Chinner {
1882ab23a776SDave Chinner 	struct xfs_inodegc	*gc;
1883ab23a776SDave Chinner 	int			cpu;
1884ab23a776SDave Chinner 
1885ab23a776SDave Chinner 	if (!xfs_is_inodegc_enabled(mp))
1886ab23a776SDave Chinner 		return;
1887ab23a776SDave Chinner 
1888ab23a776SDave Chinner 	trace_xfs_inodegc_flush(mp, __return_address);
1889ab23a776SDave Chinner 
1890ab23a776SDave Chinner 	xfs_inodegc_queue_all(mp);
1891ab23a776SDave Chinner 
1892ab23a776SDave Chinner 	for_each_online_cpu(cpu) {
1893ab23a776SDave Chinner 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1894ab23a776SDave Chinner 		flush_work(&gc->work);
1895ab23a776SDave Chinner 	}
1896ab23a776SDave Chinner }
1897ab23a776SDave Chinner 
1898ab23a776SDave Chinner /*
1899ab23a776SDave Chinner  * Flush all the pending work and then disable the inode inactivation background
1900ab23a776SDave Chinner  * workers and wait for them to stop.
1901ab23a776SDave Chinner  */
1902ab23a776SDave Chinner void
1903ab23a776SDave Chinner xfs_inodegc_stop(
1904ab23a776SDave Chinner 	struct xfs_mount	*mp)
1905ab23a776SDave Chinner {
1906ab23a776SDave Chinner 	struct xfs_inodegc	*gc;
1907ab23a776SDave Chinner 	int			cpu;
1908ab23a776SDave Chinner 
1909ab23a776SDave Chinner 	if (!xfs_clear_inodegc_enabled(mp))
1910ab23a776SDave Chinner 		return;
1911ab23a776SDave Chinner 
1912ab23a776SDave Chinner 	xfs_inodegc_queue_all(mp);
1913ab23a776SDave Chinner 
1914ab23a776SDave Chinner 	for_each_online_cpu(cpu) {
1915ab23a776SDave Chinner 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1916ab23a776SDave Chinner 		cancel_work_sync(&gc->work);
1917ab23a776SDave Chinner 	}
1918ab23a776SDave Chinner 	trace_xfs_inodegc_stop(mp, __return_address);
1919ab23a776SDave Chinner }
1920ab23a776SDave Chinner 
1921ab23a776SDave Chinner /*
1922ab23a776SDave Chinner  * Enable the inode inactivation background workers and schedule deferred inode
1923ab23a776SDave Chinner  * inactivation work if there is any.
1924ab23a776SDave Chinner  */
1925ab23a776SDave Chinner void
1926ab23a776SDave Chinner xfs_inodegc_start(
1927ab23a776SDave Chinner 	struct xfs_mount	*mp)
1928ab23a776SDave Chinner {
1929ab23a776SDave Chinner 	if (xfs_set_inodegc_enabled(mp))
1930ab23a776SDave Chinner 		return;
1931ab23a776SDave Chinner 
1932ab23a776SDave Chinner 	trace_xfs_inodegc_start(mp, __return_address);
1933ab23a776SDave Chinner 	xfs_inodegc_queue_all(mp);
1934ab23a776SDave Chinner }
1935ab23a776SDave Chinner 
193665f03d86SDarrick J. Wong #ifdef CONFIG_XFS_RT
193765f03d86SDarrick J. Wong static inline bool
193865f03d86SDarrick J. Wong xfs_inodegc_want_queue_rt_file(
193965f03d86SDarrick J. Wong 	struct xfs_inode	*ip)
194065f03d86SDarrick J. Wong {
194165f03d86SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
194265f03d86SDarrick J. Wong 	uint64_t		freertx;
194365f03d86SDarrick J. Wong 
194465f03d86SDarrick J. Wong 	if (!XFS_IS_REALTIME_INODE(ip))
194565f03d86SDarrick J. Wong 		return false;
194665f03d86SDarrick J. Wong 
194765f03d86SDarrick J. Wong 	freertx = READ_ONCE(mp->m_sb.sb_frextents);
194865f03d86SDarrick J. Wong 	return freertx < mp->m_low_rtexts[XFS_LOWSP_5_PCNT];
194965f03d86SDarrick J. Wong }
195065f03d86SDarrick J. Wong #else
195165f03d86SDarrick J. Wong # define xfs_inodegc_want_queue_rt_file(ip)	(false)
195265f03d86SDarrick J. Wong #endif /* CONFIG_XFS_RT */
195365f03d86SDarrick J. Wong 
1954ab23a776SDave Chinner /*
1955ab23a776SDave Chinner  * Schedule the inactivation worker when:
1956ab23a776SDave Chinner  *
1957ab23a776SDave Chinner  *  - We've accumulated more than one inode cluster buffer's worth of inodes.
19587d6f07d2SDarrick J. Wong  *  - There is less than 5% free space left.
1959108523b8SDarrick J. Wong  *  - Any of the quotas for this inode are near an enforcement limit.
1960ab23a776SDave Chinner  */
1961ab23a776SDave Chinner static inline bool
1962ab23a776SDave Chinner xfs_inodegc_want_queue_work(
1963ab23a776SDave Chinner 	struct xfs_inode	*ip,
1964ab23a776SDave Chinner 	unsigned int		items)
1965ab23a776SDave Chinner {
1966ab23a776SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
1967ab23a776SDave Chinner 
1968ab23a776SDave Chinner 	if (items > mp->m_ino_geo.inodes_per_cluster)
1969ab23a776SDave Chinner 		return true;
1970ab23a776SDave Chinner 
19717d6f07d2SDarrick J. Wong 	if (__percpu_counter_compare(&mp->m_fdblocks,
19727d6f07d2SDarrick J. Wong 				mp->m_low_space[XFS_LOWSP_5_PCNT],
19737d6f07d2SDarrick J. Wong 				XFS_FDBLOCKS_BATCH) < 0)
19747d6f07d2SDarrick J. Wong 		return true;
19757d6f07d2SDarrick J. Wong 
197665f03d86SDarrick J. Wong 	if (xfs_inodegc_want_queue_rt_file(ip))
197765f03d86SDarrick J. Wong 		return true;
197865f03d86SDarrick J. Wong 
1979108523b8SDarrick J. Wong 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
1980108523b8SDarrick J. Wong 		return true;
1981108523b8SDarrick J. Wong 
1982108523b8SDarrick J. Wong 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
1983108523b8SDarrick J. Wong 		return true;
1984108523b8SDarrick J. Wong 
1985108523b8SDarrick J. Wong 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
1986108523b8SDarrick J. Wong 		return true;
1987108523b8SDarrick J. Wong 
1988ab23a776SDave Chinner 	return false;
1989ab23a776SDave Chinner }
1990ab23a776SDave Chinner 
1991ab23a776SDave Chinner /*
1992ab23a776SDave Chinner  * Upper bound on the number of inodes in each AG that can be queued for
1993ab23a776SDave Chinner  * inactivation at any given time, to avoid monopolizing the workqueue.
1994ab23a776SDave Chinner  */
1995ab23a776SDave Chinner #define XFS_INODEGC_MAX_BACKLOG		(4 * XFS_INODES_PER_CHUNK)
1996ab23a776SDave Chinner 
1997ab23a776SDave Chinner /*
1998ab23a776SDave Chinner  * Make the frontend wait for inactivations when:
1999ab23a776SDave Chinner  *
200040b1de00SDarrick J. Wong  *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
2001ab23a776SDave Chinner  *  - The queue depth exceeds the maximum allowable percpu backlog.
2002ab23a776SDave Chinner  *
2003ab23a776SDave Chinner  * Note: If the current thread is running a transaction, we don't ever want to
2004ab23a776SDave Chinner  * wait for other transactions because that could introduce a deadlock.
2005ab23a776SDave Chinner  */
2006ab23a776SDave Chinner static inline bool
2007ab23a776SDave Chinner xfs_inodegc_want_flush_work(
2008ab23a776SDave Chinner 	struct xfs_inode	*ip,
200940b1de00SDarrick J. Wong 	unsigned int		items,
201040b1de00SDarrick J. Wong 	unsigned int		shrinker_hits)
2011ab23a776SDave Chinner {
2012ab23a776SDave Chinner 	if (current->journal_info)
2013ab23a776SDave Chinner 		return false;
2014ab23a776SDave Chinner 
201540b1de00SDarrick J. Wong 	if (shrinker_hits > 0)
201640b1de00SDarrick J. Wong 		return true;
201740b1de00SDarrick J. Wong 
2018ab23a776SDave Chinner 	if (items > XFS_INODEGC_MAX_BACKLOG)
2019ab23a776SDave Chinner 		return true;
2020ab23a776SDave Chinner 
2021ab23a776SDave Chinner 	return false;
2022ab23a776SDave Chinner }
2023ab23a776SDave Chinner 
2024ab23a776SDave Chinner /*
2025ab23a776SDave Chinner  * Queue a background inactivation worker if there are inodes that need to be
2026ab23a776SDave Chinner  * inactivated and higher level xfs code hasn't disabled the background
2027ab23a776SDave Chinner  * workers.
2028ab23a776SDave Chinner  */
2029ab23a776SDave Chinner static void
2030ab23a776SDave Chinner xfs_inodegc_queue(
2031ab23a776SDave Chinner 	struct xfs_inode	*ip)
2032ab23a776SDave Chinner {
2033ab23a776SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
2034ab23a776SDave Chinner 	struct xfs_inodegc	*gc;
2035ab23a776SDave Chinner 	int			items;
203640b1de00SDarrick J. Wong 	unsigned int		shrinker_hits;
2037ab23a776SDave Chinner 
2038ab23a776SDave Chinner 	trace_xfs_inode_set_need_inactive(ip);
2039ab23a776SDave Chinner 	spin_lock(&ip->i_flags_lock);
2040ab23a776SDave Chinner 	ip->i_flags |= XFS_NEED_INACTIVE;
2041ab23a776SDave Chinner 	spin_unlock(&ip->i_flags_lock);
2042ab23a776SDave Chinner 
2043ab23a776SDave Chinner 	gc = get_cpu_ptr(mp->m_inodegc);
2044ab23a776SDave Chinner 	llist_add(&ip->i_gclist, &gc->list);
2045ab23a776SDave Chinner 	items = READ_ONCE(gc->items);
2046ab23a776SDave Chinner 	WRITE_ONCE(gc->items, items + 1);
204740b1de00SDarrick J. Wong 	shrinker_hits = READ_ONCE(gc->shrinker_hits);
2048ab23a776SDave Chinner 	put_cpu_ptr(gc);
2049ab23a776SDave Chinner 
2050ab23a776SDave Chinner 	if (!xfs_is_inodegc_enabled(mp))
2051ab23a776SDave Chinner 		return;
2052ab23a776SDave Chinner 
2053ab23a776SDave Chinner 	if (xfs_inodegc_want_queue_work(ip, items)) {
2054ab23a776SDave Chinner 		trace_xfs_inodegc_queue(mp, __return_address);
2055ab23a776SDave Chinner 		queue_work(mp->m_inodegc_wq, &gc->work);
2056ab23a776SDave Chinner 	}
2057ab23a776SDave Chinner 
205840b1de00SDarrick J. Wong 	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2059ab23a776SDave Chinner 		trace_xfs_inodegc_throttle(mp, __return_address);
2060ab23a776SDave Chinner 		flush_work(&gc->work);
2061ab23a776SDave Chinner 	}
2062ab23a776SDave Chinner }
2063ab23a776SDave Chinner 
2064ab23a776SDave Chinner /*
2065ab23a776SDave Chinner  * Fold the dead CPU inodegc queue into the current CPUs queue.
2066ab23a776SDave Chinner  */
2067ab23a776SDave Chinner void
2068ab23a776SDave Chinner xfs_inodegc_cpu_dead(
2069ab23a776SDave Chinner 	struct xfs_mount	*mp,
2070ab23a776SDave Chinner 	unsigned int		dead_cpu)
2071ab23a776SDave Chinner {
2072ab23a776SDave Chinner 	struct xfs_inodegc	*dead_gc, *gc;
2073ab23a776SDave Chinner 	struct llist_node	*first, *last;
2074ab23a776SDave Chinner 	unsigned int		count = 0;
2075ab23a776SDave Chinner 
2076ab23a776SDave Chinner 	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
2077ab23a776SDave Chinner 	cancel_work_sync(&dead_gc->work);
2078ab23a776SDave Chinner 
2079ab23a776SDave Chinner 	if (llist_empty(&dead_gc->list))
2080ab23a776SDave Chinner 		return;
2081ab23a776SDave Chinner 
2082ab23a776SDave Chinner 	first = dead_gc->list.first;
2083ab23a776SDave Chinner 	last = first;
2084ab23a776SDave Chinner 	while (last->next) {
2085ab23a776SDave Chinner 		last = last->next;
2086ab23a776SDave Chinner 		count++;
2087ab23a776SDave Chinner 	}
2088ab23a776SDave Chinner 	dead_gc->list.first = NULL;
2089ab23a776SDave Chinner 	dead_gc->items = 0;
2090ab23a776SDave Chinner 
2091ab23a776SDave Chinner 	/* Add pending work to current CPU */
2092ab23a776SDave Chinner 	gc = get_cpu_ptr(mp->m_inodegc);
2093ab23a776SDave Chinner 	llist_add_batch(first, last, &gc->list);
2094ab23a776SDave Chinner 	count += READ_ONCE(gc->items);
2095ab23a776SDave Chinner 	WRITE_ONCE(gc->items, count);
2096ab23a776SDave Chinner 	put_cpu_ptr(gc);
2097ab23a776SDave Chinner 
2098ab23a776SDave Chinner 	if (xfs_is_inodegc_enabled(mp)) {
2099ab23a776SDave Chinner 		trace_xfs_inodegc_queue(mp, __return_address);
2100ab23a776SDave Chinner 		queue_work(mp->m_inodegc_wq, &gc->work);
2101ab23a776SDave Chinner 	}
2102ab23a776SDave Chinner }
2103ab23a776SDave Chinner 
2104ab23a776SDave Chinner /*
2105ab23a776SDave Chinner  * We set the inode flag atomically with the radix tree tag.  Once we get tag
2106ab23a776SDave Chinner  * lookups on the radix tree, this inode flag can go away.
2107ab23a776SDave Chinner  *
2108ab23a776SDave Chinner  * We always use background reclaim here because even if the inode is clean, it
2109ab23a776SDave Chinner  * still may be under IO and hence we have wait for IO completion to occur
2110ab23a776SDave Chinner  * before we can reclaim the inode. The background reclaim path handles this
2111ab23a776SDave Chinner  * more efficiently than we can here, so simply let background reclaim tear down
2112ab23a776SDave Chinner  * all inodes.
2113ab23a776SDave Chinner  */
2114ab23a776SDave Chinner void
2115ab23a776SDave Chinner xfs_inode_mark_reclaimable(
2116ab23a776SDave Chinner 	struct xfs_inode	*ip)
2117ab23a776SDave Chinner {
2118ab23a776SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
2119ab23a776SDave Chinner 	bool			need_inactive;
2120ab23a776SDave Chinner 
2121ab23a776SDave Chinner 	XFS_STATS_INC(mp, vn_reclaim);
2122ab23a776SDave Chinner 
2123ab23a776SDave Chinner 	/*
2124ab23a776SDave Chinner 	 * We should never get here with any of the reclaim flags already set.
2125ab23a776SDave Chinner 	 */
2126ab23a776SDave Chinner 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2127ab23a776SDave Chinner 
2128ab23a776SDave Chinner 	need_inactive = xfs_inode_needs_inactive(ip);
2129ab23a776SDave Chinner 	if (need_inactive) {
2130ab23a776SDave Chinner 		xfs_inodegc_queue(ip);
2131ab23a776SDave Chinner 		return;
2132ab23a776SDave Chinner 	}
2133ab23a776SDave Chinner 
2134ab23a776SDave Chinner 	/* Going straight to reclaim, so drop the dquots. */
2135ab23a776SDave Chinner 	xfs_qm_dqdetach(ip);
2136ab23a776SDave Chinner 	xfs_inodegc_set_reclaimable(ip);
2137ab23a776SDave Chinner }
213840b1de00SDarrick J. Wong 
213940b1de00SDarrick J. Wong /*
214040b1de00SDarrick J. Wong  * Register a phony shrinker so that we can run background inodegc sooner when
214140b1de00SDarrick J. Wong  * there's memory pressure.  Inactivation does not itself free any memory but
214240b1de00SDarrick J. Wong  * it does make inodes reclaimable, which eventually frees memory.
214340b1de00SDarrick J. Wong  *
214440b1de00SDarrick J. Wong  * The count function, seek value, and batch value are crafted to trigger the
214540b1de00SDarrick J. Wong  * scan function during the second round of scanning.  Hopefully this means
214640b1de00SDarrick J. Wong  * that we reclaimed enough memory that initiating metadata transactions won't
214740b1de00SDarrick J. Wong  * make things worse.
214840b1de00SDarrick J. Wong  */
214940b1de00SDarrick J. Wong #define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
215040b1de00SDarrick J. Wong #define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
215140b1de00SDarrick J. Wong 
215240b1de00SDarrick J. Wong static unsigned long
215340b1de00SDarrick J. Wong xfs_inodegc_shrinker_count(
215440b1de00SDarrick J. Wong 	struct shrinker		*shrink,
215540b1de00SDarrick J. Wong 	struct shrink_control	*sc)
215640b1de00SDarrick J. Wong {
215740b1de00SDarrick J. Wong 	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
215840b1de00SDarrick J. Wong 						   m_inodegc_shrinker);
215940b1de00SDarrick J. Wong 	struct xfs_inodegc	*gc;
216040b1de00SDarrick J. Wong 	int			cpu;
216140b1de00SDarrick J. Wong 
216240b1de00SDarrick J. Wong 	if (!xfs_is_inodegc_enabled(mp))
216340b1de00SDarrick J. Wong 		return 0;
216440b1de00SDarrick J. Wong 
216540b1de00SDarrick J. Wong 	for_each_online_cpu(cpu) {
216640b1de00SDarrick J. Wong 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
216740b1de00SDarrick J. Wong 		if (!llist_empty(&gc->list))
216840b1de00SDarrick J. Wong 			return XFS_INODEGC_SHRINKER_COUNT;
216940b1de00SDarrick J. Wong 	}
217040b1de00SDarrick J. Wong 
217140b1de00SDarrick J. Wong 	return 0;
217240b1de00SDarrick J. Wong }
217340b1de00SDarrick J. Wong 
217440b1de00SDarrick J. Wong static unsigned long
217540b1de00SDarrick J. Wong xfs_inodegc_shrinker_scan(
217640b1de00SDarrick J. Wong 	struct shrinker		*shrink,
217740b1de00SDarrick J. Wong 	struct shrink_control	*sc)
217840b1de00SDarrick J. Wong {
217940b1de00SDarrick J. Wong 	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
218040b1de00SDarrick J. Wong 						   m_inodegc_shrinker);
218140b1de00SDarrick J. Wong 	struct xfs_inodegc	*gc;
218240b1de00SDarrick J. Wong 	int			cpu;
218340b1de00SDarrick J. Wong 	bool			no_items = true;
218440b1de00SDarrick J. Wong 
218540b1de00SDarrick J. Wong 	if (!xfs_is_inodegc_enabled(mp))
218640b1de00SDarrick J. Wong 		return SHRINK_STOP;
218740b1de00SDarrick J. Wong 
218840b1de00SDarrick J. Wong 	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
218940b1de00SDarrick J. Wong 
219040b1de00SDarrick J. Wong 	for_each_online_cpu(cpu) {
219140b1de00SDarrick J. Wong 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
219240b1de00SDarrick J. Wong 		if (!llist_empty(&gc->list)) {
219340b1de00SDarrick J. Wong 			unsigned int	h = READ_ONCE(gc->shrinker_hits);
219440b1de00SDarrick J. Wong 
219540b1de00SDarrick J. Wong 			WRITE_ONCE(gc->shrinker_hits, h + 1);
219640b1de00SDarrick J. Wong 			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
219740b1de00SDarrick J. Wong 			no_items = false;
219840b1de00SDarrick J. Wong 		}
219940b1de00SDarrick J. Wong 	}
220040b1de00SDarrick J. Wong 
220140b1de00SDarrick J. Wong 	/*
220240b1de00SDarrick J. Wong 	 * If there are no inodes to inactivate, we don't want the shrinker
220340b1de00SDarrick J. Wong 	 * to think there's deferred work to call us back about.
220440b1de00SDarrick J. Wong 	 */
220540b1de00SDarrick J. Wong 	if (no_items)
220640b1de00SDarrick J. Wong 		return LONG_MAX;
220740b1de00SDarrick J. Wong 
220840b1de00SDarrick J. Wong 	return SHRINK_STOP;
220940b1de00SDarrick J. Wong }
221040b1de00SDarrick J. Wong 
221140b1de00SDarrick J. Wong /* Register a shrinker so we can accelerate inodegc and throttle queuing. */
221240b1de00SDarrick J. Wong int
221340b1de00SDarrick J. Wong xfs_inodegc_register_shrinker(
221440b1de00SDarrick J. Wong 	struct xfs_mount	*mp)
221540b1de00SDarrick J. Wong {
221640b1de00SDarrick J. Wong 	struct shrinker		*shrink = &mp->m_inodegc_shrinker;
221740b1de00SDarrick J. Wong 
221840b1de00SDarrick J. Wong 	shrink->count_objects = xfs_inodegc_shrinker_count;
221940b1de00SDarrick J. Wong 	shrink->scan_objects = xfs_inodegc_shrinker_scan;
222040b1de00SDarrick J. Wong 	shrink->seeks = 0;
222140b1de00SDarrick J. Wong 	shrink->flags = SHRINKER_NONSLAB;
222240b1de00SDarrick J. Wong 	shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
222340b1de00SDarrick J. Wong 
222440b1de00SDarrick J. Wong 	return register_shrinker(shrink);
222540b1de00SDarrick J. Wong }
2226