// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes.  These can correspond with incore inode
 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
};

static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);

/*
 * Private inode cache walk flags for struct xfs_icwalk.  Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode or i_state! */
	VFS_I(ip)->i_mode = 0;
	VFS_I(ip)->i_state = 0;
	mapping_set_large_folios(VFS_I(ip)->i_mapping);

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_cowfp = NULL;
	memset(&ip->i_af, 0, sizeof(ip->i_af));
	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);
	ip->i_next_unlinked = NULLAGINO;
	ip->i_prev_unlinked = 0;

	return ip;
}

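/*
 * RCU callback that actually frees an inode: tear down the data fork (for
 * regular files, directories and symlinks), zap the attr fork, free the CoW
 * fork and the inode log item if they exist, then return the xfs_inode to
 * the slab cache.
 */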
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	xfs_ifork_zap_attr(ip);

	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_cache, ip);
}

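/*
 * Common final teardown for both the iget error and reclaim paths: sanity
 * check the pin count and log item state, then defer the actual freeing to
 * an RCU grace period so that concurrent RCU lookups see consistent state.
 */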
static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

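/*
 * Free an xfs_inode immediately: flag it as being reclaimed with an invalid
 * inode number so that concurrent RCU lookups skip it, then hand it to
 * __xfs_inode_free().
 */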
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue background inode reclaim work if there are reclaimable inodes and
 * there isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}

/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(mp);
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(pag, _RET_IP_);
}

/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	mapping_set_large_folios(inode->i_mapping);
	return error;
}

/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state.  Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return -EAGAIN;

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode.  We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error) {
		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble.  Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/* Make all pending inactivation work start immediately. */
static bool
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			ret = false;

	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			ret = true;
		}
	}

	return ret;
}

/* Wait for all queued work and collect errors */
static int
xfs_inodegc_wait_all(
	struct xfs_mount	*mp)
{
	int			cpu;
	int			error = 0;

	flush_workqueue(mp->m_inodegc_wq);
	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
		struct xfs_inodegc	*gc;

		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (gc->error && !error)
			error = gc->error;
		gc->error = 0;
	}

	return error;
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet.  The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock.  For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error == -EAGAIN)
			goto out_skip;
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock. We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}

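/*
 * Slow path of xfs_iget: allocate a new incore inode, read it in from disk
 * where required, and insert it into the per-AG radix tree.  Returns -EAGAIN
 * if the radix tree preload fails or a racing insert beat us, so that the
 * caller can retry the lookup.
 */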
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_has_v3inodes(mp) &&
	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
		VFS_I(ip)->i_generation = get_random_u32();
	} else {
		struct xfs_buf	*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system. The inode is looked up
 * in the cache held in each AG. If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!xfs_verify_ino(mp, ino))
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set up the inode
	 * now.  If it's a new inode being created, xfs_init_new_inode will
	 * handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) &&
	    error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait until the inode is able to be
 * reclaimed.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push
 * the AIL first to trigger writeback of dirty inodes.  This enables writeback
 * to be done in the background in a non-blocking manner, and enables memory
 * reclaim to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	/*
	 * Check for log shutdown because aborting the inode can move the log
	 * tail and corrupt in memory state. This is fine if the log is shut
	 * down, but if the log is still active and only the mount is shut down
	 * then the in-memory log tail movement caused by the abort can be
	 * incorrectly propagated to disk.
	 */
	if (xlog_is_shutdown(ip->i_mount->m_log)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_shutdown_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
	trace_xfs_inode_reclaiming(ip);

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode lifetime early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/* Reclaim sick inodes if we're unmounting or the fs went down. */
static inline bool
xfs_want_reclaim_sick(
	struct xfs_mount	*mp)
{
	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
	       xfs_is_shutdown(mp);
}

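/*
 * Synchronously reclaim all reclaimable inodes: push the AIL to write back
 * dirty inodes, then walk the reclaim tag until nothing is left.  Sick inodes
 * are included if the fs is unmounting, mounted with norecovery, or shut down.
 */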
void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim.  We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here.  We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do, so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	long			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

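/*
 * An AND-based inode filter: the inode matches only if every id criterion
 * (uid, gid, project id) that the caller set in @icw matches the inode.
 */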
102739b1cfd7SDarrick J. Wong STATIC bool
xfs_icwalk_match_id(struct xfs_inode * ip,struct xfs_icwalk * icw)1028b26b2bf1SDarrick J. Wong xfs_icwalk_match_id(
10293e3f9f58SBrian Foster struct xfs_inode *ip,
1030b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw)
10313e3f9f58SBrian Foster {
1032b26b2bf1SDarrick J. Wong if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1033b26b2bf1SDarrick J. Wong !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
103439b1cfd7SDarrick J. Wong return false;
10351b556048SBrian Foster
1036b26b2bf1SDarrick J. Wong if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1037b26b2bf1SDarrick J. Wong !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
103839b1cfd7SDarrick J. Wong return false;
10391b556048SBrian Foster
1040b26b2bf1SDarrick J. Wong if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1041b26b2bf1SDarrick J. Wong ip->i_projid != icw->icw_prid)
104239b1cfd7SDarrick J. Wong return false;
10431b556048SBrian Foster
104439b1cfd7SDarrick J. Wong return true;
10453e3f9f58SBrian Foster }
10463e3f9f58SBrian Foster
1047f4526397SBrian Foster /*
1048f4526397SBrian Foster * A union-based inode filtering algorithm. Process the inode if any of the
1049f4526397SBrian Foster * criteria match. This is for global/internal scans only.
1050f4526397SBrian Foster */
105139b1cfd7SDarrick J. Wong STATIC bool
xfs_icwalk_match_id_union(struct xfs_inode * ip,struct xfs_icwalk * icw)1052b26b2bf1SDarrick J. Wong xfs_icwalk_match_id_union(
1053f4526397SBrian Foster struct xfs_inode *ip,
1054b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw)
1055f4526397SBrian Foster {
1056b26b2bf1SDarrick J. Wong if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1057b26b2bf1SDarrick J. Wong uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
105839b1cfd7SDarrick J. Wong return true;
1059f4526397SBrian Foster
1060b26b2bf1SDarrick J. Wong if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1061b26b2bf1SDarrick J. Wong gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
106239b1cfd7SDarrick J. Wong return true;
1063f4526397SBrian Foster
1064b26b2bf1SDarrick J. Wong if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1065b26b2bf1SDarrick J. Wong ip->i_projid == icw->icw_prid)
106639b1cfd7SDarrick J. Wong return true;
1067f4526397SBrian Foster
106839b1cfd7SDarrick J. Wong return false;
1069f4526397SBrian Foster }
1070f4526397SBrian Foster
1071a91bf992SDarrick J. Wong /*
1072a91bf992SDarrick J. Wong * Is this inode @ip eligible for eof/cow block reclamation, given some
1073b26b2bf1SDarrick J. Wong * filtering parameters @icw? The inode is eligible if @icw is null or
1074a91bf992SDarrick J. Wong * if the predicate functions match.
1075a91bf992SDarrick J. Wong */
1076a91bf992SDarrick J. Wong static bool
xfs_icwalk_match(struct xfs_inode * ip,struct xfs_icwalk * icw)1077b26b2bf1SDarrick J. Wong xfs_icwalk_match(
1078a91bf992SDarrick J. Wong struct xfs_inode *ip,
1079b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw)
1080a91bf992SDarrick J. Wong {
108139b1cfd7SDarrick J. Wong bool match;
1082a91bf992SDarrick J. Wong
1083b26b2bf1SDarrick J. Wong if (!icw)
1084a91bf992SDarrick J. Wong return true;
1085a91bf992SDarrick J. Wong
1086b26b2bf1SDarrick J. Wong if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
1087b26b2bf1SDarrick J. Wong match = xfs_icwalk_match_id_union(ip, icw);
1088a91bf992SDarrick J. Wong else
1089b26b2bf1SDarrick J. Wong match = xfs_icwalk_match_id(ip, icw);
1090a91bf992SDarrick J. Wong if (!match)
1091a91bf992SDarrick J. Wong return false;
1092a91bf992SDarrick J. Wong
1093a91bf992SDarrick J. Wong /* skip the inode if the file size is too small */
1094b26b2bf1SDarrick J. Wong if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
1095b26b2bf1SDarrick J. Wong XFS_ISIZE(ip) < icw->icw_min_file_size)
1096a91bf992SDarrick J. Wong return false;
1097a91bf992SDarrick J. Wong
1098a91bf992SDarrick J. Wong return true;
1099a91bf992SDarrick J. Wong }
1100a91bf992SDarrick J. Wong
11014d0bab3aSDave Chinner /*
11024d0bab3aSDave Chinner * This is a fast pass over the inode cache to try to get reclaim moving on as
11034d0bab3aSDave Chinner * many inodes as possible in a short period of time. It kicks itself every few
11044d0bab3aSDave Chinner * seconds, as well as being kicked by the inode cache shrinker when memory
110502511a5aSDave Chinner * goes low.
11064d0bab3aSDave Chinner */
11074d0bab3aSDave Chinner void
11084d0bab3aSDave Chinner xfs_reclaim_worker(
11094d0bab3aSDave Chinner struct work_struct *work)
11104d0bab3aSDave Chinner {
11114d0bab3aSDave Chinner struct xfs_mount *mp = container_of(to_delayed_work(work),
11124d0bab3aSDave Chinner struct xfs_mount, m_reclaim_work);
11134d0bab3aSDave Chinner
1114f1bc5c56SDarrick J. Wong xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
11154d0bab3aSDave Chinner xfs_reclaim_work_queue(mp);
11164d0bab3aSDave Chinner }
11174d0bab3aSDave Chinner
11183e3f9f58SBrian Foster STATIC int
111941176a68SBrian Foster xfs_inode_free_eofblocks(
112041176a68SBrian Foster struct xfs_inode *ip,
1121b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw,
11220fa4a10aSDarrick J. Wong unsigned int *lockflags)
112341176a68SBrian Foster {
1124390600f8SDarrick J. Wong bool wait;
1125390600f8SDarrick J. Wong
1126b26b2bf1SDarrick J. Wong wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
11275400da7dSBrian Foster
1128ce2d3bbeSDarrick J. Wong if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1129ce2d3bbeSDarrick J. Wong return 0;
1130ce2d3bbeSDarrick J. Wong
113141176a68SBrian Foster /*
113241176a68SBrian Foster * If the mapping is dirty the operation can block and wait for some
113341176a68SBrian Foster * time. Unless we are waiting, skip it.
113441176a68SBrian Foster */
1135390600f8SDarrick J. Wong if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
113641176a68SBrian Foster return 0;
113741176a68SBrian Foster
1138b26b2bf1SDarrick J. Wong if (!xfs_icwalk_match(ip, icw))
11393e3f9f58SBrian Foster return 0;
11403e3f9f58SBrian Foster
1141a36b9261SBrian Foster /*
1142a36b9261SBrian Foster * If the caller is waiting, return -EAGAIN to keep the background
1143a36b9261SBrian Foster * scanner moving and revisit the inode in a subsequent pass.
1144a36b9261SBrian Foster */
1145c3155097SBrian Foster if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1146390600f8SDarrick J. Wong if (wait)
1147390600f8SDarrick J. Wong return -EAGAIN;
1148390600f8SDarrick J. Wong return 0;
1149a36b9261SBrian Foster }
11500fa4a10aSDarrick J. Wong *lockflags |= XFS_IOLOCK_EXCL;
1151390600f8SDarrick J. Wong
11522bc2d49cSChristoph Hellwig if (xfs_can_free_eofblocks(ip))
11530fa4a10aSDarrick J. Wong return xfs_free_eofblocks(ip);
11542b156ff8SDarrick J. Wong
11552b156ff8SDarrick J. Wong /* inode could be preallocated or append-only */
11562b156ff8SDarrick J. Wong trace_xfs_inode_free_eofblocks_invalid(ip);
11572b156ff8SDarrick J. Wong xfs_inode_clear_eofblocks_tag(ip);
11582b156ff8SDarrick J. Wong return 0;
115941176a68SBrian Foster }
116041176a68SBrian Foster
116183104d44SDarrick J. Wong static void
1162ce2d3bbeSDarrick J. Wong xfs_blockgc_set_iflag(
1163ce2d3bbeSDarrick J. Wong struct xfs_inode *ip,
1164ce2d3bbeSDarrick J. Wong unsigned long iflag)
116527b52867SBrian Foster {
116627b52867SBrian Foster struct xfs_mount *mp = ip->i_mount;
116727b52867SBrian Foster struct xfs_perag *pag;
116827b52867SBrian Foster
1169ce2d3bbeSDarrick J. Wong ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1170ce2d3bbeSDarrick J. Wong
117185a6e764SChristoph Hellwig /*
117285a6e764SChristoph Hellwig * Don't bother locking the AG and looking up in the radix trees
117385a6e764SChristoph Hellwig * if we already know that we have the tag set.
117485a6e764SChristoph Hellwig */
1175ce2d3bbeSDarrick J. Wong if (ip->i_flags & iflag)
117685a6e764SChristoph Hellwig return;
117785a6e764SChristoph Hellwig spin_lock(&ip->i_flags_lock);
1178ce2d3bbeSDarrick J. Wong ip->i_flags |= iflag;
117985a6e764SChristoph Hellwig spin_unlock(&ip->i_flags_lock);
118085a6e764SChristoph Hellwig
118127b52867SBrian Foster pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
118227b52867SBrian Foster spin_lock(&pag->pag_ici_lock);
118327b52867SBrian Foster
1184c076ae7aSDarrick J. Wong xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1185ce2d3bbeSDarrick J. Wong XFS_ICI_BLOCKGC_TAG);
118627b52867SBrian Foster
118727b52867SBrian Foster spin_unlock(&pag->pag_ici_lock);
118827b52867SBrian Foster xfs_perag_put(pag);
118927b52867SBrian Foster }
119027b52867SBrian Foster
119127b52867SBrian Foster void
119283104d44SDarrick J. Wong xfs_inode_set_eofblocks_tag(
119327b52867SBrian Foster xfs_inode_t *ip)
119427b52867SBrian Foster {
119583104d44SDarrick J. Wong trace_xfs_inode_set_eofblocks_tag(ip);
11969669f51dSDarrick J. Wong return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
119783104d44SDarrick J. Wong }
119883104d44SDarrick J. Wong
119983104d44SDarrick J. Wong static void
1200ce2d3bbeSDarrick J. Wong xfs_blockgc_clear_iflag(
1201ce2d3bbeSDarrick J. Wong struct xfs_inode *ip,
1202ce2d3bbeSDarrick J. Wong unsigned long iflag)
120383104d44SDarrick J. Wong {
120427b52867SBrian Foster struct xfs_mount *mp = ip->i_mount;
120527b52867SBrian Foster struct xfs_perag *pag;
1206ce2d3bbeSDarrick J. Wong bool clear_tag;
1207ce2d3bbeSDarrick J. Wong
1208ce2d3bbeSDarrick J. Wong ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
120927b52867SBrian Foster
121085a6e764SChristoph Hellwig spin_lock(&ip->i_flags_lock);
1211ce2d3bbeSDarrick J. Wong ip->i_flags &= ~iflag;
1212ce2d3bbeSDarrick J. Wong clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
121385a6e764SChristoph Hellwig spin_unlock(&ip->i_flags_lock);
121485a6e764SChristoph Hellwig
1215ce2d3bbeSDarrick J. Wong if (!clear_tag)
1216ce2d3bbeSDarrick J. Wong return;
1217ce2d3bbeSDarrick J. Wong
121827b52867SBrian Foster pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
121927b52867SBrian Foster spin_lock(&pag->pag_ici_lock);
122027b52867SBrian Foster
1221c076ae7aSDarrick J. Wong xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1222ce2d3bbeSDarrick J. Wong XFS_ICI_BLOCKGC_TAG);
122327b52867SBrian Foster
122427b52867SBrian Foster spin_unlock(&pag->pag_ici_lock);
122527b52867SBrian Foster xfs_perag_put(pag);
122627b52867SBrian Foster }
122727b52867SBrian Foster
122883104d44SDarrick J. Wong void
122983104d44SDarrick J. Wong xfs_inode_clear_eofblocks_tag(
123083104d44SDarrick J. Wong xfs_inode_t *ip)
123183104d44SDarrick J. Wong {
123283104d44SDarrick J. Wong trace_xfs_inode_clear_eofblocks_tag(ip);
1233ce2d3bbeSDarrick J. Wong return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
123483104d44SDarrick J. Wong }
123583104d44SDarrick J. Wong
123683104d44SDarrick J. Wong /*
1237f56db9ceSBrian Foster * Prepare to free COW fork blocks from an inode.
1238be78ff0eSDarrick J. Wong */
1239be78ff0eSDarrick J. Wong static bool
1240be78ff0eSDarrick J. Wong xfs_prep_free_cowblocks(
1241f56db9ceSBrian Foster struct xfs_inode *ip,
1242f56db9ceSBrian Foster struct xfs_icwalk *icw)
1243be78ff0eSDarrick J. Wong {
1244f56db9ceSBrian Foster bool sync;
1245f56db9ceSBrian Foster
1246f56db9ceSBrian Foster sync = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1247f56db9ceSBrian Foster
1248be78ff0eSDarrick J. Wong /*
1249be78ff0eSDarrick J. Wong * Just clear the tag if we have an empty cow fork or none at all. It's
1250be78ff0eSDarrick J. Wong * possible the inode was fully unshared since it was originally tagged.
1251be78ff0eSDarrick J. Wong */
125251d62690SChristoph Hellwig if (!xfs_inode_has_cow_data(ip)) {
1253be78ff0eSDarrick J. Wong trace_xfs_inode_free_cowblocks_invalid(ip);
1254be78ff0eSDarrick J. Wong xfs_inode_clear_cowblocks_tag(ip);
1255be78ff0eSDarrick J. Wong return false;
1256be78ff0eSDarrick J. Wong }
1257be78ff0eSDarrick J. Wong
1258be78ff0eSDarrick J. Wong /*
1259f56db9ceSBrian Foster * A cowblocks trim of an inode can have a significant effect on
1260f56db9ceSBrian Foster * fragmentation even when a reasonable COW extent size hint is set.
1261f56db9ceSBrian Foster * Therefore, we prefer to not process cowblocks unless they are clean
1262f56db9ceSBrian Foster * and idle. We can never process a cowblocks inode that is dirty or has
1263f56db9ceSBrian Foster * in-flight I/O under any circumstances, because outstanding writeback
1264f56db9ceSBrian Foster * or dio expects the targeted COW fork blocks to exist through write
1265f56db9ceSBrian Foster * completion where they can be remapped into the data fork.
1266f56db9ceSBrian Foster *
1267f56db9ceSBrian Foster * Hence, the heuristic used here is to never process inodes
1268f56db9ceSBrian Foster * currently opened for write from background (i.e. non-sync) scans. For
1269f56db9ceSBrian Foster * sync scans, use the pagecache/dio state of the inode to ensure we
1270f56db9ceSBrian Foster * never free COW fork blocks out from under pending I/O.
1271be78ff0eSDarrick J. Wong */
1272f56db9ceSBrian Foster if (!sync && inode_is_open_for_write(VFS_I(ip)))
1273f56db9ceSBrian Foster return false;
1274*7b5b1191SBrian Foster return xfs_can_free_cowblocks(ip);
1275be78ff0eSDarrick J. Wong }
1276be78ff0eSDarrick J. Wong
1277be78ff0eSDarrick J. Wong /*
127883104d44SDarrick J. Wong * Automatic CoW Reservation Freeing
127983104d44SDarrick J. Wong *
128083104d44SDarrick J. Wong * These functions automatically garbage collect leftover CoW reservations
128183104d44SDarrick J. Wong * that were made on behalf of a cowextsize hint when we start to run out
128283104d44SDarrick J. Wong * of quota or when the reservations sit around for too long. If the file
128383104d44SDarrick J. Wong * has dirty pages or is undergoing writeback, its CoW reservations will
128483104d44SDarrick J. Wong * be retained.
128583104d44SDarrick J. Wong *
128683104d44SDarrick J. Wong * The actual garbage collection piggybacks off the same code that runs
128783104d44SDarrick J. Wong * the speculative EOF preallocation garbage collector.
128883104d44SDarrick J. Wong */
128983104d44SDarrick J. Wong STATIC int
129083104d44SDarrick J. Wong xfs_inode_free_cowblocks(
129183104d44SDarrick J. Wong struct xfs_inode *ip,
1292b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw,
12930fa4a10aSDarrick J. Wong unsigned int *lockflags)
129483104d44SDarrick J. Wong {
1295f41a0716SDarrick J. Wong bool wait;
1296be78ff0eSDarrick J. Wong int ret = 0;
129783104d44SDarrick J. Wong
1298b26b2bf1SDarrick J. Wong wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1299f41a0716SDarrick J. Wong
1300ce2d3bbeSDarrick J. Wong if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1301ce2d3bbeSDarrick J. Wong return 0;
1302ce2d3bbeSDarrick J. Wong
1303f56db9ceSBrian Foster if (!xfs_prep_free_cowblocks(ip, icw))
130483104d44SDarrick J. Wong return 0;
130583104d44SDarrick J. Wong
1306b26b2bf1SDarrick J. Wong if (!xfs_icwalk_match(ip, icw))
130783104d44SDarrick J. Wong return 0;
130883104d44SDarrick J. Wong
1309f41a0716SDarrick J. Wong /*
1310f41a0716SDarrick J. Wong * If the caller is waiting, return -EAGAIN to keep the background
1311f41a0716SDarrick J. Wong * scanner moving and revisit the inode in a subsequent pass.
1312f41a0716SDarrick J. Wong */
13130fa4a10aSDarrick J. Wong if (!(*lockflags & XFS_IOLOCK_EXCL) &&
13140fa4a10aSDarrick J. Wong !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1315f41a0716SDarrick J. Wong if (wait)
1316f41a0716SDarrick J. Wong return -EAGAIN;
1317f41a0716SDarrick J. Wong return 0;
1318f41a0716SDarrick J. Wong }
13190fa4a10aSDarrick J. Wong *lockflags |= XFS_IOLOCK_EXCL;
13200fa4a10aSDarrick J. Wong
1321f41a0716SDarrick J. Wong if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1322f41a0716SDarrick J. Wong if (wait)
13230fa4a10aSDarrick J. Wong return -EAGAIN;
13240fa4a10aSDarrick J. Wong return 0;
1325f41a0716SDarrick J. Wong }
13260fa4a10aSDarrick J. Wong *lockflags |= XFS_MMAPLOCK_EXCL;
132783104d44SDarrick J. Wong
1328be78ff0eSDarrick J. Wong /*
1329be78ff0eSDarrick J. Wong * Check again, nobody else should be able to dirty blocks or change
1330be78ff0eSDarrick J. Wong * the reflink iflag now that we have the first two locks held.
1331be78ff0eSDarrick J. Wong */
1332f56db9ceSBrian Foster if (xfs_prep_free_cowblocks(ip, icw))
13333802a345SChristoph Hellwig ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
133483104d44SDarrick J. Wong return ret;
133583104d44SDarrick J. Wong }
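
/*
 * A minimal sketch of the trylock cascade used by the scanners above:
 * the IOLOCK is always taken before the MMAPLOCK, each with a
 * non-blocking attempt so a background scan never stalls on a busy
 * inode.  Example only; the real helpers accumulate *lockflags so the
 * caller drops everything with one xfs_iunlock() when the scan ends.
 */
#if 0	/* example only, never compiled */
static int
example_trylock_cascade(
	struct xfs_inode	*ip)
{
	unsigned int		lockflags = 0;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
		return -EAGAIN;
	lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		xfs_iunlock(ip, lockflags);
		return -EAGAIN;
	}
	lockflags |= XFS_MMAPLOCK_EXCL;

	/* ... garbage collect ... */

	xfs_iunlock(ip, lockflags);
	return 0;
}
#endif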
133683104d44SDarrick J. Wong
133783104d44SDarrick J. Wong void
133883104d44SDarrick J. Wong xfs_inode_set_cowblocks_tag(
133983104d44SDarrick J. Wong xfs_inode_t *ip)
134083104d44SDarrick J. Wong {
13417b7381f0SBrian Foster trace_xfs_inode_set_cowblocks_tag(ip);
13429669f51dSDarrick J. Wong return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
134383104d44SDarrick J. Wong }
134483104d44SDarrick J. Wong
134583104d44SDarrick J. Wong void
134683104d44SDarrick J. Wong xfs_inode_clear_cowblocks_tag(
134783104d44SDarrick J. Wong xfs_inode_t *ip)
134883104d44SDarrick J. Wong {
13497b7381f0SBrian Foster trace_xfs_inode_clear_cowblocks_tag(ip);
1350ce2d3bbeSDarrick J. Wong return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
135183104d44SDarrick J. Wong }
1352d6b636ebSDarrick J. Wong
1353d6b636ebSDarrick J. Wong /* Disable post-EOF and CoW block auto-reclamation. */
1354d6b636ebSDarrick J. Wong void
1355c9a6526fSDarrick J. Wong xfs_blockgc_stop(
1356d6b636ebSDarrick J. Wong struct xfs_mount *mp)
1357d6b636ebSDarrick J. Wong {
1358894ecacfSDarrick J. Wong struct xfs_perag *pag;
1359894ecacfSDarrick J. Wong xfs_agnumber_t agno;
1360894ecacfSDarrick J. Wong
13616f649091SDarrick J. Wong if (!xfs_clear_blockgc_enabled(mp))
13626f649091SDarrick J. Wong return;
13636f649091SDarrick J. Wong
13646f649091SDarrick J. Wong for_each_perag(mp, agno, pag)
1365894ecacfSDarrick J. Wong cancel_delayed_work_sync(&pag->pag_blockgc_work);
13666f649091SDarrick J. Wong trace_xfs_blockgc_stop(mp, __return_address);
1367d6b636ebSDarrick J. Wong }
1368d6b636ebSDarrick J. Wong
1369d6b636ebSDarrick J. Wong /* Enable post-EOF and CoW block auto-reclamation. */
1370d6b636ebSDarrick J. Wong void
1371c9a6526fSDarrick J. Wong xfs_blockgc_start(
1372d6b636ebSDarrick J. Wong struct xfs_mount *mp)
1373d6b636ebSDarrick J. Wong {
1374894ecacfSDarrick J. Wong struct xfs_perag *pag;
1375894ecacfSDarrick J. Wong xfs_agnumber_t agno;
1376894ecacfSDarrick J. Wong
13776f649091SDarrick J. Wong if (xfs_set_blockgc_enabled(mp))
13786f649091SDarrick J. Wong return;
13796f649091SDarrick J. Wong
13806f649091SDarrick J. Wong trace_xfs_blockgc_start(mp, __return_address);
1381894ecacfSDarrick J. Wong for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1382894ecacfSDarrick J. Wong xfs_blockgc_queue(pag);
1383d6b636ebSDarrick J. Wong }
13843d4feec0SDarrick J. Wong
1385d20d5edcSDarrick J. Wong /* Don't try to run block gc on an inode that's in any of these states. */
1386d20d5edcSDarrick J. Wong #define XFS_BLOCKGC_NOGRAB_IFLAGS (XFS_INEW | \
1387ab23a776SDave Chinner XFS_NEED_INACTIVE | \
1388ab23a776SDave Chinner XFS_INACTIVATING | \
1389d20d5edcSDarrick J. Wong XFS_IRECLAIMABLE | \
1390d20d5edcSDarrick J. Wong XFS_IRECLAIM)
1391df600197SDarrick J. Wong /*
1392b9baaef4SDarrick J. Wong * Decide if the given @ip is eligible for garbage collection of speculative
1393b9baaef4SDarrick J. Wong * preallocations, and grab it if so. Returns true if it's ready to go or
1394b9baaef4SDarrick J. Wong * false if we should just ignore it.
1395df600197SDarrick J. Wong */
1396df600197SDarrick J. Wong static bool
1397b9baaef4SDarrick J. Wong xfs_blockgc_igrab(
13987fdff526SDarrick J. Wong struct xfs_inode *ip)
1399df600197SDarrick J. Wong {
1400df600197SDarrick J. Wong struct inode *inode = VFS_I(ip);
1401df600197SDarrick J. Wong
1402df600197SDarrick J. Wong ASSERT(rcu_read_lock_held());
1403df600197SDarrick J. Wong
1404df600197SDarrick J. Wong /* Check for stale RCU freed inode */
1405df600197SDarrick J. Wong spin_lock(&ip->i_flags_lock);
1406df600197SDarrick J. Wong if (!ip->i_ino)
1407df600197SDarrick J. Wong goto out_unlock_noent;
1408df600197SDarrick J. Wong
1409d20d5edcSDarrick J. Wong if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1410df600197SDarrick J. Wong goto out_unlock_noent;
1411df600197SDarrick J. Wong spin_unlock(&ip->i_flags_lock);
1412df600197SDarrick J. Wong
1413df600197SDarrick J. Wong /* nothing to sync during shutdown */
141475c8c50fSDave Chinner if (xfs_is_shutdown(ip->i_mount))
1415df600197SDarrick J. Wong return false;
1416df600197SDarrick J. Wong
1417df600197SDarrick J. Wong /* If we can't grab the inode, it must be on its way to reclaim. */
1418df600197SDarrick J. Wong if (!igrab(inode))
1419df600197SDarrick J. Wong return false;
1420df600197SDarrick J. Wong
1421df600197SDarrick J. Wong /* inode is valid */
1422df600197SDarrick J. Wong return true;
1423df600197SDarrick J. Wong
1424df600197SDarrick J. Wong out_unlock_noent:
1425df600197SDarrick J. Wong spin_unlock(&ip->i_flags_lock);
1426df600197SDarrick J. Wong return false;
1427df600197SDarrick J. Wong }
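
/*
 * The grab helper above follows the usual RCU cache-lookup pattern: a
 * radix tree hit found under rcu_read_lock() may already be freed and
 * recycled, so it is validated under ip->i_flags_lock before a real
 * reference is taken with igrab().  The same flow in miniature
 * (sketch only):
 */
#if 0	/* example only, never compiled */
static bool
example_validate_and_grab(
	struct xfs_inode	*ip)
{
	bool			valid = false;

	rcu_read_lock();
	spin_lock(&ip->i_flags_lock);
	/* i_ino is zeroed when the inode is queued for RCU freeing */
	if (ip->i_ino)
		valid = true;
	spin_unlock(&ip->i_flags_lock);
	if (valid)
		valid = igrab(VFS_I(ip)) != NULL;
	rcu_read_unlock();
	return valid;
}
#endif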
1428df600197SDarrick J. Wong
142941956753SDarrick J. Wong /* Scan one incore inode for block preallocations that we can remove. */
143041956753SDarrick J. Wong static int
143141956753SDarrick J. Wong xfs_blockgc_scan_inode(
143241956753SDarrick J. Wong struct xfs_inode *ip,
1433b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw)
143485c5b270SDarrick J. Wong {
14350fa4a10aSDarrick J. Wong unsigned int lockflags = 0;
143685c5b270SDarrick J. Wong int error;
143785c5b270SDarrick J. Wong
1438b26b2bf1SDarrick J. Wong error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
143985c5b270SDarrick J. Wong if (error)
14400fa4a10aSDarrick J. Wong goto unlock;
144185c5b270SDarrick J. Wong
1442b26b2bf1SDarrick J. Wong error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
14430fa4a10aSDarrick J. Wong unlock:
14440fa4a10aSDarrick J. Wong if (lockflags)
14450fa4a10aSDarrick J. Wong xfs_iunlock(ip, lockflags);
1446594ab00bSDarrick J. Wong xfs_irele(ip);
144785c5b270SDarrick J. Wong return error;
144885c5b270SDarrick J. Wong }
144985c5b270SDarrick J. Wong
14509669f51dSDarrick J. Wong /* Background worker that trims preallocated space. */
14519669f51dSDarrick J. Wong void
14529669f51dSDarrick J. Wong xfs_blockgc_worker(
14539669f51dSDarrick J. Wong struct work_struct *work)
14549669f51dSDarrick J. Wong {
1455894ecacfSDarrick J. Wong struct xfs_perag *pag = container_of(to_delayed_work(work),
1456894ecacfSDarrick J. Wong struct xfs_perag, pag_blockgc_work);
1457894ecacfSDarrick J. Wong struct xfs_mount *mp = pag->pag_mount;
14589669f51dSDarrick J. Wong int error;
14599669f51dSDarrick J. Wong
14606f649091SDarrick J. Wong trace_xfs_blockgc_worker(mp, __return_address);
14616f649091SDarrick J. Wong
1462f427cf5cSDarrick J. Wong error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
14639669f51dSDarrick J. Wong if (error)
1464894ecacfSDarrick J. Wong xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1465894ecacfSDarrick J. Wong pag->pag_agno, error);
1466894ecacfSDarrick J. Wong xfs_blockgc_queue(pag);
14679669f51dSDarrick J. Wong }
14689669f51dSDarrick J. Wong
146985c5b270SDarrick J. Wong /*
14702eb66502SDarrick J. Wong * Try to free space in the filesystem by purging inactive inodes, eofblocks
14712eb66502SDarrick J. Wong * and cowblocks.
147285c5b270SDarrick J. Wong */
147385c5b270SDarrick J. Wong int
147485c5b270SDarrick J. Wong xfs_blockgc_free_space(
147585c5b270SDarrick J. Wong struct xfs_mount *mp,
1476b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw)
147785c5b270SDarrick J. Wong {
14782eb66502SDarrick J. Wong int error;
14792eb66502SDarrick J. Wong
1480b26b2bf1SDarrick J. Wong trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
148185c5b270SDarrick J. Wong
14822eb66502SDarrick J. Wong error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
14832eb66502SDarrick J. Wong if (error)
14842eb66502SDarrick J. Wong return error;
14852eb66502SDarrick J. Wong
1486d4d12c02SDave Chinner return xfs_inodegc_flush(mp);
148785c5b270SDarrick J. Wong }
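
/*
 * Sketch of a caller-side use of the function above (example only): a
 * synchronous sweep of every tagged inode that waits on busy inodes
 * instead of skipping them.  The wrapper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int
example_blockgc_sync_sweep(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
	};

	return xfs_blockgc_free_space(mp, &icw);
}
#endif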
148885c5b270SDarrick J. Wong
14893d4feec0SDarrick J. Wong /*
1490e8d04c2aSDarrick J. Wong * Reclaim all the free space that we can by scheduling the background blockgc
1491e8d04c2aSDarrick J. Wong * and inodegc workers immediately and waiting for them all to clear.
1492e8d04c2aSDarrick J. Wong */
1493d4d12c02SDave Chinner int
1494e8d04c2aSDarrick J. Wong xfs_blockgc_flush_all(
1495e8d04c2aSDarrick J. Wong struct xfs_mount *mp)
1496e8d04c2aSDarrick J. Wong {
1497e8d04c2aSDarrick J. Wong struct xfs_perag *pag;
1498e8d04c2aSDarrick J. Wong xfs_agnumber_t agno;
1499e8d04c2aSDarrick J. Wong
1500e8d04c2aSDarrick J. Wong trace_xfs_blockgc_flush_all(mp, __return_address);
1501e8d04c2aSDarrick J. Wong
1502e8d04c2aSDarrick J. Wong /*
1503e8d04c2aSDarrick J. Wong * For each blockgc worker, move its queue time up to now. If it
1504e8d04c2aSDarrick J. Wong * wasn't queued, it will not be requeued. Then flush whatever's
1505e8d04c2aSDarrick J. Wong * left.
1506e8d04c2aSDarrick J. Wong */
1507e8d04c2aSDarrick J. Wong for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1508e8d04c2aSDarrick J. Wong mod_delayed_work(pag->pag_mount->m_blockgc_wq,
1509e8d04c2aSDarrick J. Wong &pag->pag_blockgc_work, 0);
1510e8d04c2aSDarrick J. Wong
1511e8d04c2aSDarrick J. Wong for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1512e8d04c2aSDarrick J. Wong flush_delayed_work(&pag->pag_blockgc_work);
1513e8d04c2aSDarrick J. Wong
1514d4d12c02SDave Chinner return xfs_inodegc_flush(mp);
1515e8d04c2aSDarrick J. Wong }
1516e8d04c2aSDarrick J. Wong
1517e8d04c2aSDarrick J. Wong /*
1518c237dd7cSDarrick J. Wong * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
1519c237dd7cSDarrick J. Wong * quota caused an allocation failure, so we make a best effort by including
1520c237dd7cSDarrick J. Wong * each quota under low free space conditions (less than 1% free space) in the
1521c237dd7cSDarrick J. Wong * scan.
1522111068f8SDarrick J. Wong *
1523111068f8SDarrick J. Wong * Callers must not hold any inode's ILOCK. If requesting a synchronous scan
15242d53f66bSDarrick J. Wong * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
1525111068f8SDarrick J. Wong * MMAPLOCK.
15263d4feec0SDarrick J. Wong */
1527111068f8SDarrick J. Wong int
1528c237dd7cSDarrick J. Wong xfs_blockgc_free_dquots(
1529c237dd7cSDarrick J. Wong struct xfs_mount *mp,
1530c237dd7cSDarrick J. Wong struct xfs_dquot *udqp,
1531c237dd7cSDarrick J. Wong struct xfs_dquot *gdqp,
1532c237dd7cSDarrick J. Wong struct xfs_dquot *pdqp,
15332d53f66bSDarrick J. Wong unsigned int iwalk_flags)
15343d4feec0SDarrick J. Wong {
1535b26b2bf1SDarrick J. Wong struct xfs_icwalk icw = {0};
15363d4feec0SDarrick J. Wong bool do_work = false;
15373d4feec0SDarrick J. Wong
1538c237dd7cSDarrick J. Wong if (!udqp && !gdqp && !pdqp)
1539c237dd7cSDarrick J. Wong return 0;
1540c237dd7cSDarrick J. Wong
15413d4feec0SDarrick J. Wong /*
1542111068f8SDarrick J. Wong * Run a scan to free blocks using the union filter to cover all
1543111068f8SDarrick J. Wong * applicable quotas in a single scan.
15443d4feec0SDarrick J. Wong */
1545b26b2bf1SDarrick J. Wong icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
15463d4feec0SDarrick J. Wong
1547c237dd7cSDarrick J. Wong if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1548b26b2bf1SDarrick J. Wong icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1549b26b2bf1SDarrick J. Wong icw.icw_flags |= XFS_ICWALK_FLAG_UID;
15503d4feec0SDarrick J. Wong do_work = true;
15513d4feec0SDarrick J. Wong }
15523d4feec0SDarrick J. Wong
1553c237dd7cSDarrick J. Wong if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1554b26b2bf1SDarrick J. Wong icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1555b26b2bf1SDarrick J. Wong icw.icw_flags |= XFS_ICWALK_FLAG_GID;
15563d4feec0SDarrick J. Wong do_work = true;
15573d4feec0SDarrick J. Wong }
15583d4feec0SDarrick J. Wong
1559c237dd7cSDarrick J. Wong if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1560b26b2bf1SDarrick J. Wong icw.icw_prid = pdqp->q_id;
1561b26b2bf1SDarrick J. Wong icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
15623d4feec0SDarrick J. Wong do_work = true;
15633d4feec0SDarrick J. Wong }
15643d4feec0SDarrick J. Wong
15653d4feec0SDarrick J. Wong if (!do_work)
1566111068f8SDarrick J. Wong return 0;
15673d4feec0SDarrick J. Wong
1568b26b2bf1SDarrick J. Wong return xfs_blockgc_free_space(mp, &icw);
1569c237dd7cSDarrick J. Wong }
1570c237dd7cSDarrick J. Wong
1571c237dd7cSDarrick J. Wong /* Run cow/eofblocks scans on the quotas attached to the inode. */
1572c237dd7cSDarrick J. Wong int
1573c237dd7cSDarrick J. Wong xfs_blockgc_free_quota(
1574c237dd7cSDarrick J. Wong struct xfs_inode *ip,
15752d53f66bSDarrick J. Wong unsigned int iwalk_flags)
1576c237dd7cSDarrick J. Wong {
1577c237dd7cSDarrick J. Wong return xfs_blockgc_free_dquots(ip->i_mount,
1578c237dd7cSDarrick J. Wong xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1579c237dd7cSDarrick J. Wong xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
15802d53f66bSDarrick J. Wong xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
15813d4feec0SDarrick J. Wong }
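
/*
 * Sketch of the retry pattern these helpers support (example only): a
 * reservation path that, on its first EDQUOT/ENOSPC, runs a blockgc
 * scan against the inode's quotas and retries once.  The reservation
 * call below is a hypothetical stand-in, not a real API.
 */
#if 0	/* example only, never compiled */
static int
example_reserve_with_gc(
	struct xfs_inode	*ip)
{
	bool			retried = false;
	int			error;

retry:
	error = example_reserve_quota(ip);	/* hypothetical helper */
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_blockgc_free_quota(ip, 0);
		retried = true;
		goto retry;
	}
	return error;
}
#endif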
1582df600197SDarrick J. Wong
1583df600197SDarrick J. Wong /* XFS Inode Cache Walking Code */
1584df600197SDarrick J. Wong
1585df600197SDarrick J. Wong /*
1586f1bc5c56SDarrick J. Wong * The inode lookup is done in batches to keep the amount of lock traffic and
1587f1bc5c56SDarrick J. Wong * radix tree lookups to a minimum. The batch size is a trade off between
1588f1bc5c56SDarrick J. Wong * lookup reduction and stack usage. This is in the reclaim path, so we can't
1589f1bc5c56SDarrick J. Wong * be too greedy.
1590f1bc5c56SDarrick J. Wong */
1591f1bc5c56SDarrick J. Wong #define XFS_LOOKUP_BATCH 32
1592f1bc5c56SDarrick J. Wong
1593f1bc5c56SDarrick J. Wong
1594f1bc5c56SDarrick J. Wong /*
1595b9baaef4SDarrick J. Wong * Decide if we want to grab this inode in anticipation of doing work towards
1596594ab00bSDarrick J. Wong * the goal.
1597b9baaef4SDarrick J. Wong */
1598b9baaef4SDarrick J. Wong static inline bool
1599b9baaef4SDarrick J. Wong xfs_icwalk_igrab(
1600b9baaef4SDarrick J. Wong enum xfs_icwalk_goal goal,
16019492750aSDarrick J. Wong struct xfs_inode *ip,
1602b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw)
1603b9baaef4SDarrick J. Wong {
1604b9baaef4SDarrick J. Wong switch (goal) {
1605b9baaef4SDarrick J. Wong case XFS_ICWALK_BLOCKGC:
16067fdff526SDarrick J. Wong return xfs_blockgc_igrab(ip);
1607f1bc5c56SDarrick J. Wong case XFS_ICWALK_RECLAIM:
1608b26b2bf1SDarrick J. Wong return xfs_reclaim_igrab(ip, icw);
1609b9baaef4SDarrick J. Wong default:
1610b9baaef4SDarrick J. Wong return false;
1611b9baaef4SDarrick J. Wong }
1612b9baaef4SDarrick J. Wong }
1613b9baaef4SDarrick J. Wong
1614594ab00bSDarrick J. Wong /*
1615594ab00bSDarrick J. Wong * Process an inode. Each processing function must handle any state changes
1616594ab00bSDarrick J. Wong * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
1617594ab00bSDarrick J. Wong */
1618f427cf5cSDarrick J. Wong static inline int
1619f427cf5cSDarrick J. Wong xfs_icwalk_process_inode(
1620f427cf5cSDarrick J. Wong enum xfs_icwalk_goal goal,
1621f427cf5cSDarrick J. Wong struct xfs_inode *ip,
1622f1bc5c56SDarrick J. Wong struct xfs_perag *pag,
1623b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw)
1624f427cf5cSDarrick J. Wong {
1625594ab00bSDarrick J. Wong int error = 0;
1626f427cf5cSDarrick J. Wong
1627f427cf5cSDarrick J. Wong switch (goal) {
1628f427cf5cSDarrick J. Wong case XFS_ICWALK_BLOCKGC:
1629b26b2bf1SDarrick J. Wong error = xfs_blockgc_scan_inode(ip, icw);
1630f427cf5cSDarrick J. Wong break;
1631f1bc5c56SDarrick J. Wong case XFS_ICWALK_RECLAIM:
1632f1bc5c56SDarrick J. Wong xfs_reclaim_inode(ip, pag);
1633f1bc5c56SDarrick J. Wong break;
1634f427cf5cSDarrick J. Wong }
1635f427cf5cSDarrick J. Wong return error;
1636f427cf5cSDarrick J. Wong }
1637f427cf5cSDarrick J. Wong
1638b9baaef4SDarrick J. Wong /*
1639f427cf5cSDarrick J. Wong * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1640f427cf5cSDarrick J. Wong * process them in some manner.
1641df600197SDarrick J. Wong */
1642df600197SDarrick J. Wong static int
1643c1115c0cSDarrick J. Wong xfs_icwalk_ag(
1644df600197SDarrick J. Wong struct xfs_perag *pag,
1645f427cf5cSDarrick J. Wong enum xfs_icwalk_goal goal,
1646b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw)
1647df600197SDarrick J. Wong {
1648df600197SDarrick J. Wong struct xfs_mount *mp = pag->pag_mount;
1649df600197SDarrick J. Wong uint32_t first_index;
1650df600197SDarrick J. Wong int last_error = 0;
1651df600197SDarrick J. Wong int skipped;
1652df600197SDarrick J. Wong bool done;
1653df600197SDarrick J. Wong int nr_found;
1654df600197SDarrick J. Wong
1655df600197SDarrick J. Wong restart:
1656df600197SDarrick J. Wong done = false;
1657df600197SDarrick J. Wong skipped = 0;
1658f1bc5c56SDarrick J. Wong if (goal == XFS_ICWALK_RECLAIM)
1659f1bc5c56SDarrick J. Wong first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1660f1bc5c56SDarrick J. Wong else
1661df600197SDarrick J. Wong first_index = 0;
1662df600197SDarrick J. Wong nr_found = 0;
1663df600197SDarrick J. Wong do {
1664df600197SDarrick J. Wong struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1665df600197SDarrick J. Wong int error = 0;
1666df600197SDarrick J. Wong int i;
1667df600197SDarrick J. Wong
1668df600197SDarrick J. Wong rcu_read_lock();
1669df600197SDarrick J. Wong
1670a437b9b4SChristoph Hellwig nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1671df600197SDarrick J. Wong (void **) batch, first_index,
1672a437b9b4SChristoph Hellwig XFS_LOOKUP_BATCH, goal);
1673df600197SDarrick J. Wong if (!nr_found) {
1674f1bc5c56SDarrick J. Wong done = true;
1675df600197SDarrick J. Wong rcu_read_unlock();
1676df600197SDarrick J. Wong break;
1677df600197SDarrick J. Wong }
1678df600197SDarrick J. Wong
1679df600197SDarrick J. Wong /*
1680df600197SDarrick J. Wong * Grab the inodes before we drop the lock. If we found
1681df600197SDarrick J. Wong * nothing, nr == 0 and the loop will be skipped.
1682df600197SDarrick J. Wong */
1683df600197SDarrick J. Wong for (i = 0; i < nr_found; i++) {
1684df600197SDarrick J. Wong struct xfs_inode *ip = batch[i];
1685df600197SDarrick J. Wong
1686b26b2bf1SDarrick J. Wong if (done || !xfs_icwalk_igrab(goal, ip, icw))
1687df600197SDarrick J. Wong batch[i] = NULL;
1688df600197SDarrick J. Wong
1689df600197SDarrick J. Wong /*
1690df600197SDarrick J. Wong * Update the index for the next lookup. Catch
1691df600197SDarrick J. Wong * overflows into the next AG range which can occur if
1692df600197SDarrick J. Wong * we have inodes in the last block of the AG and we
1693df600197SDarrick J. Wong * are currently pointing to the last inode.
1694df600197SDarrick J. Wong *
1695df600197SDarrick J. Wong * Because we may see inodes that are from the wrong AG
1696df600197SDarrick J. Wong * due to RCU freeing and reallocation, only update the
1697df600197SDarrick J. Wong * index if it lies in this AG. It was a race that led
1698df600197SDarrick J. Wong * us to see this inode, so another lookup from the
1699df600197SDarrick J. Wong * same index will not find it again.
1700df600197SDarrick J. Wong */
1701df600197SDarrick J. Wong if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1702df600197SDarrick J. Wong continue;
1703df600197SDarrick J. Wong first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1704df600197SDarrick J. Wong if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1705df600197SDarrick J. Wong done = true;
1706df600197SDarrick J. Wong }
1707df600197SDarrick J. Wong
1708df600197SDarrick J. Wong /* unlock now we've grabbed the inodes. */
1709df600197SDarrick J. Wong rcu_read_unlock();
1710df600197SDarrick J. Wong
1711df600197SDarrick J. Wong for (i = 0; i < nr_found; i++) {
1712df600197SDarrick J. Wong if (!batch[i])
1713df600197SDarrick J. Wong continue;
1714f1bc5c56SDarrick J. Wong error = xfs_icwalk_process_inode(goal, batch[i], pag,
1715b26b2bf1SDarrick J. Wong icw);
1716df600197SDarrick J. Wong if (error == -EAGAIN) {
1717df600197SDarrick J. Wong skipped++;
1718df600197SDarrick J. Wong continue;
1719df600197SDarrick J. Wong }
1720df600197SDarrick J. Wong if (error && last_error != -EFSCORRUPTED)
1721df600197SDarrick J. Wong last_error = error;
1722df600197SDarrick J. Wong }
1723df600197SDarrick J. Wong
1724df600197SDarrick J. Wong /* bail out if the filesystem is corrupted. */
1725df600197SDarrick J. Wong if (error == -EFSCORRUPTED)
1726df600197SDarrick J. Wong break;
1727df600197SDarrick J. Wong
1728df600197SDarrick J. Wong cond_resched();
1729df600197SDarrick J. Wong
1730b26b2bf1SDarrick J. Wong if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1731b26b2bf1SDarrick J. Wong icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1732b26b2bf1SDarrick J. Wong if (icw->icw_scan_limit <= 0)
1733f1bc5c56SDarrick J. Wong break;
1734f1bc5c56SDarrick J. Wong }
1735df600197SDarrick J. Wong } while (nr_found && !done);
1736df600197SDarrick J. Wong
1737f1bc5c56SDarrick J. Wong if (goal == XFS_ICWALK_RECLAIM) {
1738f1bc5c56SDarrick J. Wong if (done)
1739f1bc5c56SDarrick J. Wong first_index = 0;
1740f1bc5c56SDarrick J. Wong WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1741f1bc5c56SDarrick J. Wong }
1742f1bc5c56SDarrick J. Wong
1743df600197SDarrick J. Wong if (skipped) {
1744df600197SDarrick J. Wong delay(1);
1745df600197SDarrick J. Wong goto restart;
1746df600197SDarrick J. Wong }
1747df600197SDarrick J. Wong return last_error;
1748df600197SDarrick J. Wong }
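
/*
 * The cursor-overflow guard above in miniature (sketch only): agino
 * numbers occupy the low bits of the 64-bit inode number, so stepping
 * past the numerically largest inode in an AG carries into the AG
 * number bits and the AGINO conversion wraps to a smaller value.
 * Detecting that wrap is what terminates the walk.
 */
#if 0	/* example only, never compiled */
static bool
example_agino_wrapped(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	return XFS_INO_TO_AGINO(mp, ino + 1) < XFS_INO_TO_AGINO(mp, ino);
}
#endif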
1749df600197SDarrick J. Wong
1750f427cf5cSDarrick J. Wong /* Walk all incore inodes to achieve a given goal. */
1751df600197SDarrick J. Wong static int
1752c1115c0cSDarrick J. Wong xfs_icwalk(
1753df600197SDarrick J. Wong struct xfs_mount *mp,
1754f427cf5cSDarrick J. Wong enum xfs_icwalk_goal goal,
1755b26b2bf1SDarrick J. Wong struct xfs_icwalk *icw)
1756df600197SDarrick J. Wong {
1757df600197SDarrick J. Wong struct xfs_perag *pag;
1758df600197SDarrick J. Wong int error = 0;
1759df600197SDarrick J. Wong int last_error = 0;
1760a437b9b4SChristoph Hellwig xfs_agnumber_t agno;
1761df600197SDarrick J. Wong
1762a437b9b4SChristoph Hellwig for_each_perag_tag(mp, agno, pag, goal) {
1763b26b2bf1SDarrick J. Wong error = xfs_icwalk_ag(pag, goal, icw);
1764df600197SDarrick J. Wong if (error) {
1765df600197SDarrick J. Wong last_error = error;
1766a437b9b4SChristoph Hellwig if (error == -EFSCORRUPTED) {
1767c4d5660aSDave Chinner xfs_perag_rele(pag);
1768df600197SDarrick J. Wong break;
1769df600197SDarrick J. Wong }
1770df600197SDarrick J. Wong }
1771a437b9b4SChristoph Hellwig }
1772df600197SDarrick J. Wong return last_error;
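/*
 * Compile-time assertion only: BUILD_BUG_ON() expands to no object
 * code, so sitting after the return statement keeps the check inside
 * this function without affecting runtime behaviour.
 */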
17732d53f66bSDarrick J. Wong BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
1774df600197SDarrick J. Wong }
1775c6c2066dSDarrick J. Wong
1776c6c2066dSDarrick J. Wong #ifdef DEBUG
1777c6c2066dSDarrick J. Wong static void
1778c6c2066dSDarrick J. Wong xfs_check_delalloc(
1779c6c2066dSDarrick J. Wong struct xfs_inode *ip,
1780c6c2066dSDarrick J. Wong int whichfork)
1781c6c2066dSDarrick J. Wong {
1782732436efSDarrick J. Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1783c6c2066dSDarrick J. Wong struct xfs_bmbt_irec got;
1784c6c2066dSDarrick J. Wong struct xfs_iext_cursor icur;
1785c6c2066dSDarrick J. Wong
1786c6c2066dSDarrick J. Wong if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1787c6c2066dSDarrick J. Wong return;
1788c6c2066dSDarrick J. Wong do {
1789c6c2066dSDarrick J. Wong if (isnullstartblock(got.br_startblock)) {
1790c6c2066dSDarrick J. Wong xfs_warn(ip->i_mount,
1791c6c2066dSDarrick J. Wong "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1792c6c2066dSDarrick J. Wong ip->i_ino,
1793c6c2066dSDarrick J. Wong whichfork == XFS_DATA_FORK ? "data" : "cow",
1794c6c2066dSDarrick J. Wong got.br_startoff, got.br_blockcount);
1795c6c2066dSDarrick J. Wong }
1796c6c2066dSDarrick J. Wong } while (xfs_iext_next_extent(ifp, &icur, &got));
1797c6c2066dSDarrick J. Wong }
1798c6c2066dSDarrick J. Wong #else
1799c6c2066dSDarrick J. Wong #define xfs_check_delalloc(ip, whichfork) do { } while (0)
1800c6c2066dSDarrick J. Wong #endif
1801c6c2066dSDarrick J. Wong
1802ab23a776SDave Chinner /* Schedule the inode for reclaim. */
1803ab23a776SDave Chinner static void
1804ab23a776SDave Chinner xfs_inodegc_set_reclaimable(
1805c6c2066dSDarrick J. Wong struct xfs_inode *ip)
1806c6c2066dSDarrick J. Wong {
1807c6c2066dSDarrick J. Wong struct xfs_mount *mp = ip->i_mount;
1808c6c2066dSDarrick J. Wong struct xfs_perag *pag;
1809c6c2066dSDarrick J. Wong
181075c8c50fSDave Chinner if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1811c6c2066dSDarrick J. Wong xfs_check_delalloc(ip, XFS_DATA_FORK);
1812c6c2066dSDarrick J. Wong xfs_check_delalloc(ip, XFS_COW_FORK);
1813c6c2066dSDarrick J. Wong ASSERT(0);
1814c6c2066dSDarrick J. Wong }
1815c6c2066dSDarrick J. Wong
1816c6c2066dSDarrick J. Wong pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1817c6c2066dSDarrick J. Wong spin_lock(&pag->pag_ici_lock);
1818c6c2066dSDarrick J. Wong spin_lock(&ip->i_flags_lock);
1819c6c2066dSDarrick J. Wong
1820ab23a776SDave Chinner trace_xfs_inode_set_reclaimable(ip);
1821ab23a776SDave Chinner ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1822ab23a776SDave Chinner ip->i_flags |= XFS_IRECLAIMABLE;
1823c6c2066dSDarrick J. Wong xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1824c6c2066dSDarrick J. Wong XFS_ICI_RECLAIM_TAG);
1825c6c2066dSDarrick J. Wong
1826c6c2066dSDarrick J. Wong spin_unlock(&ip->i_flags_lock);
1827c6c2066dSDarrick J. Wong spin_unlock(&pag->pag_ici_lock);
1828c6c2066dSDarrick J. Wong xfs_perag_put(pag);
1829c6c2066dSDarrick J. Wong }
1830ab23a776SDave Chinner
1831ab23a776SDave Chinner /*
1832ab23a776SDave Chinner * Free all speculative preallocations and possibly even the inode itself.
1833ab23a776SDave Chinner * This is the last chance to make changes to an otherwise unreferenced file
1834ab23a776SDave Chinner * before incore reclamation happens.
1835ab23a776SDave Chinner */
1836d4d12c02SDave Chinner static int
1837ab23a776SDave Chinner xfs_inodegc_inactivate(
1838ab23a776SDave Chinner struct xfs_inode *ip)
1839ab23a776SDave Chinner {
1840d4d12c02SDave Chinner int error;
1841d4d12c02SDave Chinner
1842ab23a776SDave Chinner trace_xfs_inode_inactivating(ip);
1843d4d12c02SDave Chinner error = xfs_inactive(ip);
1844ab23a776SDave Chinner xfs_inodegc_set_reclaimable(ip);
1845d4d12c02SDave Chinner return error;
1847ab23a776SDave Chinner }
1848ab23a776SDave Chinner
1849ab23a776SDave Chinner void
1850ab23a776SDave Chinner xfs_inodegc_worker(
1851ab23a776SDave Chinner struct work_struct *work)
1852ab23a776SDave Chinner {
18537cf2b0f9SDave Chinner struct xfs_inodegc *gc = container_of(to_delayed_work(work),
18547cf2b0f9SDave Chinner struct xfs_inodegc, work);
1855ab23a776SDave Chinner struct llist_node *node = llist_del_all(&gc->list);
1856ab23a776SDave Chinner struct xfs_inode *ip, *n;
185762334fabSDarrick J. Wong struct xfs_mount *mp = gc->mp;
18584da11251SWu Guanghao unsigned int nofs_flag;
1859ab23a776SDave Chinner
186062334fabSDarrick J. Wong /*
186162334fabSDarrick J. Wong * Clear the cpu mask bit and ensure that we have seen the latest
186262334fabSDarrick J. Wong * update of the gc structure associated with this CPU. This matches
186362334fabSDarrick J. Wong * with the release semantics used when setting the cpumask bit in
186462334fabSDarrick J. Wong * xfs_inodegc_queue.
186562334fabSDarrick J. Wong */
186662334fabSDarrick J. Wong cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
186762334fabSDarrick J. Wong smp_mb__after_atomic();
1868b37c4c83SDarrick J. Wong
1869ab23a776SDave Chinner WRITE_ONCE(gc->items, 0);
1870ab23a776SDave Chinner
1871ab23a776SDave Chinner if (!node)
1872ab23a776SDave Chinner return;
1873ab23a776SDave Chinner
18744da11251SWu Guanghao /*
18754da11251SWu Guanghao * We can allocate memory here while doing writeback on behalf of
18764da11251SWu Guanghao * memory reclaim. To avoid memory allocation deadlocks set the
18774da11251SWu Guanghao * task-wide nofs context for the following operations.
18784da11251SWu Guanghao */
18794da11251SWu Guanghao nofs_flag = memalloc_nofs_save();
18804da11251SWu Guanghao
1881ab23a776SDave Chinner ip = llist_entry(node, struct xfs_inode, i_gclist);
188262334fabSDarrick J. Wong trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits));
1883ab23a776SDave Chinner
188440b1de00SDarrick J. Wong WRITE_ONCE(gc->shrinker_hits, 0);
1885ab23a776SDave Chinner llist_for_each_entry_safe(ip, n, node, i_gclist) {
1886d4d12c02SDave Chinner int error;
1887d4d12c02SDave Chinner
1888ab23a776SDave Chinner xfs_iflags_set(ip, XFS_INACTIVATING);
1889d4d12c02SDave Chinner error = xfs_inodegc_inactivate(ip);
1890d4d12c02SDave Chinner if (error && !gc->error)
1891d4d12c02SDave Chinner gc->error = error;
1892ab23a776SDave Chinner }
18934da11251SWu Guanghao
18944da11251SWu Guanghao memalloc_nofs_restore(nofs_flag);
1895ab23a776SDave Chinner }
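
/*
 * The list hand-off above uses the lockless llist pattern: producers
 * push with llist_add() from any context, and the worker detaches the
 * whole list in one atomic exchange with llist_del_all().  A generic
 * sketch of the same pattern (example only):
 */
#if 0	/* example only, never compiled */
struct example_item {
	struct llist_node	gclist;
	int			payload;
};

static void
example_produce(
	struct llist_head	*head,
	struct example_item	*it)
{
	llist_add(&it->gclist, head);	/* lock-free push */
}

static void
example_consume(
	struct llist_head	*head)
{
	struct example_item	*it, *n;
	struct llist_node	*first = llist_del_all(head);

	/* the detached list is now private, so no locking is needed */
	llist_for_each_entry_safe(it, n, first, gclist) {
		/* process it->payload */
	}
}
#endif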
1896ab23a776SDave Chinner
1897ab23a776SDave Chinner /*
18985e672cd6SDave Chinner * Expedite all pending inodegc work to run immediately. This does not wait for
18995e672cd6SDave Chinner * completion of the work.
19005e672cd6SDave Chinner */
19015e672cd6SDave Chinner void
19025e672cd6SDave Chinner xfs_inodegc_push(
19035e672cd6SDave Chinner struct xfs_mount *mp)
19045e672cd6SDave Chinner {
19055e672cd6SDave Chinner if (!xfs_is_inodegc_enabled(mp))
19065e672cd6SDave Chinner return;
19075e672cd6SDave Chinner trace_xfs_inodegc_push(mp, __return_address);
19085e672cd6SDave Chinner xfs_inodegc_queue_all(mp);
19095e672cd6SDave Chinner }
19105e672cd6SDave Chinner
19115e672cd6SDave Chinner /*
19126191cf3aSBrian Foster * Force all currently queued inode inactivation work to run immediately and
19136191cf3aSBrian Foster * wait for the work to finish.
1914ab23a776SDave Chinner */
1915d4d12c02SDave Chinner int
1916ab23a776SDave Chinner xfs_inodegc_flush(
1917ab23a776SDave Chinner struct xfs_mount *mp)
1918ab23a776SDave Chinner {
19195e672cd6SDave Chinner xfs_inodegc_push(mp);
1920ab23a776SDave Chinner trace_xfs_inodegc_flush(mp, __return_address);
1921d4d12c02SDave Chinner return xfs_inodegc_wait_all(mp);
1922ab23a776SDave Chinner }
1923ab23a776SDave Chinner
1924ab23a776SDave Chinner /*
1925ab23a776SDave Chinner * Flush all the pending work and then disable the inode inactivation background
19262254a739SDarrick J. Wong * workers and wait for them to stop. Caller must hold sb->s_umount to
19272254a739SDarrick J. Wong * coordinate changes in the inodegc_enabled state.
1928ab23a776SDave Chinner */
1929ab23a776SDave Chinner void
1930ab23a776SDave Chinner xfs_inodegc_stop(
1931ab23a776SDave Chinner struct xfs_mount *mp)
1932ab23a776SDave Chinner {
19332254a739SDarrick J. Wong bool rerun;
19342254a739SDarrick J. Wong
1935ab23a776SDave Chinner if (!xfs_clear_inodegc_enabled(mp))
1936ab23a776SDave Chinner return;
1937ab23a776SDave Chinner
19382254a739SDarrick J. Wong /*
19392254a739SDarrick J. Wong * Drain all pending inodegc work, including inodes that could be
19402254a739SDarrick J. Wong * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
19412254a739SDarrick J. Wong * threads that sample the inodegc state just prior to us clearing it.
19422254a739SDarrick J. Wong * The inodegc flag state prevents new threads from queuing more
19432254a739SDarrick J. Wong * inodes, so we queue pending work items and flush the workqueue until
19442254a739SDarrick J. Wong * all inodegc lists are empty. IOWs, we cannot use drain_workqueue
19452254a739SDarrick J. Wong * here because it does not allow other unserialized mechanisms to
19462254a739SDarrick J. Wong * reschedule inodegc work while this draining is in progress.
19472254a739SDarrick J. Wong */
1948ab23a776SDave Chinner xfs_inodegc_queue_all(mp);
19492254a739SDarrick J. Wong do {
19502254a739SDarrick J. Wong flush_workqueue(mp->m_inodegc_wq);
19512254a739SDarrick J. Wong rerun = xfs_inodegc_queue_all(mp);
19522254a739SDarrick J. Wong } while (rerun);
1953ab23a776SDave Chinner
1954ab23a776SDave Chinner trace_xfs_inodegc_stop(mp, __return_address);
1955ab23a776SDave Chinner }
1956ab23a776SDave Chinner
1957ab23a776SDave Chinner /*
1958ab23a776SDave Chinner * Enable the inode inactivation background workers and schedule deferred inode
19592254a739SDarrick J. Wong * inactivation work if there is any. Caller must hold sb->s_umount to
19602254a739SDarrick J. Wong * coordinate changes in the inodegc_enabled state.
1961ab23a776SDave Chinner */
1962ab23a776SDave Chinner void
1963ab23a776SDave Chinner xfs_inodegc_start(
1964ab23a776SDave Chinner struct xfs_mount *mp)
1965ab23a776SDave Chinner {
1966ab23a776SDave Chinner if (xfs_set_inodegc_enabled(mp))
1967ab23a776SDave Chinner return;
1968ab23a776SDave Chinner
1969ab23a776SDave Chinner trace_xfs_inodegc_start(mp, __return_address);
1970ab23a776SDave Chinner xfs_inodegc_queue_all(mp);
1971ab23a776SDave Chinner }
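
/*
 * Sketch of the expected pairing (example only): callers quiesce and
 * re-enable inodegc around state transitions such as freeze or a
 * read-only remount, holding sb->s_umount across both calls as the
 * comments above require.
 */
#if 0	/* example only, never compiled */
static void
example_quiesced_transition(
	struct xfs_mount	*mp)
{
	/* caller holds mp->m_super->s_umount (assumed) */
	xfs_inodegc_stop(mp);
	/* ... perform the transition with inodegc fully drained ... */
	xfs_inodegc_start(mp);
}
#endif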
1972ab23a776SDave Chinner
197365f03d86SDarrick J. Wong #ifdef CONFIG_XFS_RT
197465f03d86SDarrick J. Wong static inline bool
197565f03d86SDarrick J. Wong xfs_inodegc_want_queue_rt_file(
197665f03d86SDarrick J. Wong struct xfs_inode *ip)
197765f03d86SDarrick J. Wong {
197865f03d86SDarrick J. Wong struct xfs_mount *mp = ip->i_mount;
197965f03d86SDarrick J. Wong
198065f03d86SDarrick J. Wong if (!XFS_IS_REALTIME_INODE(ip))
198165f03d86SDarrick J. Wong return false;
198265f03d86SDarrick J. Wong
19832229276cSDarrick J. Wong if (__percpu_counter_compare(&mp->m_frextents,
19842229276cSDarrick J. Wong mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
19852229276cSDarrick J. Wong XFS_FDBLOCKS_BATCH) < 0)
19862229276cSDarrick J. Wong return true;
19872229276cSDarrick J. Wong
19882229276cSDarrick J. Wong return false;
198965f03d86SDarrick J. Wong }
199065f03d86SDarrick J. Wong #else
199165f03d86SDarrick J. Wong # define xfs_inodegc_want_queue_rt_file(ip) (false)
199265f03d86SDarrick J. Wong #endif /* CONFIG_XFS_RT */
199365f03d86SDarrick J. Wong
1994ab23a776SDave Chinner /*
1995ab23a776SDave Chinner * Schedule the inactivation worker when:
1996ab23a776SDave Chinner *
1997ab23a776SDave Chinner * - We've accumulated more than one inode cluster buffer's worth of inodes.
19987d6f07d2SDarrick J. Wong * - There is less than 5% free space left.
1999108523b8SDarrick J. Wong * - Any of the quotas for this inode are near an enforcement limit.
2000ab23a776SDave Chinner */
2001ab23a776SDave Chinner static inline bool
2002ab23a776SDave Chinner xfs_inodegc_want_queue_work(
2003ab23a776SDave Chinner struct xfs_inode *ip,
2004ab23a776SDave Chinner unsigned int items)
2005ab23a776SDave Chinner {
2006ab23a776SDave Chinner struct xfs_mount *mp = ip->i_mount;
2007ab23a776SDave Chinner
2008ab23a776SDave Chinner if (items > mp->m_ino_geo.inodes_per_cluster)
2009ab23a776SDave Chinner return true;
2010ab23a776SDave Chinner
20117d6f07d2SDarrick J. Wong if (__percpu_counter_compare(&mp->m_fdblocks,
20127d6f07d2SDarrick J. Wong mp->m_low_space[XFS_LOWSP_5_PCNT],
20137d6f07d2SDarrick J. Wong XFS_FDBLOCKS_BATCH) < 0)
20147d6f07d2SDarrick J. Wong return true;
20157d6f07d2SDarrick J. Wong
201665f03d86SDarrick J. Wong if (xfs_inodegc_want_queue_rt_file(ip))
201765f03d86SDarrick J. Wong return true;
201865f03d86SDarrick J. Wong
2019108523b8SDarrick J. Wong if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
2020108523b8SDarrick J. Wong return true;
2021108523b8SDarrick J. Wong
2022108523b8SDarrick J. Wong if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
2023108523b8SDarrick J. Wong return true;
2024108523b8SDarrick J. Wong
2025108523b8SDarrick J. Wong if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
2026108523b8SDarrick J. Wong return true;
2027108523b8SDarrick J. Wong
2028ab23a776SDave Chinner return false;
2029ab23a776SDave Chinner }
2030ab23a776SDave Chinner
2031ab23a776SDave Chinner /*
2032ab23a776SDave Chinner * Upper bound on the number of inodes in each AG that can be queued for
2033ab23a776SDave Chinner * inactivation at any given time, to avoid monopolizing the workqueue.
2034ab23a776SDave Chinner */
2035ab23a776SDave Chinner #define XFS_INODEGC_MAX_BACKLOG (4 * XFS_INODES_PER_CHUNK)
2036ab23a776SDave Chinner
2037ab23a776SDave Chinner /*
2038ab23a776SDave Chinner * Make the frontend wait for inactivations when:
2039ab23a776SDave Chinner *
204040b1de00SDarrick J. Wong * - Memory shrinkers queued the inactivation worker and it hasn't finished.
2041ab23a776SDave Chinner * - The queue depth exceeds the maximum allowable percpu backlog.
2042ab23a776SDave Chinner *
20438bb04028SDave Chinner * Note: If we are in a NOFS context here (e.g. current thread is running a
20448bb04028SDave Chinner * transaction) then we don't want to block here as inodegc progress may require
20458bb04028SDave Chinner * filesystem resources we hold to make progress and that could result in a
20468bb04028SDave Chinner * deadlock. Hence we skip out of here if we are in a scoped NOFS context.
2047ab23a776SDave Chinner */
2048ab23a776SDave Chinner static inline bool
2049ab23a776SDave Chinner xfs_inodegc_want_flush_work(
2050ab23a776SDave Chinner struct xfs_inode *ip,
205140b1de00SDarrick J. Wong unsigned int items,
205240b1de00SDarrick J. Wong unsigned int shrinker_hits)
2053ab23a776SDave Chinner {
20548bb04028SDave Chinner if (current->flags & PF_MEMALLOC_NOFS)
2055ab23a776SDave Chinner return false;
2056ab23a776SDave Chinner
205740b1de00SDarrick J. Wong if (shrinker_hits > 0)
205840b1de00SDarrick J. Wong return true;
205940b1de00SDarrick J. Wong
2060ab23a776SDave Chinner if (items > XFS_INODEGC_MAX_BACKLOG)
2061ab23a776SDave Chinner return true;
2062ab23a776SDave Chinner
2063ab23a776SDave Chinner return false;
2064ab23a776SDave Chinner }
2065ab23a776SDave Chinner
2066ab23a776SDave Chinner /*
2067ab23a776SDave Chinner * Queue a background inactivation worker if there are inodes that need to be
2068ab23a776SDave Chinner * inactivated and higher level xfs code hasn't disabled the background
2069ab23a776SDave Chinner * workers.
2070ab23a776SDave Chinner */
2071ab23a776SDave Chinner static void
2072ab23a776SDave Chinner xfs_inodegc_queue(
2073ab23a776SDave Chinner struct xfs_inode *ip)
2074ab23a776SDave Chinner {
2075ab23a776SDave Chinner struct xfs_mount *mp = ip->i_mount;
2076ab23a776SDave Chinner struct xfs_inodegc *gc;
2077ab23a776SDave Chinner int items;
207840b1de00SDarrick J. Wong unsigned int shrinker_hits;
207962334fabSDarrick J. Wong unsigned int cpu_nr;
20807cf2b0f9SDave Chinner unsigned long queue_delay = 1;
2081ab23a776SDave Chinner
2082ab23a776SDave Chinner trace_xfs_inode_set_need_inactive(ip);
2083ab23a776SDave Chinner spin_lock(&ip->i_flags_lock);
2084ab23a776SDave Chinner ip->i_flags |= XFS_NEED_INACTIVE;
2085ab23a776SDave Chinner spin_unlock(&ip->i_flags_lock);
2086ab23a776SDave Chinner
208762334fabSDarrick J. Wong cpu_nr = get_cpu();
208862334fabSDarrick J. Wong gc = this_cpu_ptr(mp->m_inodegc);
2089ab23a776SDave Chinner llist_add(&ip->i_gclist, &gc->list);
2090ab23a776SDave Chinner items = READ_ONCE(gc->items);
2091ab23a776SDave Chinner WRITE_ONCE(gc->items, items + 1);
209240b1de00SDarrick J. Wong shrinker_hits = READ_ONCE(gc->shrinker_hits);
20937cf2b0f9SDave Chinner
20947cf2b0f9SDave Chinner /*
209562334fabSDarrick J. Wong * Ensure the list add is always seen by anyone who finds the cpumask
209662334fabSDarrick J. Wong * bit set. This effectively gives the cpumask bit set operation
209762334fabSDarrick J. Wong * release ordering semantics.
209862334fabSDarrick J. Wong */
209962334fabSDarrick J. Wong smp_mb__before_atomic();
210062334fabSDarrick J. Wong if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask))
210162334fabSDarrick J. Wong cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);
210262334fabSDarrick J. Wong
210362334fabSDarrick J. Wong /*
21047cf2b0f9SDave Chinner * We queue the work while holding the current CPU so that the work
21057cf2b0f9SDave Chinner * is scheduled to run on this CPU.
21067cf2b0f9SDave Chinner */
21077cf2b0f9SDave Chinner if (!xfs_is_inodegc_enabled(mp)) {
210862334fabSDarrick J. Wong put_cpu();
2109ab23a776SDave Chinner return;
2110ab23a776SDave Chinner }
2111ab23a776SDave Chinner
21127cf2b0f9SDave Chinner if (xfs_inodegc_want_queue_work(ip, items))
21137cf2b0f9SDave Chinner queue_delay = 0;
21147cf2b0f9SDave Chinner
21157cf2b0f9SDave Chinner trace_xfs_inodegc_queue(mp, __return_address);
211603e0add8SDarrick J. Wong mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
211703e0add8SDarrick J. Wong queue_delay);
211862334fabSDarrick J. Wong put_cpu();
21197cf2b0f9SDave Chinner
212040b1de00SDarrick J. Wong if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2121ab23a776SDave Chinner trace_xfs_inodegc_throttle(mp, __return_address);
21227cf2b0f9SDave Chinner flush_delayed_work(&gc->work);
2123ab23a776SDave Chinner }
2124ab23a776SDave Chinner }
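
/*
 * The ordering dance above in miniature (sketch only): the producer
 * publishes its list item before setting the cpumask bit, and the
 * worker clears the bit before draining the list, so a set bit always
 * implies a visible item.  smp_mb__before_atomic() upgrades the bit
 * set to release ordering; smp_mb__after_atomic() in the worker
 * provides the matching acquire.
 */
#if 0	/* example only, never compiled */
	/* producer (this function): */
	llist_add(&ip->i_gclist, &gc->list);
	smp_mb__before_atomic();
	cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);

	/* consumer (xfs_inodegc_worker): */
	cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
	smp_mb__after_atomic();
	node = llist_del_all(&gc->list);
#endif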
2125ab23a776SDave Chinner
2126ab23a776SDave Chinner /*
2127ab23a776SDave Chinner * We set the inode flag atomically with the radix tree tag. Once we get tag
2128ab23a776SDave Chinner * lookups on the radix tree, this inode flag can go away.
2129ab23a776SDave Chinner *
2130ab23a776SDave Chinner * We always use background reclaim here because even if the inode is clean, it
2131ab23a776SDave Chinner * still may be under IO and hence we have to wait for IO completion to occur
2132ab23a776SDave Chinner * before we can reclaim the inode. The background reclaim path handles this
2133ab23a776SDave Chinner * more efficiently than we can here, so simply let background reclaim tear down
2134ab23a776SDave Chinner * all inodes.
2135ab23a776SDave Chinner */
2136ab23a776SDave Chinner void
2137ab23a776SDave Chinner xfs_inode_mark_reclaimable(
2138ab23a776SDave Chinner struct xfs_inode *ip)
2139ab23a776SDave Chinner {
2140ab23a776SDave Chinner struct xfs_mount *mp = ip->i_mount;
2141ab23a776SDave Chinner bool need_inactive;
2142ab23a776SDave Chinner
2143ab23a776SDave Chinner XFS_STATS_INC(mp, vn_reclaim);
2144ab23a776SDave Chinner
2145ab23a776SDave Chinner /*
2146ab23a776SDave Chinner * We should never get here with any of the reclaim flags already set.
2147ab23a776SDave Chinner */
2148ab23a776SDave Chinner ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2149ab23a776SDave Chinner
2150ab23a776SDave Chinner need_inactive = xfs_inode_needs_inactive(ip);
2151ab23a776SDave Chinner if (need_inactive) {
2152ab23a776SDave Chinner xfs_inodegc_queue(ip);
2153ab23a776SDave Chinner return;
2154ab23a776SDave Chinner }
2155ab23a776SDave Chinner
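/*
 * Note that inodes routed through xfs_inodegc_queue() above keep their
 * dquots attached: inactivation still needs them to account for any
 * blocks it frees, and they are detached once it completes.
 */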
2156ab23a776SDave Chinner /* Going straight to reclaim, so drop the dquots. */
2157ab23a776SDave Chinner xfs_qm_dqdetach(ip);
2158ab23a776SDave Chinner xfs_inodegc_set_reclaimable(ip);
2159ab23a776SDave Chinner }
216040b1de00SDarrick J. Wong
216140b1de00SDarrick J. Wong /*
216240b1de00SDarrick J. Wong * Register a phony shrinker so that we can run background inodegc sooner when
216340b1de00SDarrick J. Wong * there's memory pressure. Inactivation does not itself free any memory but
216440b1de00SDarrick J. Wong * it does make inodes reclaimable, which eventually frees memory.
216540b1de00SDarrick J. Wong *
216640b1de00SDarrick J. Wong * The count function, seek value, and batch value are crafted to trigger the
216740b1de00SDarrick J. Wong * scan function during the second round of scanning. Hopefully this means
216840b1de00SDarrick J. Wong * that we reclaimed enough memory that initiating metadata transactions won't
216940b1de00SDarrick J. Wong * make things worse.
217040b1de00SDarrick J. Wong */
217140b1de00SDarrick J. Wong #define XFS_INODEGC_SHRINKER_COUNT (1UL << DEF_PRIORITY)
217240b1de00SDarrick J. Wong #define XFS_INODEGC_SHRINKER_BATCH ((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
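/*
 * Worked example, assuming DEF_PRIORITY == 12 and the core shrinker's
 * handling of ->seeks == 0 (delta = freeable / 2 per pass): COUNT is
 * 4096 and BATCH is 2049, so the first pass only defers 2048 objects,
 * which is below the batch size and does not invoke ->scan_objects;
 * the second pass accumulates 4096 and does.
 */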
217340b1de00SDarrick J. Wong
217440b1de00SDarrick J. Wong static unsigned long
217540b1de00SDarrick J. Wong xfs_inodegc_shrinker_count(
217640b1de00SDarrick J. Wong struct shrinker *shrink,
217740b1de00SDarrick J. Wong struct shrink_control *sc)
217840b1de00SDarrick J. Wong {
217940b1de00SDarrick J. Wong struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
218040b1de00SDarrick J. Wong m_inodegc_shrinker);
218140b1de00SDarrick J. Wong struct xfs_inodegc *gc;
218240b1de00SDarrick J. Wong int cpu;
218340b1de00SDarrick J. Wong
218440b1de00SDarrick J. Wong if (!xfs_is_inodegc_enabled(mp))
218540b1de00SDarrick J. Wong return 0;
218640b1de00SDarrick J. Wong
218762334fabSDarrick J. Wong for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
218840b1de00SDarrick J. Wong gc = per_cpu_ptr(mp->m_inodegc, cpu);
218940b1de00SDarrick J. Wong if (!llist_empty(&gc->list))
219040b1de00SDarrick J. Wong return XFS_INODEGC_SHRINKER_COUNT;
219140b1de00SDarrick J. Wong }
219240b1de00SDarrick J. Wong
219340b1de00SDarrick J. Wong return 0;
219440b1de00SDarrick J. Wong }
219540b1de00SDarrick J. Wong
219640b1de00SDarrick J. Wong static unsigned long
219740b1de00SDarrick J. Wong xfs_inodegc_shrinker_scan(
219840b1de00SDarrick J. Wong struct shrinker *shrink,
219940b1de00SDarrick J. Wong struct shrink_control *sc)
220040b1de00SDarrick J. Wong {
220140b1de00SDarrick J. Wong struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
220240b1de00SDarrick J. Wong m_inodegc_shrinker);
220340b1de00SDarrick J. Wong struct xfs_inodegc *gc;
220440b1de00SDarrick J. Wong int cpu;
220540b1de00SDarrick J. Wong bool no_items = true;
220640b1de00SDarrick J. Wong
220740b1de00SDarrick J. Wong if (!xfs_is_inodegc_enabled(mp))
220840b1de00SDarrick J. Wong return SHRINK_STOP;
220940b1de00SDarrick J. Wong
221040b1de00SDarrick J. Wong trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
221140b1de00SDarrick J. Wong
221262334fabSDarrick J. Wong for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
221340b1de00SDarrick J. Wong gc = per_cpu_ptr(mp->m_inodegc, cpu);
221440b1de00SDarrick J. Wong if (!llist_empty(&gc->list)) {
221540b1de00SDarrick J. Wong unsigned int h = READ_ONCE(gc->shrinker_hits);
221640b1de00SDarrick J. Wong
221740b1de00SDarrick J. Wong WRITE_ONCE(gc->shrinker_hits, h + 1);
22187cf2b0f9SDave Chinner mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
221940b1de00SDarrick J. Wong no_items = false;
222040b1de00SDarrick J. Wong }
222140b1de00SDarrick J. Wong }
222240b1de00SDarrick J. Wong
222340b1de00SDarrick J. Wong /*
222440b1de00SDarrick J. Wong * If there are no inodes to inactivate, we don't want the shrinker
222540b1de00SDarrick J. Wong * to think there's deferred work to call us back about.
222640b1de00SDarrick J. Wong */
222740b1de00SDarrick J. Wong if (no_items)
222840b1de00SDarrick J. Wong return LONG_MAX;
222940b1de00SDarrick J. Wong
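/*
 * Otherwise return SHRINK_STOP: the per-cpu workers kicked above do
 * the actual inactivation work, so there is nothing further for the
 * shrinker core to scan here.
 */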
223040b1de00SDarrick J. Wong return SHRINK_STOP;
223140b1de00SDarrick J. Wong }
223240b1de00SDarrick J. Wong
223340b1de00SDarrick J. Wong /* Register a shrinker so we can accelerate inodegc and throttle queuing. */
223440b1de00SDarrick J. Wong int
223540b1de00SDarrick J. Wong xfs_inodegc_register_shrinker(
223640b1de00SDarrick J. Wong struct xfs_mount *mp)
223740b1de00SDarrick J. Wong {
223840b1de00SDarrick J. Wong struct shrinker *shrink = &mp->m_inodegc_shrinker;
223940b1de00SDarrick J. Wong
224040b1de00SDarrick J. Wong shrink->count_objects = xfs_inodegc_shrinker_count;
224140b1de00SDarrick J. Wong shrink->scan_objects = xfs_inodegc_shrinker_scan;
224240b1de00SDarrick J. Wong shrink->seeks = 0;
224340b1de00SDarrick J. Wong shrink->flags = SHRINKER_NONSLAB;
224440b1de00SDarrick J. Wong shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
224540b1de00SDarrick J. Wong
2246e33c267aSRoman Gushchin return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
224740b1de00SDarrick J. Wong }
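/*
 * Sketch of the expected mount-path usage; the real caller lives
 * outside this file and the error label below is illustrative:
 *
 *	error = xfs_inodegc_register_shrinker(mp);
 *	if (error)
 *		goto out_fail;
 *	...
 *	unregister_shrinker(&mp->m_inodegc_shrinker);
 */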