/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
				struct xfs_perag *pag, struct xfs_inode *ip);

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}

STATIC void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
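
/*
 * Illustrative interleaving of the lookup race that zeroing i_ino above
 * guards against (a sketch of one ordering, not an exhaustive list):
 *
 *	CPU 0 (xfs_inode_free)		CPU 1 (xfs_iget, rcu_read_lock held)
 *	----------------------		------------------------------------
 *					ip = radix_tree_lookup(...)
 *	spin_lock(&ip->i_flags_lock)
 *	ip->i_ino = 0
 *	spin_unlock(&ip->i_flags_lock)
 *	call_rcu(...)
 *					spin_lock(&ip->i_flags_lock)
 *					ip->i_ino != ino, so return EAGAIN
 *					and retry the lookup
 */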

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink, return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the per-AG inode caches.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
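
/*
 * Typical caller pattern for xfs_iget() (an illustrative sketch only; real
 * callers vary and error handling is elided):
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (!error) {
 *		... use ip ...
 *		xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *		IRELE(ip);
 *	}
 *
 * A NULL transaction pointer is valid outside transaction context.
 */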

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
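
/*
 * A rough sizing note (assuming 64-bit pointers): each on-stack batch array
 * below is XFS_LOOKUP_BATCH * sizeof(struct xfs_inode *) = 32 * 8 = 256
 * bytes, which is why the batch size is kept modest.
 */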

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
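			/*
			 * A worked example of the overflow check below, with
			 * illustrative (not real) geometry: if an AG held 16
			 * inodes, the last one would have agino 15, and
			 * XFS_INO_TO_AGINO(mp, ip->i_ino + 1) would yield
			 * agino 0 of the next AG. 0 < 15, so done is set and
			 * the walk terminates instead of wrapping around.
			 */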
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now that we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount        *mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
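
/*
 * To make the 5s default above concrete: xfs_syncd_centisecs defaults to
 * 3000 (i.e. 30s), so the delay computed above is 3000 / 6 * 10 = 5000ms.
 * Tuning xfs_syncd_centisecs scales the reclaim period proportionally.
 */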

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_reclaim_work_queue(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
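
/*
 * A summary of the tag hierarchy maintained above: the reclaim tag lives at
 * two levels,
 *
 *	m_perag_tree (per-mount)  -- AG entry tagged while that AG has any
 *				     reclaimable inodes
 *	pag_ici_root (per-AG)     -- individual inode entries tagged
 *
 * The first inode tagged in an AG propagates the tag upwards, which lets
 * xfs_perag_get_tag() skip whole AGs that have nothing to reclaim.
 */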

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

STATIC void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *	---------------      ----------		---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

	xfs_iflock(ip);
reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk will leak all
 * the unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now that we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim
 * pass in progress, while this scan throttles the speed of reclaim by doing
 * synchronous reclaim of inodes. That means if we come across dirty inodes,
 * we wait for them to be cleaned, which we hope will not be very long due to
 * the background walker having already kicked the IO off on those dirty
 * inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}
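
/*
 * Note the flag combination above: SYNC_TRYLOCK keeps the per-inode and
 * per-AG grabs non-blocking so shrinker callers don't stall on contended
 * locks, while SYNC_WAIT makes each successfully grabbed inode's flush
 * synchronous, which is what provides the throttling described above.
 */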

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}