xref: /openbmc/linux/fs/xfs/xfs_icache.c (revision 5bd8e16d)
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
				struct xfs_perag *pag, struct xfs_inode *ip);
/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * If this allocation didn't occur inside transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. The code is set up to
	 * handle that anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
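
/*
 * Illustrative sketch (for exposition only, not part of this file) of
 * the lookup-side check that the zeroed i_ino above pairs with.
 * xfs_iget_cache_hit() below implements the real version:
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	spin_lock(&ip->i_flags_lock);
 *	if (ip->i_ino != ino) {
 *		(inode was freed or recycled within the RCU grace
 *		 period: drop the locks, back off and retry the lookup)
 *	}
 */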

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * Check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
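
/*
 * Hedged usage sketch, not part of this file: a hypothetical caller
 * looking up an inode with the ILOCK held shared. Note that XFS at this
 * point in its history returns positive errno values:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	(... use ip ...)
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */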

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
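
/*
 * Worked example of the stack cost mentioned above, assuming a 64-bit
 * build: each walk keeps an on-stack array of XFS_LOOKUP_BATCH inode
 * pointers, i.e. 32 * sizeof(void *) = 32 * 8 = 256 bytes per frame.
 */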

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags, args);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
STATIC void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}
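
/*
 * Worked example of the delay above, assuming the default xfs_eofb_secs
 * value of 300 seconds: 300 * 1000 = 300000ms, i.e. the five minute
 * background trim period the comment before xfs_queue_eofblocks()
 * refers to.
 */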

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}
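
/*
 * Hedged sketch of an execute callback for the iterator above; the name
 * and body are hypothetical, for illustration only. The walk already
 * holds an igrab() reference on each inode passed in; returning EAGAIN
 * makes the whole walk restart after a short delay, 0 means success:
 *
 *	STATIC int
 *	xfs_example_execute(
 *		struct xfs_inode	*ip,
 *		struct xfs_perag	*pag,
 *		int			flags,
 *		void			*args)
 *	{
 *		return 0;
 *	}
 *
 *	error = xfs_inode_ag_iterator(mp, xfs_example_execute,
 *				      SYNC_TRYLOCK, NULL);
 */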

int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount        *mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
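
/*
 * Worked example of the interval above, assuming the default
 * xfs_syncd_centisecs of 3000 (the 30s periodic sync default mentioned
 * in the comment before xfs_reclaim_work_queue()):
 * 3000 / 6 * 10 = 5000ms, i.e. the 5s reclaim period.
 */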

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_reclaim_work_queue(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

STATIC void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

	xfs_iflock(ip);
reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk will leak all
 * the unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we throttle the speed of reclaim by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}
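
/*
 * Hedged sketch of how the superblock shrinker might drive the scan
 * above from its free_cached_objects hook; the exact wiring lives in
 * xfs_super.c and this fragment is illustrative only:
 *
 *	return xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
 */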

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) != eofb->eof_prid)
		return 0;

	return 1;
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags,
	void			*args)
{
	int ret;
	struct xfs_eofblocks *eofb = args;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (!xfs_inode_match_id(ip, eofb))
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	ret = xfs_free_eofblocks(ip->i_mount, ip, true);

	/* don't revisit the inode if we're not waiting */
	if (ret == EAGAIN && !(flags & SYNC_WAIT))
		ret = 0;

	return ret;
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
					 eofb, XFS_ICI_EOFBLOCKS_TAG);
}
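
/*
 * Hedged usage sketch: synchronously trimming EOF blocks on all inodes
 * owned by one uid. The uid value and call site are hypothetical, for
 * illustration only:
 *
 *	struct xfs_eofblocks	eofb = {
 *		.eof_flags = XFS_EOF_FLAGS_UID | XFS_EOF_FLAGS_SYNC,
 *		.eof_uid = make_kuid(current_user_ns(), 1000),
 *	};
 *
 *	error = xfs_icache_free_eofblocks(mp, &eofb);
 */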

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_set_eofblocks_tag(ip);

	tagged = radix_tree_tagged(&pag->pag_ici_root,
				   XFS_ICI_EOFBLOCKS_TAG);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_EOFBLOCKS_TAG);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		xfs_queue_eofblocks(ip->i_mount);

		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
					      -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_clear_eofblocks_tag(ip);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			     XFS_ICI_EOFBLOCKS_TAG);
	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
					       -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}