xref: /openbmc/linux/fs/xfs/xfs_icache.c (revision 7b7dfdd2)
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_trans_resv.h"
23 #include "xfs_inum.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_mount.h"
27 #include "xfs_inode.h"
28 #include "xfs_error.h"
29 #include "xfs_trans.h"
30 #include "xfs_trans_priv.h"
31 #include "xfs_inode_item.h"
32 #include "xfs_quota.h"
33 #include "xfs_trace.h"
34 #include "xfs_icache.h"
35 #include "xfs_bmap_util.h"
36 
37 #include <linux/kthread.h>
38 #include <linux/freezer.h>
39 
40 STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
41 				struct xfs_perag *pag, struct xfs_inode *ip);
42 
43 /*
44  * Allocate and initialise an xfs_inode.
45  */
46 struct xfs_inode *
47 xfs_inode_alloc(
48 	struct xfs_mount	*mp,
49 	xfs_ino_t		ino)
50 {
51 	struct xfs_inode	*ip;
52 
53 	/*
54 	 * If this allocation didn't occur in transaction context, we could
55 	 * use KM_MAYFAIL and return NULL here on ENOMEM. The code below is
56 	 * set up to handle that anyway.
57 	 */
58 	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
59 	if (!ip)
60 		return NULL;
61 	if (inode_init_always(mp->m_super, VFS_I(ip))) {
62 		kmem_zone_free(xfs_inode_zone, ip);
63 		return NULL;
64 	}
65 
66 	ASSERT(atomic_read(&ip->i_pincount) == 0);
67 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
68 	ASSERT(!xfs_isiflocked(ip));
69 	ASSERT(ip->i_ino == 0);
70 
71 	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
72 
73 	/* initialise the xfs inode */
74 	ip->i_ino = ino;
75 	ip->i_mount = mp;
76 	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
77 	ip->i_afp = NULL;
78 	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
79 	ip->i_flags = 0;
80 	ip->i_delayed_blks = 0;
81 	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
82 
83 	return ip;
84 }
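/*
 * Editorial note, not part of the original source: kmem_zone_alloc() with
 * KM_SLEEP is expected to retry internally rather than return NULL, so the
 * NULL check above is defensive; the comment about KM_MAYFAIL describes
 * what would have to change for the allocation to be allowed to fail.
 */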
85 
86 STATIC void
87 xfs_inode_free_callback(
88 	struct rcu_head		*head)
89 {
90 	struct inode		*inode = container_of(head, struct inode, i_rcu);
91 	struct xfs_inode	*ip = XFS_I(inode);
92 
93 	kmem_zone_free(xfs_inode_zone, ip);
94 }
95 
96 void
97 xfs_inode_free(
98 	struct xfs_inode	*ip)
99 {
100 	switch (ip->i_d.di_mode & S_IFMT) {
101 	case S_IFREG:
102 	case S_IFDIR:
103 	case S_IFLNK:
104 		xfs_idestroy_fork(ip, XFS_DATA_FORK);
105 		break;
106 	}
107 
108 	if (ip->i_afp)
109 		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
110 
111 	if (ip->i_itemp) {
112 		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
113 		xfs_inode_item_destroy(ip);
114 		ip->i_itemp = NULL;
115 	}
116 
117 	/*
118 	 * Because we use RCU freeing we need to ensure the inode always
119 	 * appears to be reclaimed with an invalid inode number when in the
120 	 * free state. The ip->i_flags_lock provides the barrier against lookup
121 	 * races.
122 	 */
123 	spin_lock(&ip->i_flags_lock);
124 	ip->i_flags = XFS_IRECLAIM;
125 	ip->i_ino = 0;
126 	spin_unlock(&ip->i_flags_lock);
127 
128 	/* asserts to verify all state is correct here */
129 	ASSERT(atomic_read(&ip->i_pincount) == 0);
130 	ASSERT(!xfs_isiflocked(ip));
131 
132 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
133 }
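/*
 * Editorial note, not part of the original source: zeroing i_ino and
 * setting XFS_IRECLAIM above, under i_flags_lock, is what allows the
 * RCU-protected lookups later in this file (xfs_iget_cache_hit(),
 * xfs_inode_ag_walk_grab(), xfs_reclaim_inode_grab()) to detect and skip
 * an inode that is already on its way to being freed.
 */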
134 
135 /*
136  * Check the validity of the inode we just found in the cache
137  */
138 static int
139 xfs_iget_cache_hit(
140 	struct xfs_perag	*pag,
141 	struct xfs_inode	*ip,
142 	xfs_ino_t		ino,
143 	int			flags,
144 	int			lock_flags) __releases(RCU)
145 {
146 	struct inode		*inode = VFS_I(ip);
147 	struct xfs_mount	*mp = ip->i_mount;
148 	int			error;
149 
150 	/*
151 	 * check for re-use of an inode within an RCU grace period due to the
152 	 * radix tree nodes not being updated yet. We monitor for this by
153 	 * setting the inode number to zero before freeing the inode structure.
154 	 * If the inode has been reallocated and set up, then the inode number
155 	 * will not match, so check for that, too.
156 	 */
157 	spin_lock(&ip->i_flags_lock);
158 	if (ip->i_ino != ino) {
159 		trace_xfs_iget_skip(ip);
160 		XFS_STATS_INC(xs_ig_frecycle);
161 		error = EAGAIN;
162 		goto out_error;
163 	}
164 
165 
166 	/*
167 	 * If we are racing with another cache hit that is currently
168 	 * instantiating this inode or currently recycling it out of
169 	 * reclaimable state, wait for the initialisation to complete
170 	 * before continuing.
171 	 *
172 	 * XXX(hch): eventually we should do something equivalent to
173 	 *	     wait_on_inode to wait for these flags to be cleared
174 	 *	     instead of polling for it.
175 	 */
176 	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
177 		trace_xfs_iget_skip(ip);
178 		XFS_STATS_INC(xs_ig_frecycle);
179 		error = EAGAIN;
180 		goto out_error;
181 	}
182 
183 	/*
184 	 * If lookup is racing with unlink return an error immediately.
185 	 */
186 	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
187 		error = ENOENT;
188 		goto out_error;
189 	}
190 
191 	/*
192 	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
193 	 * Need to carefully get it back into usable state.
194 	 */
195 	if (ip->i_flags & XFS_IRECLAIMABLE) {
196 		trace_xfs_iget_reclaim(ip);
197 
198 		/*
199 		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
200 		 * from stomping over us while we recycle the inode.  We can't
201 		 * clear the radix tree reclaimable tag yet as it requires
202 		 * pag_ici_lock to be held exclusive.
203 		 */
204 		ip->i_flags |= XFS_IRECLAIM;
205 
206 		spin_unlock(&ip->i_flags_lock);
207 		rcu_read_unlock();
208 
209 		error = -inode_init_always(mp->m_super, inode);
210 		if (error) {
211 			/*
212 			 * Re-initializing the inode failed, and we are in deep
213 			 * trouble.  Try to re-add it to the reclaim list.
214 			 */
215 			rcu_read_lock();
216 			spin_lock(&ip->i_flags_lock);
217 
218 			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
219 			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
220 			trace_xfs_iget_reclaim_fail(ip);
221 			goto out_error;
222 		}
223 
224 		spin_lock(&pag->pag_ici_lock);
225 		spin_lock(&ip->i_flags_lock);
226 
227 		/*
228 		 * Clear the per-lifetime state in the inode as we are now
229 		 * effectively a new inode and need to return to the initial
230 		 * state before reuse occurs.
231 		 */
232 		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
233 		ip->i_flags |= XFS_INEW;
234 		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
235 		inode->i_state = I_NEW;
236 
237 		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
238 		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
239 
240 		spin_unlock(&ip->i_flags_lock);
241 		spin_unlock(&pag->pag_ici_lock);
242 	} else {
243 		/* If the VFS inode is being torn down, pause and try again. */
244 		if (!igrab(inode)) {
245 			trace_xfs_iget_skip(ip);
246 			error = EAGAIN;
247 			goto out_error;
248 		}
249 
250 		/* We've got a live one. */
251 		spin_unlock(&ip->i_flags_lock);
252 		rcu_read_unlock();
253 		trace_xfs_iget_hit(ip);
254 	}
255 
256 	if (lock_flags != 0)
257 		xfs_ilock(ip, lock_flags);
258 
259 	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
260 	XFS_STATS_INC(xs_ig_found);
261 
262 	return 0;
263 
264 out_error:
265 	spin_unlock(&ip->i_flags_lock);
266 	rcu_read_unlock();
267 	return error;
268 }
269 
270 
271 static int
272 xfs_iget_cache_miss(
273 	struct xfs_mount	*mp,
274 	struct xfs_perag	*pag,
275 	xfs_trans_t		*tp,
276 	xfs_ino_t		ino,
277 	struct xfs_inode	**ipp,
278 	int			flags,
279 	int			lock_flags)
280 {
281 	struct xfs_inode	*ip;
282 	int			error;
283 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
284 	int			iflags;
285 
286 	ip = xfs_inode_alloc(mp, ino);
287 	if (!ip)
288 		return ENOMEM;
289 
290 	error = xfs_iread(mp, tp, ip, flags);
291 	if (error)
292 		goto out_destroy;
293 
294 	trace_xfs_iget_miss(ip);
295 
296 	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
297 		error = ENOENT;
298 		goto out_destroy;
299 	}
300 
301 	/*
302 	 * Preload the radix tree so we can insert safely under the
303 	 * write spinlock. Note that we cannot sleep inside the preload
304 	 * region. Since we can be called from transaction context, don't
305 	 * recurse into the file system.
306 	 */
307 	if (radix_tree_preload(GFP_NOFS)) {
308 		error = EAGAIN;
309 		goto out_destroy;
310 	}
311 
312 	/*
313 	 * Because the inode hasn't been added to the radix-tree yet it can't
314 	 * be found by another thread, so we can do the non-sleeping lock here.
315 	 */
316 	if (lock_flags) {
317 		if (!xfs_ilock_nowait(ip, lock_flags))
318 			BUG();
319 	}
320 
321 	/*
322 	 * These values must be set before inserting the inode into the radix
323 	 * tree as the moment it is inserted a concurrent lookup (allowed by the
324 	 * RCU locking mechanism) can find it and that lookup must see that this
325 	 * is an inode currently under construction (i.e. that XFS_INEW is set).
326 	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
327 	 * memory barrier that ensures this detection works correctly at lookup
328 	 * time.
329 	 */
330 	iflags = XFS_INEW;
331 	if (flags & XFS_IGET_DONTCACHE)
332 		iflags |= XFS_IDONTCACHE;
333 	ip->i_udquot = NULL;
334 	ip->i_gdquot = NULL;
335 	ip->i_pdquot = NULL;
336 	xfs_iflags_set(ip, iflags);
337 
338 	/* insert the new inode */
339 	spin_lock(&pag->pag_ici_lock);
340 	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
341 	if (unlikely(error)) {
342 		WARN_ON(error != -EEXIST);
343 		XFS_STATS_INC(xs_ig_dup);
344 		error = EAGAIN;
345 		goto out_preload_end;
346 	}
347 	spin_unlock(&pag->pag_ici_lock);
348 	radix_tree_preload_end();
349 
350 	*ipp = ip;
351 	return 0;
352 
353 out_preload_end:
354 	spin_unlock(&pag->pag_ici_lock);
355 	radix_tree_preload_end();
356 	if (lock_flags)
357 		xfs_iunlock(ip, lock_flags);
358 out_destroy:
359 	__destroy_inode(VFS_I(ip));
360 	xfs_inode_free(ip);
361 	return error;
362 }
363 
364 /*
365  * Look up an inode by number in the given file system.
366  * The inode is looked up in the cache held in each AG.
367  * If the inode is found in the cache, initialise the vfs inode
368  * if necessary.
369  *
370  * If it is not in core, read it in from the file system's device,
371  * add it to the cache and initialise the vfs inode.
372  *
373  * The inode is locked according to the value of the lock_flags parameter.
374  * This flag parameter indicates how and if the inode's IO lock and inode lock
375  * should be taken.
376  *
377  * mp -- the mount point structure for the current file system.  It points
378  *       to the per-AG inode caches.
379  * tp -- a pointer to the current transaction if there is one.  This is
380  *       simply passed through to the xfs_iread() call.
381  * ino -- the number of the inode desired.  This is the unique identifier
382  *        within the file system for the inode being requested.
383  * lock_flags -- flags indicating how to lock the inode.  See the comment
384  *		 for xfs_ilock() for a list of valid values.
385  */
386 int
387 xfs_iget(
388 	xfs_mount_t	*mp,
389 	xfs_trans_t	*tp,
390 	xfs_ino_t	ino,
391 	uint		flags,
392 	uint		lock_flags,
393 	xfs_inode_t	**ipp)
394 {
395 	xfs_inode_t	*ip;
396 	int		error;
397 	xfs_perag_t	*pag;
398 	xfs_agino_t	agino;
399 
400 	/*
401 	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
402 	 * doesn't get freed while it's being referenced during a
403 	 * radix tree traversal here.  It assumes this function
404 	 * acquires only the ILOCK (and therefore it has no need to
405 	 * involve the IOLOCK in this synchronization).
406 	 */
407 	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
408 
409 	/* reject inode numbers outside existing AGs */
410 	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
411 		return EINVAL;
412 
413 	/* get the perag structure and ensure that it's inode capable */
414 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
415 	agino = XFS_INO_TO_AGINO(mp, ino);
416 
417 again:
418 	error = 0;
419 	rcu_read_lock();
420 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
421 
422 	if (ip) {
423 		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
424 		if (error)
425 			goto out_error_or_again;
426 	} else {
427 		rcu_read_unlock();
428 		XFS_STATS_INC(xs_ig_missed);
429 
430 		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
431 							flags, lock_flags);
432 		if (error)
433 			goto out_error_or_again;
434 	}
435 	xfs_perag_put(pag);
436 
437 	*ipp = ip;
438 
439 	/*
440 	 * If we have a real type for an on-disk inode, we can set the ops (and
441 	 * unlock) now.  If it's a new inode being created, xfs_ialloc will handle it.
442 	 */
443 	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
444 		xfs_setup_inode(ip);
445 	return 0;
446 
447 out_error_or_again:
448 	if (error == EAGAIN) {
449 		delay(1);
450 		goto again;
451 	}
452 	xfs_perag_put(pag);
453 	return error;
454 }
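/*
 * Illustrative sketch, not part of the original source: a typical caller
 * looks an inode up by number, uses it under the requested lock, and then
 * drops the lock and the reference.  The surrounding caller is
 * hypothetical; the positive errno convention matches the rest of this
 * file.
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... use ip under the shared ILOCK ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */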
455 
456 /*
457  * The inode lookup is done in batches to keep the amount of lock traffic and
458  * radix tree lookups to a minimum. The batch size is a trade off between
459  * lookup reduction and stack usage. This is in the reclaim path, so we can't
460  * be too greedy.
461  */
462 #define XFS_LOOKUP_BATCH	32
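/*
 * Editorial note, not part of the original source: with this batch size the
 * on-stack lookup array used by the walkers below holds XFS_LOOKUP_BATCH
 * pointers, i.e. 32 * sizeof(void *) = 256 bytes on a 64-bit kernel, which
 * is the stack-usage side of the trade off described above.
 */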
463 
464 STATIC int
465 xfs_inode_ag_walk_grab(
466 	struct xfs_inode	*ip)
467 {
468 	struct inode		*inode = VFS_I(ip);
469 
470 	ASSERT(rcu_read_lock_held());
471 
472 	/*
473 	 * check for stale RCU freed inode
474 	 *
475 	 * If the inode has been reallocated, it doesn't matter if it's not in
476 	 * the AG we are walking - we are walking for writeback, so if it
477 	 * passes all the "valid inode" checks and is dirty, then we'll write
478 	 * it back anyway.  If it has been reallocated and is still being
479 	 * initialised, the XFS_INEW check below will catch it.
480 	 */
481 	spin_lock(&ip->i_flags_lock);
482 	if (!ip->i_ino)
483 		goto out_unlock_noent;
484 
485 	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
486 	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
487 		goto out_unlock_noent;
488 	spin_unlock(&ip->i_flags_lock);
489 
490 	/* nothing to sync during shutdown */
491 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
492 		return EFSCORRUPTED;
493 
494 	/* If we can't grab the inode, it must be on its way to reclaim. */
495 	if (!igrab(inode))
496 		return ENOENT;
497 
498 	/* inode is valid */
499 	return 0;
500 
501 out_unlock_noent:
502 	spin_unlock(&ip->i_flags_lock);
503 	return ENOENT;
504 }
505 
506 STATIC int
507 xfs_inode_ag_walk(
508 	struct xfs_mount	*mp,
509 	struct xfs_perag	*pag,
510 	int			(*execute)(struct xfs_inode *ip, int flags,
511 					   void *args),
512 	int			flags,
513 	void			*args,
514 	int			tag)
515 {
516 	uint32_t		first_index;
517 	int			last_error = 0;
518 	int			skipped;
519 	int			done;
520 	int			nr_found;
521 
522 restart:
523 	done = 0;
524 	skipped = 0;
525 	first_index = 0;
526 	nr_found = 0;
527 	do {
528 		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
529 		int		error = 0;
530 		int		i;
531 
532 		rcu_read_lock();
533 
534 		if (tag == -1)
535 			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
536 					(void **)batch, first_index,
537 					XFS_LOOKUP_BATCH);
538 		else
539 			nr_found = radix_tree_gang_lookup_tag(
540 					&pag->pag_ici_root,
541 					(void **) batch, first_index,
542 					XFS_LOOKUP_BATCH, tag);
543 
544 		if (!nr_found) {
545 			rcu_read_unlock();
546 			break;
547 		}
548 
549 		/*
550 		 * Grab the inodes before we drop the lock. If we found
551 		 * nothing, nr_found == 0 and the loop will be skipped.
552 		 */
553 		for (i = 0; i < nr_found; i++) {
554 			struct xfs_inode *ip = batch[i];
555 
556 			if (done || xfs_inode_ag_walk_grab(ip))
557 				batch[i] = NULL;
558 
559 			/*
560 			 * Update the index for the next lookup. Catch
561 			 * overflows into the next AG range which can occur if
562 			 * we have inodes in the last block of the AG and we
563 			 * are currently pointing to the last inode.
564 			 *
565 			 * Because we may see inodes that are from the wrong AG
566 			 * due to RCU freeing and reallocation, only update the
567 			 * index if it lies in this AG. It was a race that led
568 			 * us to see this inode, so another lookup from the
569 			 * same index will not find it again.
570 			 */
571 			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
572 				continue;
573 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
574 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
575 				done = 1;
576 		}
577 
578 		/* unlock now we've grabbed the inodes. */
579 		rcu_read_unlock();
580 
581 		for (i = 0; i < nr_found; i++) {
582 			if (!batch[i])
583 				continue;
584 			error = execute(batch[i], flags, args);
585 			IRELE(batch[i]);
586 			if (error == EAGAIN) {
587 				skipped++;
588 				continue;
589 			}
590 			if (error && last_error != EFSCORRUPTED)
591 				last_error = error;
592 		}
593 
594 		/* bail out if the filesystem is corrupted.  */
595 		if (error == EFSCORRUPTED)
596 			break;
597 
598 		cond_resched();
599 
600 	} while (nr_found && !done);
601 
602 	if (skipped) {
603 		delay(1);
604 		goto restart;
605 	}
606 	return last_error;
607 }
608 
609 /*
610  * Background scanning to trim post-EOF preallocated space. This is queued
611  * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
612  */
613 STATIC void
614 xfs_queue_eofblocks(
615 	struct xfs_mount *mp)
616 {
617 	rcu_read_lock();
618 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
619 		queue_delayed_work(mp->m_eofblocks_workqueue,
620 				   &mp->m_eofblocks_work,
621 				   msecs_to_jiffies(xfs_eofb_secs * 1000));
622 	rcu_read_unlock();
623 }
624 
625 void
626 xfs_eofblocks_worker(
627 	struct work_struct *work)
628 {
629 	struct xfs_mount *mp = container_of(to_delayed_work(work),
630 				struct xfs_mount, m_eofblocks_work);
631 	xfs_icache_free_eofblocks(mp, NULL);
632 	xfs_queue_eofblocks(mp);
633 }
634 
635 int
636 xfs_inode_ag_iterator(
637 	struct xfs_mount	*mp,
638 	int			(*execute)(struct xfs_inode *ip, int flags,
639 					   void *args),
640 	int			flags,
641 	void			*args)
642 {
643 	struct xfs_perag	*pag;
644 	int			error = 0;
645 	int			last_error = 0;
646 	xfs_agnumber_t		ag;
647 
648 	ag = 0;
649 	while ((pag = xfs_perag_get(mp, ag))) {
650 		ag = pag->pag_agno + 1;
651 		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
652 		xfs_perag_put(pag);
653 		if (error) {
654 			last_error = error;
655 			if (error == EFSCORRUPTED)
656 				break;
657 		}
658 	}
659 	return XFS_ERROR(last_error);
660 }
661 
662 int
663 xfs_inode_ag_iterator_tag(
664 	struct xfs_mount	*mp,
665 	int			(*execute)(struct xfs_inode *ip, int flags,
666 					   void *args),
667 	int			flags,
668 	void			*args,
669 	int			tag)
670 {
671 	struct xfs_perag	*pag;
672 	int			error = 0;
673 	int			last_error = 0;
674 	xfs_agnumber_t		ag;
675 
676 	ag = 0;
677 	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
678 		ag = pag->pag_agno + 1;
679 		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
680 		xfs_perag_put(pag);
681 		if (error) {
682 			last_error = error;
683 			if (error == EFSCORRUPTED)
684 				break;
685 		}
686 	}
687 	return XFS_ERROR(last_error);
688 }
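/*
 * Illustrative sketch, not part of the original source: the shape of an
 * "execute" callback as expected by xfs_inode_ag_iterator() and
 * xfs_inode_ag_iterator_tag().  The callback name and its counting
 * behaviour are hypothetical.  It is called with an inode reference held
 * (see xfs_inode_ag_walk_grab() above) and returns 0 on success, EAGAIN to
 * have the AG walk restarted, or another positive errno.
 *
 *	STATIC int
 *	xfs_example_count_delalloc(
 *		struct xfs_inode	*ip,
 *		int			flags,
 *		void			*args)
 *	{
 *		unsigned long		*count = args;
 *
 *		if (ip->i_delayed_blks)
 *			(*count)++;
 *		return 0;
 *	}
 */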
689 
690 /*
691  * Queue a new inode reclaim pass if there are reclaimable inodes and there
692  * isn't a reclaim pass already in progress. By default it runs every 5s based
693  * on the xfs periodic sync default of 30s. Perhaps this should have its own
694  * tunable, but that can be done if this method proves to be ineffective or too
695  * aggressive.
696  */
697 static void
698 xfs_reclaim_work_queue(
699 	struct xfs_mount        *mp)
700 {
701 
702 	rcu_read_lock();
703 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
704 		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
705 			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
706 	}
707 	rcu_read_unlock();
708 }
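/*
 * Editorial note, not part of the original source: the delay above is
 * xfs_syncd_centisecs / 6 * 10 milliseconds, so with the default of 3000
 * centiseconds (30s) the reclaim work is requeued every 3000 / 6 * 10 =
 * 5000ms, i.e. the "every 5s" mentioned in the comment above.
 */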
709 
710 /*
711  * This is a fast pass over the inode cache to try to get reclaim moving on as
712  * many inodes as possible in a short period of time. It kicks itself every few
713  * seconds, as well as being kicked by the inode cache shrinker when memory
714  * goes low. It scans as quickly as possible avoiding locked inodes or those
715  * already being flushed, and once done schedules a future pass.
716  */
717 void
718 xfs_reclaim_worker(
719 	struct work_struct *work)
720 {
721 	struct xfs_mount *mp = container_of(to_delayed_work(work),
722 					struct xfs_mount, m_reclaim_work);
723 
724 	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
725 	xfs_reclaim_work_queue(mp);
726 }
727 
728 static void
729 __xfs_inode_set_reclaim_tag(
730 	struct xfs_perag	*pag,
731 	struct xfs_inode	*ip)
732 {
733 	radix_tree_tag_set(&pag->pag_ici_root,
734 			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
735 			   XFS_ICI_RECLAIM_TAG);
736 
737 	if (!pag->pag_ici_reclaimable) {
738 		/* propagate the reclaim tag up into the perag radix tree */
739 		spin_lock(&ip->i_mount->m_perag_lock);
740 		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
741 				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
742 				XFS_ICI_RECLAIM_TAG);
743 		spin_unlock(&ip->i_mount->m_perag_lock);
744 
745 		/* schedule periodic background inode reclaim */
746 		xfs_reclaim_work_queue(ip->i_mount);
747 
748 		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
749 							-1, _RET_IP_);
750 	}
751 	pag->pag_ici_reclaimable++;
752 }
753 
754 /*
755  * We set the inode flag atomically with the radix tree tag.
756  * Once we get tag lookups on the radix tree, this inode flag
757  * can go away.
758  */
759 void
760 xfs_inode_set_reclaim_tag(
761 	xfs_inode_t	*ip)
762 {
763 	struct xfs_mount *mp = ip->i_mount;
764 	struct xfs_perag *pag;
765 
766 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
767 	spin_lock(&pag->pag_ici_lock);
768 	spin_lock(&ip->i_flags_lock);
769 	__xfs_inode_set_reclaim_tag(pag, ip);
770 	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
771 	spin_unlock(&ip->i_flags_lock);
772 	spin_unlock(&pag->pag_ici_lock);
773 	xfs_perag_put(pag);
774 }
775 
776 STATIC void
777 __xfs_inode_clear_reclaim(
778 	xfs_perag_t	*pag,
779 	xfs_inode_t	*ip)
780 {
781 	pag->pag_ici_reclaimable--;
782 	if (!pag->pag_ici_reclaimable) {
783 		/* clear the reclaim tag from the perag radix tree */
784 		spin_lock(&ip->i_mount->m_perag_lock);
785 		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
786 				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
787 				XFS_ICI_RECLAIM_TAG);
788 		spin_unlock(&ip->i_mount->m_perag_lock);
789 		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
790 							-1, _RET_IP_);
791 	}
792 }
793 
794 STATIC void
795 __xfs_inode_clear_reclaim_tag(
796 	xfs_mount_t	*mp,
797 	xfs_perag_t	*pag,
798 	xfs_inode_t	*ip)
799 {
800 	radix_tree_tag_clear(&pag->pag_ici_root,
801 			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
802 	__xfs_inode_clear_reclaim(pag, ip);
803 }
804 
805 /*
806  * Grab the inode for reclaim exclusively.
807  * Return 0 if we grabbed it, non-zero otherwise.
808  */
809 STATIC int
810 xfs_reclaim_inode_grab(
811 	struct xfs_inode	*ip,
812 	int			flags)
813 {
814 	ASSERT(rcu_read_lock_held());
815 
816 	/* quick check for stale RCU freed inode */
817 	if (!ip->i_ino)
818 		return 1;
819 
820 	/*
821 	 * If we are asked for non-blocking operation, do unlocked checks to
822 	 * see if the inode already is being flushed or in reclaim to avoid
823 	 * lock traffic.
824 	 */
825 	if ((flags & SYNC_TRYLOCK) &&
826 	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
827 		return 1;
828 
829 	/*
830 	 * The radix tree lock here protects a thread in xfs_iget from racing
831 	 * with us starting reclaim on the inode.  Once we have the
832 	 * XFS_IRECLAIM flag set it will not touch us.
833 	 *
834 	 * Due to RCU lookup, we may find inodes that have been freed and only
835 	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
836 	 * aren't candidates for reclaim at all, so we must check that
837 	 * XFS_IRECLAIMABLE is set before proceeding to reclaim.
838 	 */
839 	spin_lock(&ip->i_flags_lock);
840 	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
841 	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
842 		/* not a reclaim candidate. */
843 		spin_unlock(&ip->i_flags_lock);
844 		return 1;
845 	}
846 	__xfs_iflags_set(ip, XFS_IRECLAIM);
847 	spin_unlock(&ip->i_flags_lock);
848 	return 0;
849 }
850 
851 /*
852  * Inodes in different states need to be treated differently. The following
853  * table lists the inode states and the reclaim actions necessary:
854  *
855  *	inode state	     iflush ret		required action
856  *      ---------------      ----------         ---------------
857  *	bad			-		reclaim
858  *	shutdown		EIO		unpin and reclaim
859  *	clean, unpinned		0		reclaim
860  *	stale, unpinned		0		reclaim
861  *	clean, pinned(*)	0		requeue
862  *	stale, pinned		EAGAIN		requeue
863  *	dirty, async		-		requeue
864  *	dirty, sync		0		reclaim
865  *
866  * (*) dgc: I don't think the clean, pinned state is possible but it gets
867  * handled anyway given the order of checks implemented.
868  *
869  * Also, because we get the flush lock first, we know that any inode that has
870  * been flushed delwri has had the flush completed by the time we check that
871  * the inode is clean.
872  *
873  * Note that because the inode is flushed delayed write by AIL pushing, the
874  * flush lock may already be held here and waiting on it can result in very
875  * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
876  * the caller should push the AIL first before trying to reclaim inodes to
877  * minimise the amount of time spent waiting.  For background reclaim, we only
878  * bother to reclaim clean inodes anyway.
879  *
880  * Hence the order of actions after gaining the locks should be:
881  *	bad		=> reclaim
882  *	shutdown	=> unpin and reclaim
883  *	pinned, async	=> requeue
884  *	pinned, sync	=> unpin
885  *	stale		=> reclaim
886  *	clean		=> reclaim
887  *	dirty, async	=> requeue
888  *	dirty, sync	=> flush, wait and reclaim
889  */
890 STATIC int
891 xfs_reclaim_inode(
892 	struct xfs_inode	*ip,
893 	struct xfs_perag	*pag,
894 	int			sync_mode)
895 {
896 	struct xfs_buf		*bp = NULL;
897 	int			error;
898 
899 restart:
900 	error = 0;
901 	xfs_ilock(ip, XFS_ILOCK_EXCL);
902 	if (!xfs_iflock_nowait(ip)) {
903 		if (!(sync_mode & SYNC_WAIT))
904 			goto out;
905 		xfs_iflock(ip);
906 	}
907 
908 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
909 		xfs_iunpin_wait(ip);
910 		xfs_iflush_abort(ip, false);
911 		goto reclaim;
912 	}
913 	if (xfs_ipincount(ip)) {
914 		if (!(sync_mode & SYNC_WAIT))
915 			goto out_ifunlock;
916 		xfs_iunpin_wait(ip);
917 	}
918 	if (xfs_iflags_test(ip, XFS_ISTALE))
919 		goto reclaim;
920 	if (xfs_inode_clean(ip))
921 		goto reclaim;
922 
923 	/*
924 	 * Never flush out dirty data during non-blocking reclaim, as it would
925 	 * just contend with AIL pushing trying to do the same job.
926 	 */
927 	if (!(sync_mode & SYNC_WAIT))
928 		goto out_ifunlock;
929 
930 	/*
931 	 * Now we have an inode that needs flushing.
932 	 *
933 	 * Note that xfs_iflush will never block on the inode buffer lock, as
934 	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
935 	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
936 	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
937 	 * result in an ABBA deadlock with xfs_ifree_cluster().
938 	 *
939 	 * As xfs_ifree_cluster() must gather all inodes that are active in the
940 	 * cache to mark them stale, if we hit this case we don't actually want
941 	 * to do IO here - we want the inode marked stale so we can simply
942 	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
943 	 * inode, back off and try again.  Hopefully the next pass through will
944 	 * see the stale flag set on the inode.
945 	 */
946 	error = xfs_iflush(ip, &bp);
947 	if (error == EAGAIN) {
948 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
949 		/* backoff longer than in xfs_ifree_cluster */
950 		delay(2);
951 		goto restart;
952 	}
953 
954 	if (!error) {
955 		error = xfs_bwrite(bp);
956 		xfs_buf_relse(bp);
957 	}
958 
959 	xfs_iflock(ip);
960 reclaim:
961 	xfs_ifunlock(ip);
962 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
963 
964 	XFS_STATS_INC(xs_ig_reclaims);
965 	/*
966 	 * Remove the inode from the per-AG radix tree.
967 	 *
968 	 * Because radix_tree_delete won't complain even if the item was never
969 	 * added to the tree, assert that it has been there before, to catch
970 	 * problems with the inode lifetime early on.
971 	 */
972 	spin_lock(&pag->pag_ici_lock);
973 	if (!radix_tree_delete(&pag->pag_ici_root,
974 				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
975 		ASSERT(0);
976 	__xfs_inode_clear_reclaim(pag, ip);
977 	spin_unlock(&pag->pag_ici_lock);
978 
979 	/*
980 	 * Here we do an (almost) spurious inode lock in order to coordinate
981 	 * with inode cache radix tree lookups.  This is because the lookup
982 	 * can reference the inodes in the cache without taking references.
983 	 *
984 	 * We make that OK here by ensuring that we wait until the inode is
985 	 * unlocked after the lookup before we go ahead and free it.
986 	 */
987 	xfs_ilock(ip, XFS_ILOCK_EXCL);
988 	xfs_qm_dqdetach(ip);
989 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
990 
991 	xfs_inode_free(ip);
992 	return error;
993 
994 out_ifunlock:
995 	xfs_ifunlock(ip);
996 out:
997 	xfs_iflags_clear(ip, XFS_IRECLAIM);
998 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
999 	/*
1000 	 * We could return EAGAIN here to make reclaim rescan the inode tree in
1001 	 * a short while. However, this just burns CPU time scanning the tree
1002 	 * waiting for IO to complete and the reclaim work never goes back to
1003 	 * the idle state. Instead, return 0 to let the next scheduled
1004 	 * background reclaim attempt to reclaim the inode again.
1005 	 */
1006 	return 0;
1007 }
1008 
1009 /*
1010  * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1011  * corrupted, we still want to try to reclaim all the inodes. If we don't,
1012  * then a shutdown during the filesystem unmount reclaim walk would leak all
1013  * the unreclaimed inodes.
1014  */
1015 STATIC int
1016 xfs_reclaim_inodes_ag(
1017 	struct xfs_mount	*mp,
1018 	int			flags,
1019 	int			*nr_to_scan)
1020 {
1021 	struct xfs_perag	*pag;
1022 	int			error = 0;
1023 	int			last_error = 0;
1024 	xfs_agnumber_t		ag;
1025 	int			trylock = flags & SYNC_TRYLOCK;
1026 	int			skipped;
1027 
1028 restart:
1029 	ag = 0;
1030 	skipped = 0;
1031 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1032 		unsigned long	first_index = 0;
1033 		int		done = 0;
1034 		int		nr_found = 0;
1035 
1036 		ag = pag->pag_agno + 1;
1037 
1038 		if (trylock) {
1039 			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
1040 				skipped++;
1041 				xfs_perag_put(pag);
1042 				continue;
1043 			}
1044 			first_index = pag->pag_ici_reclaim_cursor;
1045 		} else
1046 			mutex_lock(&pag->pag_ici_reclaim_lock);
1047 
1048 		do {
1049 			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1050 			int	i;
1051 
1052 			rcu_read_lock();
1053 			nr_found = radix_tree_gang_lookup_tag(
1054 					&pag->pag_ici_root,
1055 					(void **)batch, first_index,
1056 					XFS_LOOKUP_BATCH,
1057 					XFS_ICI_RECLAIM_TAG);
1058 			if (!nr_found) {
1059 				done = 1;
1060 				rcu_read_unlock();
1061 				break;
1062 			}
1063 
1064 			/*
1065 			 * Grab the inodes before we drop the lock. If we found
1066 			 * nothing, nr_found == 0 and the loop will be skipped.
1067 			 */
1068 			for (i = 0; i < nr_found; i++) {
1069 				struct xfs_inode *ip = batch[i];
1070 
1071 				if (done || xfs_reclaim_inode_grab(ip, flags))
1072 					batch[i] = NULL;
1073 
1074 				/*
1075 				 * Update the index for the next lookup. Catch
1076 				 * overflows into the next AG range which can
1077 				 * occur if we have inodes in the last block of
1078 				 * the AG and we are currently pointing to the
1079 				 * last inode.
1080 				 *
1081 				 * Because we may see inodes that are from the
1082 				 * wrong AG due to RCU freeing and
1083 				 * reallocation, only update the index if it
1084 				 * lies in this AG. It was a race that led us
1085 				 * to see this inode, so another lookup from
1086 				 * the same index will not find it again.
1087 				 */
1088 				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1089 								pag->pag_agno)
1090 					continue;
1091 				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1092 				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1093 					done = 1;
1094 			}
1095 
1096 			/* unlock now we've grabbed the inodes. */
1097 			rcu_read_unlock();
1098 
1099 			for (i = 0; i < nr_found; i++) {
1100 				if (!batch[i])
1101 					continue;
1102 				error = xfs_reclaim_inode(batch[i], pag, flags);
1103 				if (error && last_error != EFSCORRUPTED)
1104 					last_error = error;
1105 			}
1106 
1107 			*nr_to_scan -= XFS_LOOKUP_BATCH;
1108 
1109 			cond_resched();
1110 
1111 		} while (nr_found && !done && *nr_to_scan > 0);
1112 
1113 		if (trylock && !done)
1114 			pag->pag_ici_reclaim_cursor = first_index;
1115 		else
1116 			pag->pag_ici_reclaim_cursor = 0;
1117 		mutex_unlock(&pag->pag_ici_reclaim_lock);
1118 		xfs_perag_put(pag);
1119 	}
1120 
1121 	/*
1122 	 * If we skipped any AG, and we still have scan count remaining, do
1123 	 * another pass this time using blocking reclaim semantics (i.e.
1124 	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
1125 	 * ensures that when we get more reclaimers than AGs we block rather
1126 	 * than spin trying to execute reclaim.
1127 	 */
1128 	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
1129 		trylock = 0;
1130 		goto restart;
1131 	}
1132 	return XFS_ERROR(last_error);
1133 }
1134 
1135 int
1136 xfs_reclaim_inodes(
1137 	xfs_mount_t	*mp,
1138 	int		mode)
1139 {
1140 	int		nr_to_scan = INT_MAX;
1141 
1142 	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
1143 }
1144 
1145 /*
1146  * Scan a certain number of inodes for reclaim.
1147  *
1148  * When called we make sure that there is a background (fast) inode reclaim in
1149  * progress, while we throttle the speed of reclaim by doing synchronous
1150  * reclaim of inodes. That means if we come across dirty inodes, we wait for
1151  * them to be cleaned, which we hope will not be very long due to the
1152  * background walker having already kicked the IO off on those dirty inodes.
1153  */
1154 long
1155 xfs_reclaim_inodes_nr(
1156 	struct xfs_mount	*mp,
1157 	int			nr_to_scan)
1158 {
1159 	/* kick background reclaimer and push the AIL */
1160 	xfs_reclaim_work_queue(mp);
1161 	xfs_ail_push_all(mp->m_ail);
1162 
1163 	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
1164 }
1165 
1166 /*
1167  * Return the number of reclaimable inodes in the filesystem for
1168  * the shrinker to determine how much to reclaim.
1169  */
1170 int
1171 xfs_reclaim_inodes_count(
1172 	struct xfs_mount	*mp)
1173 {
1174 	struct xfs_perag	*pag;
1175 	xfs_agnumber_t		ag = 0;
1176 	int			reclaimable = 0;
1177 
1178 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1179 		ag = pag->pag_agno + 1;
1180 		reclaimable += pag->pag_ici_reclaimable;
1181 		xfs_perag_put(pag);
1182 	}
1183 	return reclaimable;
1184 }
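/*
 * Editorial note, not part of the original source: in this version of the
 * code the expected callers of xfs_reclaim_inodes_count() and
 * xfs_reclaim_inodes_nr() are the superblock shrinker hooks in xfs_super.c
 * (the "count objects" and "free cached objects" callbacks respectively);
 * the lines below sketch the assumed wiring.
 *
 *	return xfs_reclaim_inodes_count(XFS_M(sb));
 *	return xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
 */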
1185 
1186 STATIC int
1187 xfs_inode_match_id(
1188 	struct xfs_inode	*ip,
1189 	struct xfs_eofblocks	*eofb)
1190 {
1191 	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1192 	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1193 		return 0;
1194 
1195 	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1196 	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1197 		return 0;
1198 
1199 	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1200 	    xfs_get_projid(ip) != eofb->eof_prid)
1201 		return 0;
1202 
1203 	return 1;
1204 }
1205 
1206 STATIC int
1207 xfs_inode_free_eofblocks(
1208 	struct xfs_inode	*ip,
1209 	int			flags,
1210 	void			*args)
1211 {
1212 	int ret;
1213 	struct xfs_eofblocks *eofb = args;
1214 
1215 	if (!xfs_can_free_eofblocks(ip, false)) {
1216 		/* inode could be preallocated or append-only */
1217 		trace_xfs_inode_free_eofblocks_invalid(ip);
1218 		xfs_inode_clear_eofblocks_tag(ip);
1219 		return 0;
1220 	}
1221 
1222 	/*
1223 	 * If the mapping is dirty the operation can block and wait for some
1224 	 * time. Unless we are waiting, skip it.
1225 	 */
1226 	if (!(flags & SYNC_WAIT) &&
1227 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1228 		return 0;
1229 
1230 	if (eofb) {
1231 		if (!xfs_inode_match_id(ip, eofb))
1232 			return 0;
1233 
1234 		/* skip the inode if the file size is too small */
1235 		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1236 		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
1237 			return 0;
1238 	}
1239 
1240 	ret = xfs_free_eofblocks(ip->i_mount, ip, true);
1241 
1242 	/* don't revisit the inode if we're not waiting */
1243 	if (ret == EAGAIN && !(flags & SYNC_WAIT))
1244 		ret = 0;
1245 
1246 	return ret;
1247 }
1248 
1249 int
1250 xfs_icache_free_eofblocks(
1251 	struct xfs_mount	*mp,
1252 	struct xfs_eofblocks	*eofb)
1253 {
1254 	int flags = SYNC_TRYLOCK;
1255 
1256 	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
1257 		flags = SYNC_WAIT;
1258 
1259 	return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
1260 					 eofb, XFS_ICI_EOFBLOCKS_TAG);
1261 }
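/*
 * Illustrative sketch, not part of the original source: building an
 * xfs_eofblocks filter for xfs_icache_free_eofblocks().  The field and
 * flag names are the ones used by xfs_inode_match_id() and
 * xfs_inode_free_eofblocks() above; the surrounding caller is
 * hypothetical.
 *
 *	struct xfs_eofblocks	eofb = { 0 };
 *	int			error;
 *
 *	eofb.eof_flags = XFS_EOF_FLAGS_UID | XFS_EOF_FLAGS_SYNC |
 *			 XFS_EOF_FLAGS_MINFILESIZE;
 *	eofb.eof_uid = current_fsuid();
 *	eofb.eof_min_file_size = XFS_FSB_TO_B(mp, 1);
 *	error = xfs_icache_free_eofblocks(mp, &eofb);
 */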
1262 
1263 void
1264 xfs_inode_set_eofblocks_tag(
1265 	xfs_inode_t	*ip)
1266 {
1267 	struct xfs_mount *mp = ip->i_mount;
1268 	struct xfs_perag *pag;
1269 	int tagged;
1270 
1271 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1272 	spin_lock(&pag->pag_ici_lock);
1273 	trace_xfs_inode_set_eofblocks_tag(ip);
1274 
1275 	tagged = radix_tree_tagged(&pag->pag_ici_root,
1276 				   XFS_ICI_EOFBLOCKS_TAG);
1277 	radix_tree_tag_set(&pag->pag_ici_root,
1278 			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1279 			   XFS_ICI_EOFBLOCKS_TAG);
1280 	if (!tagged) {
1281 		/* propagate the eofblocks tag up into the perag radix tree */
1282 		spin_lock(&ip->i_mount->m_perag_lock);
1283 		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1284 				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1285 				   XFS_ICI_EOFBLOCKS_TAG);
1286 		spin_unlock(&ip->i_mount->m_perag_lock);
1287 
1288 		/* kick off background trimming */
1289 		xfs_queue_eofblocks(ip->i_mount);
1290 
1291 		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
1292 					      -1, _RET_IP_);
1293 	}
1294 
1295 	spin_unlock(&pag->pag_ici_lock);
1296 	xfs_perag_put(pag);
1297 }
1298 
1299 void
1300 xfs_inode_clear_eofblocks_tag(
1301 	xfs_inode_t	*ip)
1302 {
1303 	struct xfs_mount *mp = ip->i_mount;
1304 	struct xfs_perag *pag;
1305 
1306 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1307 	spin_lock(&pag->pag_ici_lock);
1308 	trace_xfs_inode_clear_eofblocks_tag(ip);
1309 
1310 	radix_tree_tag_clear(&pag->pag_ici_root,
1311 			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1312 			     XFS_ICI_EOFBLOCKS_TAG);
1313 	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
1314 		/* clear the eofblocks tag from the perag radix tree */
1315 		spin_lock(&ip->i_mount->m_perag_lock);
1316 		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1317 				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1318 				     XFS_ICI_EOFBLOCKS_TAG);
1319 		spin_unlock(&ip->i_mount->m_perag_lock);
1320 		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
1321 					       -1, _RET_IP_);
1322 	}
1323 
1324 	spin_unlock(&pag->pag_ici_lock);
1325 	xfs_perag_put(pag);
1326 }
1327 
1328