xref: /openbmc/linux/fs/xfs/xfs_icache.c (revision 275876e2)
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_trans_resv.h"
23 #include "xfs_inum.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_mount.h"
27 #include "xfs_inode.h"
28 #include "xfs_error.h"
29 #include "xfs_trans.h"
30 #include "xfs_trans_priv.h"
31 #include "xfs_inode_item.h"
32 #include "xfs_quota.h"
33 #include "xfs_trace.h"
34 #include "xfs_icache.h"
35 #include "xfs_bmap_util.h"
36 #include "xfs_quota.h"
37 #include "xfs_dquot_item.h"
38 #include "xfs_dquot.h"
39 
40 #include <linux/kthread.h>
41 #include <linux/freezer.h>
42 
43 STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
44 				struct xfs_perag *pag, struct xfs_inode *ip);
45 
46 /*
47  * Allocate and initialise an xfs_inode.
48  */
49 struct xfs_inode *
50 xfs_inode_alloc(
51 	struct xfs_mount	*mp,
52 	xfs_ino_t		ino)
53 {
54 	struct xfs_inode	*ip;
55 
56 	/*
57 	 * If this allocation didn't occur in transaction context, we could use
58 	 * KM_MAYFAIL and return NULL here on ENOMEM. The code below is set up
59 	 * to handle that anyway.
60 	 */
61 	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
62 	if (!ip)
63 		return NULL;
64 	if (inode_init_always(mp->m_super, VFS_I(ip))) {
65 		kmem_zone_free(xfs_inode_zone, ip);
66 		return NULL;
67 	}
68 
69 	ASSERT(atomic_read(&ip->i_pincount) == 0);
70 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
71 	ASSERT(!xfs_isiflocked(ip));
72 	ASSERT(ip->i_ino == 0);
73 
74 	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
75 
76 	/* initialise the xfs inode */
77 	ip->i_ino = ino;
78 	ip->i_mount = mp;
79 	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
80 	ip->i_afp = NULL;
81 	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
82 	ip->i_flags = 0;
83 	ip->i_delayed_blks = 0;
84 	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
85 
86 	return ip;
87 }
88 
89 STATIC void
90 xfs_inode_free_callback(
91 	struct rcu_head		*head)
92 {
93 	struct inode		*inode = container_of(head, struct inode, i_rcu);
94 	struct xfs_inode	*ip = XFS_I(inode);
95 
96 	kmem_zone_free(xfs_inode_zone, ip);
97 }
98 
99 void
100 xfs_inode_free(
101 	struct xfs_inode	*ip)
102 {
103 	switch (ip->i_d.di_mode & S_IFMT) {
104 	case S_IFREG:
105 	case S_IFDIR:
106 	case S_IFLNK:
107 		xfs_idestroy_fork(ip, XFS_DATA_FORK);
108 		break;
109 	}
110 
111 	if (ip->i_afp)
112 		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
113 
114 	if (ip->i_itemp) {
115 		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
116 		xfs_inode_item_destroy(ip);
117 		ip->i_itemp = NULL;
118 	}
119 
120 	/*
121 	 * Because we use RCU freeing we need to ensure the inode always
122 	 * appears to be reclaimed with an invalid inode number when in the
123 	 * free state. The ip->i_flags_lock provides the barrier against lookup
124 	 * races.
125 	 */
126 	spin_lock(&ip->i_flags_lock);
127 	ip->i_flags = XFS_IRECLAIM;
128 	ip->i_ino = 0;
129 	spin_unlock(&ip->i_flags_lock);
130 
131 	/* asserts to verify all state is correct here */
132 	ASSERT(atomic_read(&ip->i_pincount) == 0);
133 	ASSERT(!xfs_isiflocked(ip));
134 
135 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
136 }
137 
138 /*
139  * Check the validity of the inode we just found in the cache.
140  */
141 static int
142 xfs_iget_cache_hit(
143 	struct xfs_perag	*pag,
144 	struct xfs_inode	*ip,
145 	xfs_ino_t		ino,
146 	int			flags,
147 	int			lock_flags) __releases(RCU)
148 {
149 	struct inode		*inode = VFS_I(ip);
150 	struct xfs_mount	*mp = ip->i_mount;
151 	int			error;
152 
153 	/*
154 	 * check for re-use of an inode within an RCU grace period due to the
155 	 * radix tree nodes not being updated yet. We monitor for this by
156 	 * setting the inode number to zero before freeing the inode structure.
157 	 * If the inode has been reallocated and set up, then the inode number
158 	 * will not match, so check for that, too.
159 	 */
160 	spin_lock(&ip->i_flags_lock);
161 	if (ip->i_ino != ino) {
162 		trace_xfs_iget_skip(ip);
163 		XFS_STATS_INC(xs_ig_frecycle);
164 		error = -EAGAIN;
165 		goto out_error;
166 	}
167 
168 
169 	/*
170 	 * If we are racing with another cache hit that is currently
171 	 * instantiating this inode or currently recycling it out of
172 	 * reclaimable state, wait for the initialisation to complete
173 	 * before continuing.
174 	 *
175 	 * XXX(hch): eventually we should do something equivalent to
176 	 *	     wait_on_inode to wait for these flags to be cleared
177 	 *	     instead of polling for it.
178 	 */
179 	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
180 		trace_xfs_iget_skip(ip);
181 		XFS_STATS_INC(xs_ig_frecycle);
182 		error = -EAGAIN;
183 		goto out_error;
184 	}
185 
186 	/*
187 	 * If lookup is racing with unlink, return an error immediately.
188 	 */
189 	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
190 		error = -ENOENT;
191 		goto out_error;
192 	}
193 
194 	/*
195 	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
196 	 * We need to carefully get it back into a usable state.
197 	 */
198 	if (ip->i_flags & XFS_IRECLAIMABLE) {
199 		trace_xfs_iget_reclaim(ip);
200 
201 		/*
202 		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
203 		 * from stomping over us while we recycle the inode.  We can't
204 		 * clear the radix tree reclaimable tag yet as it requires
205 		 * pag_ici_lock to be held exclusive.
206 		 */
207 		ip->i_flags |= XFS_IRECLAIM;
208 
209 		spin_unlock(&ip->i_flags_lock);
210 		rcu_read_unlock();
211 
212 		error = inode_init_always(mp->m_super, inode);
213 		if (error) {
214 			/*
215 			 * Re-initializing the inode failed, and we are in deep
216 			 * trouble.  Try to re-add it to the reclaim list.
217 			 */
218 			rcu_read_lock();
219 			spin_lock(&ip->i_flags_lock);
220 
221 			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
222 			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
223 			trace_xfs_iget_reclaim_fail(ip);
224 			goto out_error;
225 		}
226 
227 		spin_lock(&pag->pag_ici_lock);
228 		spin_lock(&ip->i_flags_lock);
229 
230 		/*
231 		 * Clear the per-lifetime state in the inode as we are now
232 		 * effectively a new inode and need to return to the initial
233 		 * state before reuse occurs.
234 		 */
235 		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
236 		ip->i_flags |= XFS_INEW;
237 		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
238 		inode->i_state = I_NEW;
239 
240 		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
241 		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
242 
243 		spin_unlock(&ip->i_flags_lock);
244 		spin_unlock(&pag->pag_ici_lock);
245 	} else {
246 		/* If the VFS inode is being torn down, pause and try again. */
247 		if (!igrab(inode)) {
248 			trace_xfs_iget_skip(ip);
249 			error = -EAGAIN;
250 			goto out_error;
251 		}
252 
253 		/* We've got a live one. */
254 		spin_unlock(&ip->i_flags_lock);
255 		rcu_read_unlock();
256 		trace_xfs_iget_hit(ip);
257 	}
258 
259 	if (lock_flags != 0)
260 		xfs_ilock(ip, lock_flags);
261 
262 	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
263 	XFS_STATS_INC(xs_ig_found);
264 
265 	return 0;
266 
267 out_error:
268 	spin_unlock(&ip->i_flags_lock);
269 	rcu_read_unlock();
270 	return error;
271 }
272 
273 
274 static int
275 xfs_iget_cache_miss(
276 	struct xfs_mount	*mp,
277 	struct xfs_perag	*pag,
278 	xfs_trans_t		*tp,
279 	xfs_ino_t		ino,
280 	struct xfs_inode	**ipp,
281 	int			flags,
282 	int			lock_flags)
283 {
284 	struct xfs_inode	*ip;
285 	int			error;
286 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
287 	int			iflags;
288 
289 	ip = xfs_inode_alloc(mp, ino);
290 	if (!ip)
291 		return -ENOMEM;
292 
293 	error = xfs_iread(mp, tp, ip, flags);
294 	if (error)
295 		goto out_destroy;
296 
297 	trace_xfs_iget_miss(ip);
298 
299 	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
300 		error = -ENOENT;
301 		goto out_destroy;
302 	}
303 
304 	/*
305 	 * Preload the radix tree so we can insert safely under the
306 	 * write spinlock. Note that we cannot sleep inside the preload
307 	 * region. Since we can be called from transaction context, don't
308 	 * recurse into the file system.
309 	 */
310 	if (radix_tree_preload(GFP_NOFS)) {
311 		error = -EAGAIN;
312 		goto out_destroy;
313 	}
314 
315 	/*
316 	 * Because the inode hasn't been added to the radix-tree yet it can't
317 	 * be found by another thread, so we can do the non-sleeping lock here.
318 	 */
319 	if (lock_flags) {
320 		if (!xfs_ilock_nowait(ip, lock_flags))
321 			BUG();
322 	}
323 
324 	/*
325 	 * These values must be set before inserting the inode into the radix
326 	 * tree as the moment it is inserted a concurrent lookup (allowed by the
327 	 * RCU locking mechanism) can find it and that lookup must see that this
328 	 * is an inode currently under construction (i.e. that XFS_INEW is set).
329 	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
330 	 * memory barrier that ensures this detection works correctly at lookup
331 	 * time.
332 	 */
333 	iflags = XFS_INEW;
334 	if (flags & XFS_IGET_DONTCACHE)
335 		iflags |= XFS_IDONTCACHE;
336 	ip->i_udquot = NULL;
337 	ip->i_gdquot = NULL;
338 	ip->i_pdquot = NULL;
339 	xfs_iflags_set(ip, iflags);
340 
341 	/* insert the new inode */
342 	spin_lock(&pag->pag_ici_lock);
343 	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
344 	if (unlikely(error)) {
345 		WARN_ON(error != -EEXIST);
346 		XFS_STATS_INC(xs_ig_dup);
347 		error = -EAGAIN;
348 		goto out_preload_end;
349 	}
350 	spin_unlock(&pag->pag_ici_lock);
351 	radix_tree_preload_end();
352 
353 	*ipp = ip;
354 	return 0;
355 
356 out_preload_end:
357 	spin_unlock(&pag->pag_ici_lock);
358 	radix_tree_preload_end();
359 	if (lock_flags)
360 		xfs_iunlock(ip, lock_flags);
361 out_destroy:
362 	__destroy_inode(VFS_I(ip));
363 	xfs_inode_free(ip);
364 	return error;
365 }
366 
367 /*
368  * Look up an inode by number in the given file system.
369  * The inode is looked up in the cache held in each AG.
370  * If the inode is found in the cache, initialise the vfs inode
371  * if necessary.
372  *
373  * If it is not in core, read it in from the file system's device,
374  * add it to the cache and initialise the vfs inode.
375  *
376  * The inode is locked according to the value of the lock_flags parameter.
377  * This flag parameter indicates how and if the inode's IO lock and inode lock
378  * should be taken.
379  *
380  * mp -- the mount point structure for the current file system.  It points
381  *       to the per-AG inode caches (radix trees).
382  * tp -- a pointer to the current transaction if there is one.  This is
383  *       simply passed through to the xfs_iread() call.
384  * ino -- the number of the inode desired.  This is the unique identifier
385  *        within the file system for the inode being requested.
386  * lock_flags -- flags indicating how to lock the inode.  See the comment
387  *		 for xfs_ilock() for a list of valid values.
388  */
389 int
390 xfs_iget(
391 	xfs_mount_t	*mp,
392 	xfs_trans_t	*tp,
393 	xfs_ino_t	ino,
394 	uint		flags,
395 	uint		lock_flags,
396 	xfs_inode_t	**ipp)
397 {
398 	xfs_inode_t	*ip;
399 	int		error;
400 	xfs_perag_t	*pag;
401 	xfs_agino_t	agino;
402 
403 	/*
404 	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
405 	 * doesn't get freed while it's being referenced during a
406 	 * radix tree traversal here.  It assumes this function
407 	 * acquires only the ILOCK (and therefore it has no need to
408 	 * involve the IOLOCK in this synchronization).
409 	 */
410 	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
411 
412 	/* reject inode numbers outside existing AGs */
413 	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
414 		return -EINVAL;
415 
416 	/* get the perag structure and ensure that it's inode capable */
417 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
418 	agino = XFS_INO_TO_AGINO(mp, ino);
419 
420 again:
421 	error = 0;
422 	rcu_read_lock();
423 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
424 
425 	if (ip) {
426 		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
427 		if (error)
428 			goto out_error_or_again;
429 	} else {
430 		rcu_read_unlock();
431 		XFS_STATS_INC(xs_ig_missed);
432 
433 		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
434 							flags, lock_flags);
435 		if (error)
436 			goto out_error_or_again;
437 	}
438 	xfs_perag_put(pag);
439 
440 	*ipp = ip;
441 
442 	/*
443 	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
444 	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
445 	 */
446 	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
447 		xfs_setup_inode(ip);
448 	return 0;
449 
450 out_error_or_again:
451 	if (error == -EAGAIN) {
452 		delay(1);
453 		goto again;
454 	}
455 	xfs_perag_put(pag);
456 	return error;
457 }
458 
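/*
 * Illustrative caller sketch (not part of the original file): one way a
 * caller might use xfs_iget() to read a field from an arbitrary inode. The
 * helper name and the NULL transaction are assumptions for the example; the
 * lock flag and the IRELE() release follow the conventions used elsewhere
 * in this file.
 */
static int
example_read_inode_size(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	xfs_fsize_t		*size)
{
	struct xfs_inode	*ip;
	int			error;

	/* look the inode up outside a transaction, taking the ILOCK shared */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;

	*size = ip->i_d.di_size;

	/* drop the lock and the reference xfs_iget() gave us */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);
	return 0;
}
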
459 /*
460  * The inode lookup is done in batches to keep the amount of lock traffic and
461  * radix tree lookups to a minimum. The batch size is a trade-off between
462  * lookup reduction and stack usage. This is in the reclaim path, so we can't
463  * be too greedy.
464  */
465 #define XFS_LOOKUP_BATCH	32
466 
467 STATIC int
468 xfs_inode_ag_walk_grab(
469 	struct xfs_inode	*ip)
470 {
471 	struct inode		*inode = VFS_I(ip);
472 
473 	ASSERT(rcu_read_lock_held());
474 
475 	/*
476 	 * check for stale RCU freed inode
477 	 *
478 	 * If the inode has been reallocated, it doesn't matter if it's not in
479 	 * the AG we are walking - we are walking for writeback, so if it
480 	 * passes all the "valid inode" checks and is dirty, then we'll write
481 	 * it back anyway.  If it has been reallocated and still being
482 	 * it back anyway.  If it has been reallocated and is still being
483 	 */
484 	spin_lock(&ip->i_flags_lock);
485 	if (!ip->i_ino)
486 		goto out_unlock_noent;
487 
488 	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
489 	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
490 		goto out_unlock_noent;
491 	spin_unlock(&ip->i_flags_lock);
492 
493 	/* nothing to sync during shutdown */
494 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
495 		return -EFSCORRUPTED;
496 
497 	/* If we can't grab the inode, it must be on its way to reclaim. */
498 	if (!igrab(inode))
499 		return -ENOENT;
500 
501 	/* inode is valid */
502 	return 0;
503 
504 out_unlock_noent:
505 	spin_unlock(&ip->i_flags_lock);
506 	return -ENOENT;
507 }
508 
509 STATIC int
510 xfs_inode_ag_walk(
511 	struct xfs_mount	*mp,
512 	struct xfs_perag	*pag,
513 	int			(*execute)(struct xfs_inode *ip, int flags,
514 					   void *args),
515 	int			flags,
516 	void			*args,
517 	int			tag)
518 {
519 	uint32_t		first_index;
520 	int			last_error = 0;
521 	int			skipped;
522 	int			done;
523 	int			nr_found;
524 
525 restart:
526 	done = 0;
527 	skipped = 0;
528 	first_index = 0;
529 	nr_found = 0;
530 	do {
531 		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
532 		int		error = 0;
533 		int		i;
534 
535 		rcu_read_lock();
536 
537 		if (tag == -1)
538 			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
539 					(void **)batch, first_index,
540 					XFS_LOOKUP_BATCH);
541 		else
542 			nr_found = radix_tree_gang_lookup_tag(
543 					&pag->pag_ici_root,
544 					(void **) batch, first_index,
545 					XFS_LOOKUP_BATCH, tag);
546 
547 		if (!nr_found) {
548 			rcu_read_unlock();
549 			break;
550 		}
551 
552 		/*
553 		 * Grab the inodes before we drop the lock. If we found
554 		 * nothing, nr == 0 and the loop will be skipped.
555 		 */
556 		for (i = 0; i < nr_found; i++) {
557 			struct xfs_inode *ip = batch[i];
558 
559 			if (done || xfs_inode_ag_walk_grab(ip))
560 				batch[i] = NULL;
561 
562 			/*
563 			 * Update the index for the next lookup. Catch
564 			 * overflows into the next AG range which can occur if
565 			 * we have inodes in the last block of the AG and we
566 			 * are currently pointing to the last inode.
567 			 *
568 			 * Because we may see inodes that are from the wrong AG
569 			 * due to RCU freeing and reallocation, only update the
570 			 * index if it lies in this AG. It was a race that led
571 			 * us to see this inode, so another lookup from the
572 			 * same index will not find it again.
573 			 */
574 			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
575 				continue;
576 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
577 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
578 				done = 1;
579 		}
580 
581 		/* unlock now we've grabbed the inodes. */
582 		rcu_read_unlock();
583 
584 		for (i = 0; i < nr_found; i++) {
585 			if (!batch[i])
586 				continue;
587 			error = execute(batch[i], flags, args);
588 			IRELE(batch[i]);
589 			if (error == -EAGAIN) {
590 				skipped++;
591 				continue;
592 			}
593 			if (error && last_error != -EFSCORRUPTED)
594 				last_error = error;
595 		}
596 
597 		/* bail out if the filesystem is corrupted.  */
598 		if (error == -EFSCORRUPTED)
599 			break;
600 
601 		cond_resched();
602 
603 	} while (nr_found && !done);
604 
605 	if (skipped) {
606 		delay(1);
607 		goto restart;
608 	}
609 	return last_error;
610 }
611 
612 /*
613  * Background scanning to trim post-EOF preallocated space. This is queued
614  * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
615  */
616 STATIC void
617 xfs_queue_eofblocks(
618 	struct xfs_mount *mp)
619 {
620 	rcu_read_lock();
621 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
622 		queue_delayed_work(mp->m_eofblocks_workqueue,
623 				   &mp->m_eofblocks_work,
624 				   msecs_to_jiffies(xfs_eofb_secs * 1000));
625 	rcu_read_unlock();
626 }
627 
628 void
629 xfs_eofblocks_worker(
630 	struct work_struct *work)
631 {
632 	struct xfs_mount *mp = container_of(to_delayed_work(work),
633 				struct xfs_mount, m_eofblocks_work);
634 	xfs_icache_free_eofblocks(mp, NULL);
635 	xfs_queue_eofblocks(mp);
636 }
637 
638 int
639 xfs_inode_ag_iterator(
640 	struct xfs_mount	*mp,
641 	int			(*execute)(struct xfs_inode *ip, int flags,
642 					   void *args),
643 	int			flags,
644 	void			*args)
645 {
646 	struct xfs_perag	*pag;
647 	int			error = 0;
648 	int			last_error = 0;
649 	xfs_agnumber_t		ag;
650 
651 	ag = 0;
652 	while ((pag = xfs_perag_get(mp, ag))) {
653 		ag = pag->pag_agno + 1;
654 		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
655 		xfs_perag_put(pag);
656 		if (error) {
657 			last_error = error;
658 			if (error == -EFSCORRUPTED)
659 				break;
660 		}
661 	}
662 	return last_error;
663 }
664 
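/*
 * Illustrative sketch (not part of the original file): the shape of an
 * execute() callback passed to xfs_inode_ag_iterator(). The callback name
 * and body are assumptions for the example. Return values follow the
 * conventions of xfs_inode_ag_walk() above: -EAGAIN marks the inode as
 * skipped and causes the walk to restart, -EFSCORRUPTED aborts the walk,
 * and any other error is remembered as last_error. The callback runs with
 * a reference held on the inode but with no inode locks taken.
 */
STATIC int
example_count_delalloc_inodes(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	atomic_t		*count = args;

	if (ip->i_delayed_blks)
		atomic_inc(count);
	return 0;
}
/* usage: xfs_inode_ag_iterator(mp, example_count_delalloc_inodes, 0, &count); */
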
665 int
666 xfs_inode_ag_iterator_tag(
667 	struct xfs_mount	*mp,
668 	int			(*execute)(struct xfs_inode *ip, int flags,
669 					   void *args),
670 	int			flags,
671 	void			*args,
672 	int			tag)
673 {
674 	struct xfs_perag	*pag;
675 	int			error = 0;
676 	int			last_error = 0;
677 	xfs_agnumber_t		ag;
678 
679 	ag = 0;
680 	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
681 		ag = pag->pag_agno + 1;
682 		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
683 		xfs_perag_put(pag);
684 		if (error) {
685 			last_error = error;
686 			if (error == -EFSCORRUPTED)
687 				break;
688 		}
689 	}
690 	return last_error;
691 }
692 
693 /*
694  * Queue a new inode reclaim pass if there are reclaimable inodes and there
695  * isn't a reclaim pass already in progress. By default it runs every 5s based
696  * on the xfs periodic sync default of 30s. Perhaps this should have its own
697  * tunable, but that can be done if this method proves to be ineffective or too
698  * aggressive.
699  */
700 static void
701 xfs_reclaim_work_queue(
702 	struct xfs_mount        *mp)
703 {
704 
705 	rcu_read_lock();
706 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
707 		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
708 			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
709 	}
710 	rcu_read_unlock();
711 }
712 
713 /*
714  * This is a fast pass over the inode cache to try to get reclaim moving on as
715  * many inodes as possible in a short period of time. It kicks itself every few
716  * seconds, as well as being kicked by the inode cache shrinker when memory
717  * goes low. It scans as quickly as possible avoiding locked inodes or those
718  * already being flushed, and once done schedules a future pass.
719  */
720 void
721 xfs_reclaim_worker(
722 	struct work_struct *work)
723 {
724 	struct xfs_mount *mp = container_of(to_delayed_work(work),
725 					struct xfs_mount, m_reclaim_work);
726 
727 	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
728 	xfs_reclaim_work_queue(mp);
729 }
730 
731 static void
732 __xfs_inode_set_reclaim_tag(
733 	struct xfs_perag	*pag,
734 	struct xfs_inode	*ip)
735 {
736 	radix_tree_tag_set(&pag->pag_ici_root,
737 			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
738 			   XFS_ICI_RECLAIM_TAG);
739 
740 	if (!pag->pag_ici_reclaimable) {
741 		/* propagate the reclaim tag up into the perag radix tree */
742 		spin_lock(&ip->i_mount->m_perag_lock);
743 		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
744 				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
745 				XFS_ICI_RECLAIM_TAG);
746 		spin_unlock(&ip->i_mount->m_perag_lock);
747 
748 		/* schedule periodic background inode reclaim */
749 		xfs_reclaim_work_queue(ip->i_mount);
750 
751 		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
752 							-1, _RET_IP_);
753 	}
754 	pag->pag_ici_reclaimable++;
755 }
756 
757 /*
758  * We set the inode flag atomically with the radix tree tag.
759  * Once we get tag lookups on the radix tree, this inode flag
760  * can go away.
761  */
762 void
763 xfs_inode_set_reclaim_tag(
764 	xfs_inode_t	*ip)
765 {
766 	struct xfs_mount *mp = ip->i_mount;
767 	struct xfs_perag *pag;
768 
769 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
770 	spin_lock(&pag->pag_ici_lock);
771 	spin_lock(&ip->i_flags_lock);
772 	__xfs_inode_set_reclaim_tag(pag, ip);
773 	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
774 	spin_unlock(&ip->i_flags_lock);
775 	spin_unlock(&pag->pag_ici_lock);
776 	xfs_perag_put(pag);
777 }
778 
779 STATIC void
780 __xfs_inode_clear_reclaim(
781 	xfs_perag_t	*pag,
782 	xfs_inode_t	*ip)
783 {
784 	pag->pag_ici_reclaimable--;
785 	if (!pag->pag_ici_reclaimable) {
786 		/* clear the reclaim tag from the perag radix tree */
787 		spin_lock(&ip->i_mount->m_perag_lock);
788 		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
789 				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
790 				XFS_ICI_RECLAIM_TAG);
791 		spin_unlock(&ip->i_mount->m_perag_lock);
792 		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
793 							-1, _RET_IP_);
794 	}
795 }
796 
797 STATIC void
798 __xfs_inode_clear_reclaim_tag(
799 	xfs_mount_t	*mp,
800 	xfs_perag_t	*pag,
801 	xfs_inode_t	*ip)
802 {
803 	radix_tree_tag_clear(&pag->pag_ici_root,
804 			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
805 	__xfs_inode_clear_reclaim(pag, ip);
806 }
807 
808 /*
809  * Grab the inode for reclaim exclusively.
810  * Return 0 if we grabbed it, non-zero otherwise.
811  */
812 STATIC int
813 xfs_reclaim_inode_grab(
814 	struct xfs_inode	*ip,
815 	int			flags)
816 {
817 	ASSERT(rcu_read_lock_held());
818 
819 	/* quick check for stale RCU freed inode */
820 	if (!ip->i_ino)
821 		return 1;
822 
823 	/*
824 	 * If we are asked for non-blocking operation, do unlocked checks to
825 	 * see if the inode already is being flushed or in reclaim to avoid
826 	 * lock traffic.
827 	 */
828 	if ((flags & SYNC_TRYLOCK) &&
829 	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
830 		return 1;
831 
832 	/*
833 	 * The radix tree lock here protects a thread in xfs_iget from racing
834 	 * with us starting reclaim on the inode.  Once we have the
835 	 * XFS_IRECLAIM flag set it will not touch us.
836 	 *
837 	 * Due to RCU lookup, we may find inodes that have been freed and only
838 	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
839 	 * aren't candidates for reclaim at all, so we must check that
840 	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
841 	 */
842 	spin_lock(&ip->i_flags_lock);
843 	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
844 	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
845 		/* not a reclaim candidate. */
846 		spin_unlock(&ip->i_flags_lock);
847 		return 1;
848 	}
849 	__xfs_iflags_set(ip, XFS_IRECLAIM);
850 	spin_unlock(&ip->i_flags_lock);
851 	return 0;
852 }
853 
854 /*
855  * Inodes in different states need to be treated differently. The following
856  * table lists the inode states and the reclaim actions necessary:
857  *
858  *	inode state	     iflush ret		required action
859  *      ---------------      ----------         ---------------
860  *	bad			-		reclaim
861  *	shutdown		EIO		unpin and reclaim
862  *	clean, unpinned		0		reclaim
863  *	stale, unpinned		0		reclaim
864  *	clean, pinned(*)	0		requeue
865  *	stale, pinned		EAGAIN		requeue
866  *	dirty, async		-		requeue
867  *	dirty, sync		0		reclaim
868  *
869  * (*) dgc: I don't think the clean, pinned state is possible but it gets
870  * handled anyway given the order of checks implemented.
871  *
872  * Also, because we get the flush lock first, we know that any inode that has
873  * been flushed delwri has had the flush completed by the time we check that
874  * the inode is clean.
875  *
876  * Note that because the inode is flushed delayed write by AIL pushing, the
877  * flush lock may already be held here and waiting on it can result in very
878  * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
879  * the caller should push the AIL first before trying to reclaim inodes to
880  * minimise the amount of time spent waiting.  For background reclaim, we only
881  * bother to reclaim clean inodes anyway.
882  *
883  * Hence the order of actions after gaining the locks should be:
884  *	bad		=> reclaim
885  *	shutdown	=> unpin and reclaim
886  *	pinned, async	=> requeue
887  *	pinned, sync	=> unpin
888  *	stale		=> reclaim
889  *	clean		=> reclaim
890  *	dirty, async	=> requeue
891  *	dirty, sync	=> flush, wait and reclaim
892  */
893 STATIC int
894 xfs_reclaim_inode(
895 	struct xfs_inode	*ip,
896 	struct xfs_perag	*pag,
897 	int			sync_mode)
898 {
899 	struct xfs_buf		*bp = NULL;
900 	int			error;
901 
902 restart:
903 	error = 0;
904 	xfs_ilock(ip, XFS_ILOCK_EXCL);
905 	if (!xfs_iflock_nowait(ip)) {
906 		if (!(sync_mode & SYNC_WAIT))
907 			goto out;
908 		xfs_iflock(ip);
909 	}
910 
911 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
912 		xfs_iunpin_wait(ip);
913 		xfs_iflush_abort(ip, false);
914 		goto reclaim;
915 	}
916 	if (xfs_ipincount(ip)) {
917 		if (!(sync_mode & SYNC_WAIT))
918 			goto out_ifunlock;
919 		xfs_iunpin_wait(ip);
920 	}
921 	if (xfs_iflags_test(ip, XFS_ISTALE))
922 		goto reclaim;
923 	if (xfs_inode_clean(ip))
924 		goto reclaim;
925 
926 	/*
927 	 * Never flush out dirty data during non-blocking reclaim, as it would
928 	 * just contend with AIL pushing trying to do the same job.
929 	 */
930 	if (!(sync_mode & SYNC_WAIT))
931 		goto out_ifunlock;
932 
933 	/*
934 	 * Now we have an inode that needs flushing.
935 	 *
936 	 * Note that xfs_iflush will never block on the inode buffer lock, as
937 	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
938 	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
939 	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
940 	 * result in an ABBA deadlock with xfs_ifree_cluster().
941 	 *
942 	 * As xfs_ifree_cluster() must gather all inodes that are active in the
943 	 * cache to mark them stale, if we hit this case we don't actually want
944 	 * to do IO here - we want the inode marked stale so we can simply
945 	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
946 	 * inode, back off and try again.  Hopefully the next pass through will
947 	 * see the stale flag set on the inode.
948 	 */
949 	error = xfs_iflush(ip, &bp);
950 	if (error == -EAGAIN) {
951 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
952 		/* backoff longer than in xfs_ifree_cluster */
953 		delay(2);
954 		goto restart;
955 	}
956 
957 	if (!error) {
958 		error = xfs_bwrite(bp);
959 		xfs_buf_relse(bp);
960 	}
961 
962 	xfs_iflock(ip);
963 reclaim:
964 	xfs_ifunlock(ip);
965 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
966 
967 	XFS_STATS_INC(xs_ig_reclaims);
968 	/*
969 	 * Remove the inode from the per-AG radix tree.
970 	 *
971 	 * Because radix_tree_delete won't complain even if the item was never
972 	 * added to the tree, we assert that it has been there before to catch
973 	 * problems with the inode lifetime early on.
974 	 */
975 	spin_lock(&pag->pag_ici_lock);
976 	if (!radix_tree_delete(&pag->pag_ici_root,
977 				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
978 		ASSERT(0);
979 	__xfs_inode_clear_reclaim(pag, ip);
980 	spin_unlock(&pag->pag_ici_lock);
981 
982 	/*
983 	 * Here we do an (almost) spurious inode lock in order to coordinate
984 	 * with inode cache radix tree lookups.  This is because the lookup
985 	 * can reference the inodes in the cache without taking references.
986 	 *
987 	 * We make that OK here by ensuring that we wait until the inode is
988 	 * unlocked after the lookup before we go ahead and free it.
989 	 */
990 	xfs_ilock(ip, XFS_ILOCK_EXCL);
991 	xfs_qm_dqdetach(ip);
992 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
993 
994 	xfs_inode_free(ip);
995 	return error;
996 
997 out_ifunlock:
998 	xfs_ifunlock(ip);
999 out:
1000 	xfs_iflags_clear(ip, XFS_IRECLAIM);
1001 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1002 	/*
1003 	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
1004 	 * a short while. However, this just burns CPU time scanning the tree
1005 	 * waiting for IO to complete and the reclaim work never goes back to
1006 	 * the idle state. Instead, return 0 to let the next scheduled
1007 	 * background reclaim attempt to reclaim the inode again.
1008 	 */
1009 	return 0;
1010 }
1011 
1012 /*
1013  * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1014  * corrupted, we still want to try to reclaim all the inodes. If we don't,
1015  * then a shutdown during the filesystem unmount reclaim walk will leak all
1016  * the unreclaimed inodes.
1017  */
1018 STATIC int
1019 xfs_reclaim_inodes_ag(
1020 	struct xfs_mount	*mp,
1021 	int			flags,
1022 	int			*nr_to_scan)
1023 {
1024 	struct xfs_perag	*pag;
1025 	int			error = 0;
1026 	int			last_error = 0;
1027 	xfs_agnumber_t		ag;
1028 	int			trylock = flags & SYNC_TRYLOCK;
1029 	int			skipped;
1030 
1031 restart:
1032 	ag = 0;
1033 	skipped = 0;
1034 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1035 		unsigned long	first_index = 0;
1036 		int		done = 0;
1037 		int		nr_found = 0;
1038 
1039 		ag = pag->pag_agno + 1;
1040 
1041 		if (trylock) {
1042 			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
1043 				skipped++;
1044 				xfs_perag_put(pag);
1045 				continue;
1046 			}
1047 			first_index = pag->pag_ici_reclaim_cursor;
1048 		} else
1049 			mutex_lock(&pag->pag_ici_reclaim_lock);
1050 
1051 		do {
1052 			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1053 			int	i;
1054 
1055 			rcu_read_lock();
1056 			nr_found = radix_tree_gang_lookup_tag(
1057 					&pag->pag_ici_root,
1058 					(void **)batch, first_index,
1059 					XFS_LOOKUP_BATCH,
1060 					XFS_ICI_RECLAIM_TAG);
1061 			if (!nr_found) {
1062 				done = 1;
1063 				rcu_read_unlock();
1064 				break;
1065 			}
1066 
1067 			/*
1068 			 * Grab the inodes before we drop the lock. If we found
1069 			 * nothing, nr == 0 and the loop will be skipped.
1070 			 */
1071 			for (i = 0; i < nr_found; i++) {
1072 				struct xfs_inode *ip = batch[i];
1073 
1074 				if (done || xfs_reclaim_inode_grab(ip, flags))
1075 					batch[i] = NULL;
1076 
1077 				/*
1078 				 * Update the index for the next lookup. Catch
1079 				 * overflows into the next AG range which can
1080 				 * occur if we have inodes in the last block of
1081 				 * the AG and we are currently pointing to the
1082 				 * last inode.
1083 				 *
1084 				 * Because we may see inodes that are from the
1085 				 * wrong AG due to RCU freeing and
1086 				 * reallocation, only update the index if it
1087 				 * lies in this AG. It was a race that led us
1088 				 * to see this inode, so another lookup from
1089 				 * the same index will not find it again.
1090 				 */
1091 				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1092 								pag->pag_agno)
1093 					continue;
1094 				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1095 				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1096 					done = 1;
1097 			}
1098 
1099 			/* unlock now we've grabbed the inodes. */
1100 			rcu_read_unlock();
1101 
1102 			for (i = 0; i < nr_found; i++) {
1103 				if (!batch[i])
1104 					continue;
1105 				error = xfs_reclaim_inode(batch[i], pag, flags);
1106 				if (error && last_error != -EFSCORRUPTED)
1107 					last_error = error;
1108 			}
1109 
1110 			*nr_to_scan -= XFS_LOOKUP_BATCH;
1111 
1112 			cond_resched();
1113 
1114 		} while (nr_found && !done && *nr_to_scan > 0);
1115 
1116 		if (trylock && !done)
1117 			pag->pag_ici_reclaim_cursor = first_index;
1118 		else
1119 			pag->pag_ici_reclaim_cursor = 0;
1120 		mutex_unlock(&pag->pag_ici_reclaim_lock);
1121 		xfs_perag_put(pag);
1122 	}
1123 
1124 	/*
1125 	 * If we skipped any AG, and we still have scan count remaining, do
1126 	 * another pass, this time using blocking reclaim semantics (i.e.
1127 	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
1128 	 * ensures that when we get more reclaimers than AGs we block rather
1129 	 * than spin trying to execute reclaim.
1130 	 */
1131 	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
1132 		trylock = 0;
1133 		goto restart;
1134 	}
1135 	return last_error;
1136 }
1137 
1138 int
1139 xfs_reclaim_inodes(
1140 	xfs_mount_t	*mp,
1141 	int		mode)
1142 {
1143 	int		nr_to_scan = INT_MAX;
1144 
1145 	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
1146 }
1147 
1148 /*
1149  * Scan a certain number of inodes for reclaim.
1150  *
1151  * When called we make sure that there is a background (fast) inode reclaim in
1152  * progress, while we throttle the speed of reclaim ourselves by doing
1153  * synchronous reclaim of inodes. If we come across dirty inodes, we wait
1154  * for them to be cleaned, which we hope will not take very long because the
1155  * background walker has already kicked the IO off on those dirty inodes.
1156  */
1157 long
1158 xfs_reclaim_inodes_nr(
1159 	struct xfs_mount	*mp,
1160 	int			nr_to_scan)
1161 {
1162 	/* kick background reclaimer and push the AIL */
1163 	xfs_reclaim_work_queue(mp);
1164 	xfs_ail_push_all(mp->m_ail);
1165 
1166 	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
1167 }
1168 
1169 /*
1170  * Return the number of reclaimable inodes in the filesystem for
1171  * the shrinker to determine how much to reclaim.
1172  */
1173 int
1174 xfs_reclaim_inodes_count(
1175 	struct xfs_mount	*mp)
1176 {
1177 	struct xfs_perag	*pag;
1178 	xfs_agnumber_t		ag = 0;
1179 	int			reclaimable = 0;
1180 
1181 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1182 		ag = pag->pag_agno + 1;
1183 		reclaimable += pag->pag_ici_reclaimable;
1184 		xfs_perag_put(pag);
1185 	}
1186 	return reclaimable;
1187 }
1188 
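/*
 * Illustrative sketch (not part of the original file): how a memory-pressure
 * driven caller might pair the two interfaces above, first asking how many
 * inodes are reclaimable and then scanning a bounded number of them. The
 * function name and the exact wiring are assumptions for the example; in the
 * real kernel this hookup lives in the XFS superblock/shrinker code.
 */
static long
example_shrink_inode_cache(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* nothing tagged for reclaim, so there is nothing to scan */
	if (!xfs_reclaim_inodes_count(mp))
		return 0;

	/* scan at most nr_to_scan inodes, waiting on dirty ones */
	return xfs_reclaim_inodes_nr(mp, nr_to_scan);
}
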
1189 STATIC int
1190 xfs_inode_match_id(
1191 	struct xfs_inode	*ip,
1192 	struct xfs_eofblocks	*eofb)
1193 {
1194 	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1195 	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1196 		return 0;
1197 
1198 	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1199 	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1200 		return 0;
1201 
1202 	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1203 	    xfs_get_projid(ip) != eofb->eof_prid)
1204 		return 0;
1205 
1206 	return 1;
1207 }
1208 
1209 /*
1210  * A union-based inode filtering algorithm. Process the inode if any of the
1211  * criteria match. This is for global/internal scans only.
1212  */
1213 STATIC int
1214 xfs_inode_match_id_union(
1215 	struct xfs_inode	*ip,
1216 	struct xfs_eofblocks	*eofb)
1217 {
1218 	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1219 	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1220 		return 1;
1221 
1222 	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1223 	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1224 		return 1;
1225 
1226 	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1227 	    xfs_get_projid(ip) == eofb->eof_prid)
1228 		return 1;
1229 
1230 	return 0;
1231 }
1232 
1233 STATIC int
1234 xfs_inode_free_eofblocks(
1235 	struct xfs_inode	*ip,
1236 	int			flags,
1237 	void			*args)
1238 {
1239 	int ret;
1240 	struct xfs_eofblocks *eofb = args;
1241 	bool need_iolock = true;
1242 	int match;
1243 
1244 	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
1245 
1246 	if (!xfs_can_free_eofblocks(ip, false)) {
1247 		/* inode could be preallocated or append-only */
1248 		trace_xfs_inode_free_eofblocks_invalid(ip);
1249 		xfs_inode_clear_eofblocks_tag(ip);
1250 		return 0;
1251 	}
1252 
1253 	/*
1254 	 * If the mapping is dirty, the operation can block and wait for some
1255 	 * time. Unless we are waiting, skip it.
1256 	 */
1257 	if (!(flags & SYNC_WAIT) &&
1258 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1259 		return 0;
1260 
1261 	if (eofb) {
1262 		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1263 			match = xfs_inode_match_id_union(ip, eofb);
1264 		else
1265 			match = xfs_inode_match_id(ip, eofb);
1266 		if (!match)
1267 			return 0;
1268 
1269 		/* skip the inode if the file size is too small */
1270 		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1271 		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
1272 			return 0;
1273 
1274 		/*
1275 		 * A scan owner implies we already hold the iolock. Skip it in
1276 		 * xfs_free_eofblocks() to avoid deadlock. This also eliminates
1277 		 * the possibility of EAGAIN being returned.
1278 		 */
1279 		if (eofb->eof_scan_owner == ip->i_ino)
1280 			need_iolock = false;
1281 	}
1282 
1283 	ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock);
1284 
1285 	/* don't revisit the inode if we're not waiting */
1286 	if (ret == -EAGAIN && !(flags & SYNC_WAIT))
1287 		ret = 0;
1288 
1289 	return ret;
1290 }
1291 
1292 int
1293 xfs_icache_free_eofblocks(
1294 	struct xfs_mount	*mp,
1295 	struct xfs_eofblocks	*eofb)
1296 {
1297 	int flags = SYNC_TRYLOCK;
1298 
1299 	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
1300 		flags = SYNC_WAIT;
1301 
1302 	return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
1303 					 eofb, XFS_ICI_EOFBLOCKS_TAG);
1304 }
1305 
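/*
 * Illustrative sketch (not part of the original file): building an
 * xfs_eofblocks filter and running a synchronous trim of post-EOF blocks
 * for every inode owned by one project quota ID. The helper name is an
 * assumption for the example; the flags and fields are the ones consumed
 * by xfs_inode_free_eofblocks() above.
 */
static int
example_trim_project_eofblocks(
	struct xfs_mount	*mp,
	prid_t			prid)
{
	struct xfs_eofblocks	eofb = {0};

	eofb.eof_flags = XFS_EOF_FLAGS_SYNC | XFS_EOF_FLAGS_PRID;
	eofb.eof_prid = prid;
	/* not scanning on behalf of a particular inode */
	eofb.eof_scan_owner = NULLFSINO;

	return xfs_icache_free_eofblocks(mp, &eofb);
}
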
1306 /*
1307  * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1308  * multiple quotas, we don't know exactly which quota caused an allocation
1309  * failure. We make a best effort by including each quota under low free space
1310  * conditions (less than 1% free space) in the scan.
1311  */
1312 int
1313 xfs_inode_free_quota_eofblocks(
1314 	struct xfs_inode *ip)
1315 {
1316 	int scan = 0;
1317 	struct xfs_eofblocks eofb = {0};
1318 	struct xfs_dquot *dq;
1319 
1320 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1321 
1322 	/*
1323 	 * Set the scan owner to avoid a potential livelock. Otherwise, the scan
1324 	 * can repeatedly trylock on the inode we're currently processing. We
1325 	 * run a sync scan to increase effectiveness and use the union filter to
1326 	 * cover all applicable quotas in a single scan.
1327 	 */
1328 	eofb.eof_scan_owner = ip->i_ino;
1329 	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1330 
1331 	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1332 		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
1333 		if (dq && xfs_dquot_lowsp(dq)) {
1334 			eofb.eof_uid = VFS_I(ip)->i_uid;
1335 			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1336 			scan = 1;
1337 		}
1338 	}
1339 
1340 	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1341 		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
1342 		if (dq && xfs_dquot_lowsp(dq)) {
1343 			eofb.eof_gid = VFS_I(ip)->i_gid;
1344 			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1345 			scan = 1;
1346 		}
1347 	}
1348 
1349 	if (scan)
1350 		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
1351 
1352 	return scan;
1353 }
1354 
1355 void
1356 xfs_inode_set_eofblocks_tag(
1357 	xfs_inode_t	*ip)
1358 {
1359 	struct xfs_mount *mp = ip->i_mount;
1360 	struct xfs_perag *pag;
1361 	int tagged;
1362 
1363 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1364 	spin_lock(&pag->pag_ici_lock);
1365 	trace_xfs_inode_set_eofblocks_tag(ip);
1366 
1367 	tagged = radix_tree_tagged(&pag->pag_ici_root,
1368 				   XFS_ICI_EOFBLOCKS_TAG);
1369 	radix_tree_tag_set(&pag->pag_ici_root,
1370 			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1371 			   XFS_ICI_EOFBLOCKS_TAG);
1372 	if (!tagged) {
1373 		/* propagate the eofblocks tag up into the perag radix tree */
1374 		spin_lock(&ip->i_mount->m_perag_lock);
1375 		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1376 				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1377 				   XFS_ICI_EOFBLOCKS_TAG);
1378 		spin_unlock(&ip->i_mount->m_perag_lock);
1379 
1380 		/* kick off background trimming */
1381 		xfs_queue_eofblocks(ip->i_mount);
1382 
1383 		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
1384 					      -1, _RET_IP_);
1385 	}
1386 
1387 	spin_unlock(&pag->pag_ici_lock);
1388 	xfs_perag_put(pag);
1389 }
1390 
1391 void
1392 xfs_inode_clear_eofblocks_tag(
1393 	xfs_inode_t	*ip)
1394 {
1395 	struct xfs_mount *mp = ip->i_mount;
1396 	struct xfs_perag *pag;
1397 
1398 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1399 	spin_lock(&pag->pag_ici_lock);
1400 	trace_xfs_inode_clear_eofblocks_tag(ip);
1401 
1402 	radix_tree_tag_clear(&pag->pag_ici_root,
1403 			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1404 			     XFS_ICI_EOFBLOCKS_TAG);
1405 	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
1406 		/* clear the eofblocks tag from the perag radix tree */
1407 		spin_lock(&ip->i_mount->m_perag_lock);
1408 		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1409 				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1410 				     XFS_ICI_EOFBLOCKS_TAG);
1411 		spin_unlock(&ip->i_mount->m_perag_lock);
1412 		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
1413 					       -1, _RET_IP_);
1414 	}
1415 
1416 	spin_unlock(&pag->pag_ici_lock);
1417 	xfs_perag_put(pag);
1418 }
1419 
1420