xref: /openbmc/linux/fs/xfs/xfs_icache.c (revision f3a8b664)
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	ip->i_cnextents = 0;
	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	if (ip->i_cowfp)
		xfs_idestroy_fork(ip, XFS_COW_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_zone_free(xfs_inode_zone, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
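
/*
 * Worked example of the delay calculation above (the default value is an
 * assumption taken from the documented xfs tunables, not from this file):
 * with the periodic sync default of xfs_syncd_centisecs = 3000 (30s), the
 * queue delay is 3000 / 6 * 10 = 5000ms, i.e. the 5s reclaim period the
 * comment above describes.
 */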

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	ASSERT(spin_is_locked(&pag->pag_ici_lock));
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	ASSERT(spin_is_locked(&pag->pag_ici_lock));
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
xfs_inode_clear_reclaim_tag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int		error;
	uint32_t	nlink = inode->i_nlink;
	uint32_t	generation = inode->i_generation;
	uint64_t	version = inode->i_version;
	umode_t		mode = inode->i_mode;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode->i_version = version;
	inode->i_mode = mode;
	return error;
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = -ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = xfs_reinit_inode(mp, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = -ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
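
/*
 * Example usage (an illustrative sketch only, not a caller in this file;
 * the flag and lock choices shown are assumptions a real caller would make
 * to suit its context):
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED,
 *			 XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... use the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */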

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
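
/*
 * Sizing note (assuming a 64-bit kernel, where sizeof(void *) == 8): a
 * batch of 32 inode pointers costs 32 * 8 = 256 bytes of stack per walk,
 * which is the "can't be too greedy" trade-off referred to above.
 */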

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return -ENOENT;

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return -ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], flags, args);
			IRELE(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}

/*
 * Background scanning to trim preallocated CoW space. This is queued
 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
 */
STATIC void
xfs_queue_cowblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_cowblocks_work,
				   msecs_to_jiffies(xfs_cowb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_cowblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_cowblocks_work);
	xfs_icache_free_cowblocks(mp, NULL);
	xfs_queue_cowblocks(mp);
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}
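
/*
 * Example (a hypothetical callback, not defined in this file): an execute
 * function passed to xfs_inode_ag_iterator() receives each grabbed inode
 * with a reference held, and returns 0 on success, -EAGAIN to have the walk
 * retried, or a fatal error:
 *
 *	STATIC int
 *	xfs_example_count_dirty(
 *		struct xfs_inode	*ip,
 *		int			flags,
 *		void			*args)
 *	{
 *		int	*count = args;
 *
 *		if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
 *			(*count)++;
 *		return 0;
 *	}
 */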

int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *	---------------      ----------		---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

	xfs_iflock(ip);
reclaim:
	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK and flush lock so
	 * that xfs_iflush_cluster() can be guaranteed to detect races with us
	 * here. By doing this, we guarantee that once xfs_iflush_cluster has
	 * locked both the XFS_ILOCK and the flush lock that it will see either
	 * a valid, flushable inode that will serialise correctly against the
	 * locks below, or it will see a clean (and invalid) inode that it can
	 * skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode lifetime early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	__xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * if we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return last_error;
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}
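
/*
 * For context, the superblock shrinker is what drives this entry point. The
 * hook lives in xfs_super.c and looks roughly like the following sketch
 * (reconstructed from memory, not part of this file):
 *
 *	static long
 *	xfs_fs_free_cached_objects(
 *		struct super_block	*sb,
 *		struct shrink_control	*sc)
 *	{
 *		return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
 *	}
 */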

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) != eofb->eof_prid)
		return 0;

	return 1;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC int
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) == eofb->eof_prid)
		return 1;

	return 0;
}
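
/*
 * Example (an illustrative sketch; "uid" and "gid" are hypothetical values a
 * caller would supply): a scan that frees post-EOF blocks for inodes owned
 * by either a given uid or gid would build the union filter like this and
 * pass it to xfs_icache_free_eofblocks() below:
 *
 *	struct xfs_eofblocks	eofb = { 0 };
 *
 *	eofb.eof_flags = XFS_EOF_FLAGS_UNION | XFS_EOF_FLAGS_UID |
 *			 XFS_EOF_FLAGS_GID;
 *	eofb.eof_uid = uid;
 *	eofb.eof_gid = gid;
 *	error = xfs_icache_free_eofblocks(mp, &eofb);
 */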

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int ret;
	struct xfs_eofblocks *eofb = args;
	bool need_iolock = true;
	int match;

	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;

		/*
		 * A scan owner implies we already hold the iolock. Skip it in
		 * xfs_free_eofblocks() to avoid deadlock. This also eliminates
		 * the possibility of EAGAIN being returned.
		 */
		if (eofb->eof_scan_owner == ip->i_ino)
			need_iolock = false;
	}

	ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock);

	/* don't revisit the inode if we're not waiting */
	if (ret == -EAGAIN && !(flags & SYNC_WAIT))
		ret = 0;

	return ret;
}

static int
__xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			tag)
{
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, execute, flags,
					 eofb, tag);
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
static int
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip,
	int			(*execute)(struct xfs_mount *mp,
					   struct xfs_eofblocks	*eofb))
{
	int scan = 0;
	struct xfs_eofblocks eofb = {0};
	struct xfs_dquot *dq;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	/*
	 * Set the scan owner to avoid a potential livelock. Otherwise, the scan
	 * can repeatedly trylock on the inode we're currently processing. We
	 * run a sync scan to increase effectiveness and use the union filter to
	 * cover all applicable quotas in a single scan.
	 */
	eofb.eof_scan_owner = ip->i_ino;
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		execute(ip->i_mount, &eofb);

	return scan;
}

int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}

static void
__xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip,
	void		(*execute)(struct xfs_mount *mp),
	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				  int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & XFS_IEOFBLOCKS)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_IEOFBLOCKS;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   tag);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		execute(ip->i_mount);

		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks,
			trace_xfs_perag_set_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}

static void
__xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip,
	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				    int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~XFS_IEOFBLOCKS;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     tag);
		spin_unlock(&ip->i_mount->m_perag_lock);
		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return __xfs_inode_clear_eofblocks_tag(ip,
			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long.  If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int ret;
	struct xfs_eofblocks *eofb = args;
	bool need_iolock = true;
	int match;

	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));

	if (!xfs_reflink_has_real_cow_blocks(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;

		/*
		 * A scan owner implies we already hold the iolock. Skip it in
		 * xfs_free_eofblocks() to avoid deadlock. This also eliminates
		 * the possibility of EAGAIN being returned.
		 */
		if (eofb->eof_scan_owner == ip->i_ino)
			need_iolock = false;
	}

	/* Free the CoW blocks */
	if (need_iolock) {
		xfs_ilock(ip, XFS_IOLOCK_EXCL);
		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	}

	ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);

	if (need_iolock) {
		xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}

	return ret;
}

int
xfs_icache_free_cowblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

int
xfs_inode_free_quota_cowblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
			trace_xfs_perag_set_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return __xfs_inode_clear_eofblocks_tag(ip,
			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
}
1673