// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2019 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_health.h"
#include "xfs_trans.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"

/*
 * Walking Inodes in the Filesystem
 * ================================
 *
 * This iterator function walks a subset of filesystem inodes in increasing
 * order from @startino until there are no more inodes.  For each allocated
 * inode it finds, it calls a walk function with the relevant inode number and
 * a pointer to caller-provided data.  The walk function can return the usual
 * negative error code to stop the iteration; 0 to continue the iteration; or
 * -ECANCELED to stop the iteration.  This return value is returned to the
 * caller.
 *
 * Internally, we allow the walk function to do anything, which means that we
 * cannot maintain the inobt cursor or our lock on the AGI buffer.  We
 * therefore cache the inobt records in kernel memory and only call the walk
 * function when our memory buffer is full.  @nr_recs is the number of records
 * that we've cached, and @sz_recs is the size of our cache.
 *
 * It is the responsibility of the walk function to ensure it accesses
 * allocated inodes, as the inobt records may be stale by the time they are
 * acted upon.
 */
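
/*
 * Example usage (a minimal sketch, not part of this file's machinery): a
 * walk function that counts allocated inodes, driven by xfs_iwalk().  The
 * callback signature and the xfs_iwalk() arguments follow the declarations
 * in xfs_iwalk.h; the function name and the count variable are illustrative
 * only.
 *
 *	static int
 *	xfs_example_count_fn(
 *		struct xfs_mount	*mp,
 *		struct xfs_trans	*tp,
 *		xfs_ino_t		ino,
 *		void			*data)
 *	{
 *		unsigned long long	*count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	// Walk every inode from the start of the fs, default prefetch:
 *	unsigned long long	count = 0;
 *	int			error;
 *
 *	error = xfs_iwalk(mp, NULL, 0, 0, xfs_example_count_fn, 0, &count);
 *
 * xfs_iwalk_threaded() takes the same callback but fans the walk out across
 * per-AG workers, and xfs_inobt_walk() calls back once per inobt record
 * instead of once per inode.
 */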

struct xfs_iwalk_ag {
	/* parallel work control data; will be null if single threaded */
	struct xfs_pwork		pwork;

	struct xfs_mount		*mp;
	struct xfs_trans		*tp;
	struct xfs_perag		*pag;

	/* Where do we start the traversal? */
	xfs_ino_t			startino;

	/* What was the last inode number we saw when iterating the inobt? */
	xfs_ino_t			lastino;

	/* Array of inobt records we cache. */
	struct xfs_inobt_rec_incore	*recs;

	/* Number of entries allocated for the @recs array. */
	unsigned int			sz_recs;

	/* Number of entries in the @recs array that are in use. */
	unsigned int			nr_recs;

	/* Inode walk function and data pointer. */
	xfs_iwalk_fn			iwalk_fn;
	xfs_inobt_walk_fn		inobt_walk_fn;
	void				*data;

	/*
	 * Make it look like the inodes up to startino are free so that
	 * bulkstat can start its inode iteration at the correct place without
	 * needing to special case everywhere.
	 */
	unsigned int			trim_start:1;

	/* Skip empty inobt records? */
	unsigned int			skip_empty:1;

	/* Drop the (hopefully empty) transaction when calling iwalk_fn. */
	unsigned int			drop_trans:1;
};

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_iwalk_ichunk_ra(
	struct xfs_mount		*mp,
	struct xfs_perag		*pag,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_ino_geometry		*igeo = M_IGEO(mp);
	xfs_agblock_t			agbno;
	struct blk_plug			plug;
	int				i;	/* inode chunk index */

	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);

	blk_start_plug(&plug);
	for (i = 0; i < XFS_INODES_PER_CHUNK; i += igeo->inodes_per_cluster) {
		xfs_inofree_t	imask;

		imask = xfs_inobt_maskn(i, igeo->inodes_per_cluster);
		if (imask & ~irec->ir_free) {
			xfs_btree_reada_bufs(mp, pag->pag_agno, agbno,
					igeo->blocks_per_cluster,
					&xfs_inode_buf_ops);
		}
		agbno += igeo->blocks_per_cluster;
	}
	blk_finish_plug(&plug);
}

/*
 * Set the bits in @irec's free mask that correspond to the inodes before
 * @agino so that we skip them.  This is how we restart an inode walk that was
 * interrupted in the middle of an inode record.
 */
STATIC void
xfs_iwalk_adjust_start(
	xfs_agino_t			agino,	/* starting inode of chunk */
	struct xfs_inobt_rec_incore	*irec)	/* btree record */
{
	int				idx;	/* index into inode chunk */
	int				i;

	idx = agino - irec->ir_startino;

	/*
	 * We found the chunk containing @agino, but some of the inodes before
	 * @agino in that chunk are allocated.  Mark those uninteresting inodes
	 * free because they're before our start point.
	 */
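	/*
	 * For example (purely illustrative): if @agino is five inodes past
	 * ir_startino, idx is 5; the loop below bumps ir_freecount once for
	 * each of those first five inodes that was allocated, and
	 * xfs_inobt_maskn(0, 5) sets the low five bits of ir_free.
	 */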
	for (i = 0; i < idx; i++) {
		if (XFS_INOBT_MASK(i) & ~irec->ir_free)
			irec->ir_freecount++;
	}

	irec->ir_free |= xfs_inobt_maskn(0, idx);
}

/* Allocate memory for a walk. */
STATIC int
xfs_iwalk_alloc(
	struct xfs_iwalk_ag	*iwag)
{
	size_t			size;

	ASSERT(iwag->recs == NULL);
	iwag->nr_recs = 0;

	/* Allocate a prefetch buffer for inobt records. */
	size = iwag->sz_recs * sizeof(struct xfs_inobt_rec_incore);
	iwag->recs = kmem_alloc(size, KM_MAYFAIL);
	if (iwag->recs == NULL)
		return -ENOMEM;

	return 0;
}

/* Free memory we allocated for a walk. */
STATIC void
xfs_iwalk_free(
	struct xfs_iwalk_ag	*iwag)
{
	kmem_free(iwag->recs);
	iwag->recs = NULL;
}

/* For each inuse inode in each cached inobt record, call our function. */
STATIC int
xfs_iwalk_ag_recs(
	struct xfs_iwalk_ag	*iwag)
{
	struct xfs_mount	*mp = iwag->mp;
	struct xfs_trans	*tp = iwag->tp;
	struct xfs_perag	*pag = iwag->pag;
	xfs_ino_t		ino;
	unsigned int		i, j;
	int			error;

	for (i = 0; i < iwag->nr_recs; i++) {
		struct xfs_inobt_rec_incore	*irec = &iwag->recs[i];

		trace_xfs_iwalk_ag_rec(mp, pag->pag_agno, irec);

		if (xfs_pwork_want_abort(&iwag->pwork))
			return 0;

		if (iwag->inobt_walk_fn) {
			error = iwag->inobt_walk_fn(mp, tp, pag->pag_agno, irec,
					iwag->data);
			if (error)
				return error;
		}

		if (!iwag->iwalk_fn)
			continue;

		for (j = 0; j < XFS_INODES_PER_CHUNK; j++) {
			if (xfs_pwork_want_abort(&iwag->pwork))
				return 0;

			/* Skip if this inode is free */
			if (XFS_INOBT_MASK(j) & irec->ir_free)
				continue;

			/* Otherwise call our function. */
			ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
						irec->ir_startino + j);
			error = iwag->iwalk_fn(mp, tp, ino, iwag->data);
			if (error)
				return error;
		}
	}

	return 0;
}

/* Delete cursor and let go of AGI. */
static inline void
xfs_iwalk_del_inobt(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	**curpp,
	struct xfs_buf		**agi_bpp,
	int			error)
{
	if (*curpp) {
		xfs_btree_del_cursor(*curpp, error);
		*curpp = NULL;
	}
	if (*agi_bpp) {
		xfs_trans_brelse(tp, *agi_bpp);
		*agi_bpp = NULL;
	}
}

/*
 * Set ourselves up for walking inobt records starting from a given point in
 * the filesystem.
 *
 * If caller passed in a nonzero start inode number, load the record from the
 * inobt and make the record look like all the inodes before agino are free so
 * that we skip them, and then move the cursor to the next inobt record.  This
 * is how we support starting an iwalk in the middle of an inode chunk.
 *
 * If the caller passed in a start number of zero, move the cursor to the first
 * inobt record.
 *
 * The caller is responsible for cleaning up the cursor and buffer pointer
 * regardless of the error status.
 */
STATIC int
xfs_iwalk_ag_start(
	struct xfs_iwalk_ag	*iwag,
	xfs_agino_t		agino,
	struct xfs_btree_cur	**curpp,
	struct xfs_buf		**agi_bpp,
	int			*has_more)
{
	struct xfs_mount	*mp = iwag->mp;
	struct xfs_trans	*tp = iwag->tp;
	struct xfs_perag	*pag = iwag->pag;
	struct xfs_inobt_rec_incore *irec;
	int			error;

	/* Set up a fresh cursor and empty the inobt cache. */
	iwag->nr_recs = 0;
	error = xfs_inobt_cur(pag, tp, XFS_BTNUM_INO, curpp, agi_bpp);
	if (error)
		return error;

	/* Starting at the beginning of the AG?  That's easy! */
	if (agino == 0)
		return xfs_inobt_lookup(*curpp, 0, XFS_LOOKUP_GE, has_more);

	/*
	 * Otherwise, we have to grab the inobt record where we left off, stuff
	 * the record into our cache, and then see if there are more records.
	 * We require a lookup cache of at least two elements so that the
	 * caller doesn't have to deal with tearing down the cursor to walk the
	 * records.
	 */
	error = xfs_inobt_lookup(*curpp, agino, XFS_LOOKUP_LE, has_more);
	if (error)
		return error;

	/*
	 * If the LE lookup at @agino yields no records, jump ahead to the
	 * inobt cursor increment to see if there are more records to process.
	 */
	if (!*has_more)
		goto out_advance;

	/* Get the record, should always work */
	irec = &iwag->recs[iwag->nr_recs];
	error = xfs_inobt_get_rec(*curpp, irec, has_more);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, *has_more != 1))
		return -EFSCORRUPTED;

	iwag->lastino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
				irec->ir_startino + XFS_INODES_PER_CHUNK - 1);

	/*
	 * If the LE lookup yielded an inobt record before the cursor position,
	 * skip it and see if there's another one after it.
	 */
	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
		goto out_advance;

	/*
	 * If agino fell in the middle of the inode record, make it look like
	 * the inodes up to agino are free so that we don't return them again.
	 */
	if (iwag->trim_start)
		xfs_iwalk_adjust_start(agino, irec);

	/*
	 * The prefetch calculation is supposed to give us a large enough inobt
	 * record cache that this startup function can stage a partial first
	 * record and the loop body can cache a record without having to check
	 * for cache space until after it reads an inobt record.
	 */
	iwag->nr_recs++;
	ASSERT(iwag->nr_recs < iwag->sz_recs);

out_advance:
	return xfs_btree_increment(*curpp, 0, has_more);
}

/*
 * The inobt record cache is full, so preserve the inobt cursor state and
 * run callbacks on the cached inobt records.  When we're done, restore the
 * cursor state to wherever the cursor would have been had the cache not been
 * full (and therefore we could've just incremented the cursor) if *@has_more
 * is true.  On exit, *@has_more will indicate whether or not the caller should
 * try for more inode records.
 */
STATIC int
xfs_iwalk_run_callbacks(
	struct xfs_iwalk_ag		*iwag,
	struct xfs_btree_cur		**curpp,
	struct xfs_buf			**agi_bpp,
	int				*has_more)
{
	struct xfs_mount		*mp = iwag->mp;
	struct xfs_inobt_rec_incore	*irec;
	xfs_agino_t			next_agino;
	int				error;

	next_agino = XFS_INO_TO_AGINO(mp, iwag->lastino) + 1;

	ASSERT(iwag->nr_recs > 0);

	/* Delete cursor but remember the last record we cached... */
	xfs_iwalk_del_inobt(iwag->tp, curpp, agi_bpp, 0);
	irec = &iwag->recs[iwag->nr_recs - 1];
	ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK);

	if (iwag->drop_trans) {
		xfs_trans_cancel(iwag->tp);
		iwag->tp = NULL;
	}

	error = xfs_iwalk_ag_recs(iwag);
	if (error)
		return error;

	/* ...empty the cache... */
	iwag->nr_recs = 0;

	if (!has_more)
		return 0;

	if (iwag->drop_trans) {
		error = xfs_trans_alloc_empty(mp, &iwag->tp);
		if (error)
			return error;
	}

	/* ...and recreate the cursor just past where we left off. */
	error = xfs_inobt_cur(iwag->pag, iwag->tp, XFS_BTNUM_INO, curpp,
			agi_bpp);
	if (error)
		return error;

	return xfs_inobt_lookup(*curpp, next_agino, XFS_LOOKUP_GE, has_more);
}

/* Walk all inodes in a single AG, from @iwag->startino to the end of the AG. */
STATIC int
xfs_iwalk_ag(
	struct xfs_iwalk_ag		*iwag)
{
	struct xfs_mount		*mp = iwag->mp;
	struct xfs_perag		*pag = iwag->pag;
	struct xfs_buf			*agi_bp = NULL;
	struct xfs_btree_cur		*cur = NULL;
	xfs_agino_t			agino;
	int				has_more;
	int				error = 0;

	/* Set up our cursor at the right place in the inode btree. */
	ASSERT(pag->pag_agno == XFS_INO_TO_AGNO(mp, iwag->startino));
	agino = XFS_INO_TO_AGINO(mp, iwag->startino);
	error = xfs_iwalk_ag_start(iwag, agino, &cur, &agi_bp, &has_more);

	while (!error && has_more) {
		struct xfs_inobt_rec_incore	*irec;
		xfs_ino_t			rec_fsino;

		cond_resched();
		if (xfs_pwork_want_abort(&iwag->pwork))
			goto out;

		/* Fetch the inobt record. */
		irec = &iwag->recs[iwag->nr_recs];
		error = xfs_inobt_get_rec(cur, irec, &has_more);
		if (error || !has_more)
			break;

		/* Make sure that we always move forward. */
		rec_fsino = XFS_AGINO_TO_INO(mp, pag->pag_agno, irec->ir_startino);
		if (iwag->lastino != NULLFSINO &&
		    XFS_IS_CORRUPT(mp, iwag->lastino >= rec_fsino)) {
			error = -EFSCORRUPTED;
			goto out;
		}
		iwag->lastino = rec_fsino + XFS_INODES_PER_CHUNK - 1;

		/* No allocated inodes in this chunk; skip it. */
		if (iwag->skip_empty && irec->ir_freecount == irec->ir_count) {
			error = xfs_btree_increment(cur, 0, &has_more);
			if (error)
				break;
			continue;
		}

		/*
		 * Start readahead for this inode chunk in anticipation of
		 * walking the inodes.
		 */
		if (iwag->iwalk_fn)
			xfs_iwalk_ichunk_ra(mp, pag, irec);

		/*
		 * If there's space in the buffer for more records, increment
		 * the btree cursor and grab more.
		 */
		if (++iwag->nr_recs < iwag->sz_recs) {
			error = xfs_btree_increment(cur, 0, &has_more);
			if (error || !has_more)
				break;
			continue;
		}

		/*
		 * Otherwise, we need to save cursor state and run the callback
		 * function on the cached records.  The run_callbacks function
		 * is supposed to return a cursor pointing to the record where
		 * we would be if we had been able to increment like above.
		 */
		ASSERT(has_more);
		error = xfs_iwalk_run_callbacks(iwag, &cur, &agi_bp, &has_more);
	}

	if (iwag->nr_recs == 0 || error)
		goto out;

	/* Walk the unprocessed records in the cache. */
	error = xfs_iwalk_run_callbacks(iwag, &cur, &agi_bp, &has_more);

out:
	xfs_iwalk_del_inobt(iwag->tp, &cur, &agi_bp, error);
	return error;
}

/*
 * We experimentally determined that the reduction in ioctl call overhead
 * diminishes when userspace asks for more than 2048 inodes, so we'll cap
 * prefetch at this point.
 */
#define IWALK_MAX_INODE_PREFETCH	(2048U)

/*
 * Given the number of inodes to prefetch, set the number of inobt records that
 * we cache in memory, which controls the number of inodes we try to read
 * ahead.  Set the maximum if @inodes == 0.
 */
static inline unsigned int
xfs_iwalk_prefetch(
	unsigned int		inodes)
{
	unsigned int		inobt_records;

	/*
	 * If the caller didn't tell us the number of inodes they wanted,
	 * assume the maximum prefetch possible for best performance.
	 * Otherwise, cap prefetch at that maximum so that we don't start an
	 * absurd amount of prefetch.
	 */
	if (inodes == 0)
		inodes = IWALK_MAX_INODE_PREFETCH;
	inodes = min(inodes, IWALK_MAX_INODE_PREFETCH);

	/* Round the inode count up to a full chunk. */
	inodes = round_up(inodes, XFS_INODES_PER_CHUNK);

	/*
	 * In order to convert the number of inodes to prefetch into an
	 * estimate of the number of inobt records to cache, we require a
	 * conversion factor that reflects our expectations of the average
	 * loading factor of an inode chunk.  Based on data gathered, most
	 * (but not all) filesystems manage to keep the inode chunks totally
	 * full, so we'll underestimate slightly so that our readahead will
	 * still deliver the performance we want on aging filesystems:
	 *
	 * inobt = inodes / (INODES_PER_CHUNK * (4 / 5));
	 *
	 * The funny math is to avoid integer division.
	 */
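	/*
	 * Worked example (illustrative): a request for 2048 inodes rounds up
	 * to 32 chunks' worth of inodes, and (2048 * 5) / (4 * 64) yields 40
	 * cached inobt records, i.e. roughly 2560 inodes of readahead at the
	 * assumed 4/5 loading factor.
	 */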
	inobt_records = (inodes * 5) / (4 * XFS_INODES_PER_CHUNK);

	/*
	 * Allocate enough space to prefetch at least two inobt records so that
	 * we can cache both the record where the iwalk started and the next
	 * record.  This simplifies the AG inode walk loop setup code.
	 */
	return max(inobt_records, 2U);
}

/*
 * Walk all inodes in the filesystem starting from @startino.  The @iwalk_fn
 * will be called for each allocated inode, being passed the inode's number and
 * @data.  @inode_records controls how many inobt records' worth of inodes we
 * try to read ahead.
 */
int
xfs_iwalk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		startino,
	unsigned int		flags,
	xfs_iwalk_fn		iwalk_fn,
	unsigned int		inode_records,
	void			*data)
{
	struct xfs_iwalk_ag	iwag = {
		.mp		= mp,
		.tp		= tp,
		.iwalk_fn	= iwalk_fn,
		.data		= data,
		.startino	= startino,
		.sz_recs	= xfs_iwalk_prefetch(inode_records),
		.trim_start	= 1,
		.skip_empty	= 1,
		.pwork		= XFS_PWORK_SINGLE_THREADED,
		.lastino	= NULLFSINO,
	};
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	int			error;

	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));

	error = xfs_iwalk_alloc(&iwag);
	if (error)
		return error;

	for_each_perag_from(mp, agno, pag) {
		iwag.pag = pag;
		error = xfs_iwalk_ag(&iwag);
		if (error)
			break;
		iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
		if (flags & XFS_INOBT_WALK_SAME_AG)
			break;
		iwag.pag = NULL;
	}

	if (iwag.pag)
		xfs_perag_rele(pag);
	xfs_iwalk_free(&iwag);
	return error;
}

/* Run per-thread iwalk work. */
static int
xfs_iwalk_ag_work(
	struct xfs_mount	*mp,
	struct xfs_pwork	*pwork)
{
	struct xfs_iwalk_ag	*iwag;
	int			error = 0;

	iwag = container_of(pwork, struct xfs_iwalk_ag, pwork);
	if (xfs_pwork_want_abort(pwork))
		goto out;

	error = xfs_iwalk_alloc(iwag);
	if (error)
		goto out;
	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(mp, &iwag->tp);
	if (error)
		goto out;
	iwag->drop_trans = 1;

	error = xfs_iwalk_ag(iwag);
	if (iwag->tp)
		xfs_trans_cancel(iwag->tp);
	xfs_iwalk_free(iwag);
out:
	xfs_perag_put(iwag->pag);
	kmem_free(iwag);
	return error;
}

/*
 * Walk all the inodes in the filesystem using multiple threads to process each
 * AG.
 */
int
xfs_iwalk_threaded(
	struct xfs_mount	*mp,
	xfs_ino_t		startino,
	unsigned int		flags,
	xfs_iwalk_fn		iwalk_fn,
	unsigned int		inode_records,
	bool			polled,
	void			*data)
{
	struct xfs_pwork_ctl	pctl;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	int			error;

	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));

	error = xfs_pwork_init(mp, &pctl, xfs_iwalk_ag_work, "xfs_iwalk");
	if (error)
		return error;

	for_each_perag_from(mp, agno, pag) {
		struct xfs_iwalk_ag	*iwag;

		if (xfs_pwork_ctl_want_abort(&pctl))
			break;

		iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0);
		iwag->mp = mp;

		/*
		 * perag is being handed off to async work, so take a passive
		 * reference for the async work to release.
		 */
		iwag->pag = xfs_perag_hold(pag);
		iwag->iwalk_fn = iwalk_fn;
		iwag->data = data;
		iwag->startino = startino;
		iwag->sz_recs = xfs_iwalk_prefetch(inode_records);
		iwag->lastino = NULLFSINO;
		xfs_pwork_queue(&pctl, &iwag->pwork);
		startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0);
		if (flags & XFS_INOBT_WALK_SAME_AG)
			break;
	}
	if (pag)
		xfs_perag_rele(pag);
	if (polled)
		xfs_pwork_poll(&pctl);
	return xfs_pwork_destroy(&pctl);
}

/*
 * Allow callers to cache up to a page's worth of inobt records.  This reflects
 * the existing inumbers prefetching behavior.  Since the inobt walk does not
 * itself do anything with the inobt records, we can set a fairly high limit
 * here.
 */
#define MAX_INOBT_WALK_PREFETCH	\
	(PAGE_SIZE / sizeof(struct xfs_inobt_rec_incore))
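/*
 * For scale (an assumption about typical sizes, not derived from this file):
 * with 4 KiB pages and a 16-byte struct xfs_inobt_rec_incore this works out
 * to 256 cached records, i.e. up to 16384 inodes' worth of inobt data per
 * batch.
 */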

/*
 * Given the number of records that the user wanted, set the number of inobt
 * records that we buffer in memory.  Set the maximum if @inobt_records == 0.
 */
static inline unsigned int
xfs_inobt_walk_prefetch(
	unsigned int		inobt_records)
{
	/*
	 * If the caller didn't tell us the number of inobt records they
	 * wanted, assume the maximum prefetch possible for best performance.
	 */
	if (inobt_records == 0)
		inobt_records = MAX_INOBT_WALK_PREFETCH;

	/*
	 * Allocate enough space to prefetch at least two inobt records so that
	 * we can cache both the record where the iwalk started and the next
	 * record.  This simplifies the AG inode walk loop setup code.
	 */
	inobt_records = max(inobt_records, 2U);

	/*
	 * Cap prefetch at that maximum so that we don't use an absurd amount
	 * of memory.
	 */
	return min_t(unsigned int, inobt_records, MAX_INOBT_WALK_PREFETCH);
}

/*
 * Walk all inode btree records in the filesystem starting from @startino.  The
 * @inobt_walk_fn will be called for each btree record, being passed the incore
 * record and @data.  @inobt_records controls how many inobt records we try to
 * cache ahead of time.
 */
int
xfs_inobt_walk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		startino,
	unsigned int		flags,
	xfs_inobt_walk_fn	inobt_walk_fn,
	unsigned int		inobt_records,
	void			*data)
{
	struct xfs_iwalk_ag	iwag = {
		.mp		= mp,
		.tp		= tp,
		.inobt_walk_fn	= inobt_walk_fn,
		.data		= data,
		.startino	= startino,
		.sz_recs	= xfs_inobt_walk_prefetch(inobt_records),
		.pwork		= XFS_PWORK_SINGLE_THREADED,
		.lastino	= NULLFSINO,
	};
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	int			error;

	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(!(flags & ~XFS_INOBT_WALK_FLAGS_ALL));

	error = xfs_iwalk_alloc(&iwag);
	if (error)
		return error;

	for_each_perag_from(mp, agno, pag) {
		iwag.pag = pag;
		error = xfs_iwalk_ag(&iwag);
		if (error)
			break;
		iwag.startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0);
		if (flags & XFS_INOBT_WALK_SAME_AG)
			break;
		iwag.pag = NULL;
	}

	if (iwag.pag)
		xfs_perag_rele(pag);
	xfs_iwalk_free(&iwag);
	return error;
}