xref: /openbmc/linux/fs/xfs/xfs_itable.c (revision 2810bd6840e46306c110f4b76441a987756efe6c)
10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
37b718769SNathan Scott  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
47b718769SNathan Scott  * All Rights Reserved.
51da177e4SLinus Torvalds  */
61da177e4SLinus Torvalds #include "xfs.h"
7a844f451SNathan Scott #include "xfs_fs.h"
870a9883cSDave Chinner #include "xfs_shared.h"
9a4fbe6abSDave Chinner #include "xfs_format.h"
10239880efSDave Chinner #include "xfs_log_format.h"
11239880efSDave Chinner #include "xfs_trans_resv.h"
121da177e4SLinus Torvalds #include "xfs_mount.h"
131da177e4SLinus Torvalds #include "xfs_inode.h"
14a4fbe6abSDave Chinner #include "xfs_btree.h"
151da177e4SLinus Torvalds #include "xfs_ialloc.h"
16a4fbe6abSDave Chinner #include "xfs_ialloc_btree.h"
17*2810bd68SDarrick J. Wong #include "xfs_iwalk.h"
181da177e4SLinus Torvalds #include "xfs_itable.h"
191da177e4SLinus Torvalds #include "xfs_error.h"
2033479e05SDave Chinner #include "xfs_icache.h"
2189d139d5SDarrick J. Wong #include "xfs_health.h"
221da177e4SLinus Torvalds 
237dce11dbSChristoph Hellwig /*
24*2810bd68SDarrick J. Wong  * Bulk Stat
25*2810bd68SDarrick J. Wong  * =========
26*2810bd68SDarrick J. Wong  *
27*2810bd68SDarrick J. Wong  * Use the inode walking functions to fill out struct xfs_bstat for every
28*2810bd68SDarrick J. Wong  * allocated inode, then pass the stat information to some externally provided
29*2810bd68SDarrick J. Wong  * iteration function.
307dce11dbSChristoph Hellwig  */
31*2810bd68SDarrick J. Wong 
/* Carries per-walk bulkstat state from xfs_bulkstat() down to the callback. */
struct xfs_bstat_chunk {
	bulkstat_one_fmt_pf	formatter;	/* copies one bstat record to the caller */
	struct xfs_ibulk	*breq;		/* bulk request; breq->startino is the cursor */
	struct xfs_bstat	*buf;		/* scratch bstat, reused for every inode */
};
37*2810bd68SDarrick J. Wong 
38*2810bd68SDarrick J. Wong /*
39*2810bd68SDarrick J. Wong  * Fill out the bulkstat info for a single inode and report it somewhere.
40*2810bd68SDarrick J. Wong  *
41*2810bd68SDarrick J. Wong  * bc->breq->lastino is effectively the inode cursor as we walk through the
42*2810bd68SDarrick J. Wong  * filesystem.  Therefore, we update it any time we need to move the cursor
43*2810bd68SDarrick J. Wong  * forward, regardless of whether or not we're sending any bstat information
44*2810bd68SDarrick J. Wong  * back to userspace.  If the inode is internal metadata or, has been freed
45*2810bd68SDarrick J. Wong  * out from under us, we just simply keep going.
46*2810bd68SDarrick J. Wong  *
47*2810bd68SDarrick J. Wong  * However, if any other type of error happens we want to stop right where we
48*2810bd68SDarrick J. Wong  * are so that userspace will call back with exact number of the bad inode and
49*2810bd68SDarrick J. Wong  * we can send back an error code.
50*2810bd68SDarrick J. Wong  *
51*2810bd68SDarrick J. Wong  * Note that if the formatter tells us there's no space left in the buffer we
52*2810bd68SDarrick J. Wong  * move the cursor forward and abort the walk.
53*2810bd68SDarrick J. Wong  */
STATIC int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_bstat_chunk	*bc)
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bstat	*buf = bc->buf;
	int			error = -EINVAL;

	/* Filesystem-internal metadata inodes are skipped, not reported. */
	if (xfs_internal_inum(mp, ino))
		goto out_advance;

	/*
	 * The inode number comes from a walk cursor, so have iget verify it
	 * against the allocation btree (XFS_IGET_UNTRUSTED) and avoid
	 * caching this single-use inode (XFS_IGET_DONTCACHE).
	 */
	error = xfs_iget(mp, tp, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	/* Inode was freed or is unusable: advance the cursor and keep going. */
	if (error == -ENOENT || error == -EINVAL)
		goto out_advance;
	if (error)
		goto out;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);

	dic = &ip->i_d;

	/* xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;

	/* Link count, timestamps, generation and mode live in the VFS inode. */
	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
	buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
	buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
	buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	/* di_extsize is in fs blocks; report the hint in bytes. */
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	xfs_bulkstat_health(ip, buf);
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	/* The CoW extent size hint only exists on v3 inodes. */
	if (dic->di_version == 3) {
		if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize = dic->di_cowextsize <<
					mp->m_sb.sb_blocklog;
	}

	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		/* Include delalloc blocks that haven't hit the disk yet. */
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_irele(ip);

	/* Hand the filled-in bstat to the caller's formatter. */
	error = bc->formatter(bc->breq, buf);
	/* Out of buffer space: advance the cursor but stop the walk. */
	if (error == XFS_IBULK_ABORT)
		goto out_advance;
	if (error)
		goto out;

out_advance:
	/*
	 * Advance the cursor to the inode that comes after the one we just
	 * looked at.  We want the caller to move along if the bulkstat
	 * information was copied successfully; if we tried to grab the inode
	 * but it's no longer allocated; or if it's internal metadata.
	 */
	bc->breq->startino = ino + 1;
out:
	return error;
}
1581da177e4SLinus Torvalds 
159*2810bd68SDarrick J. Wong /* Bulkstat a single inode. */
1602ee4fa5cSsandeen@sandeen.net int
1612ee4fa5cSsandeen@sandeen.net xfs_bulkstat_one(
162*2810bd68SDarrick J. Wong 	struct xfs_ibulk	*breq,
163*2810bd68SDarrick J. Wong 	bulkstat_one_fmt_pf	formatter)
1642ee4fa5cSsandeen@sandeen.net {
165*2810bd68SDarrick J. Wong 	struct xfs_bstat_chunk	bc = {
166*2810bd68SDarrick J. Wong 		.formatter	= formatter,
167*2810bd68SDarrick J. Wong 		.breq		= breq,
168*2810bd68SDarrick J. Wong 	};
169*2810bd68SDarrick J. Wong 	int			error;
170*2810bd68SDarrick J. Wong 
171*2810bd68SDarrick J. Wong 	ASSERT(breq->icount == 1);
172*2810bd68SDarrick J. Wong 
173*2810bd68SDarrick J. Wong 	bc.buf = kmem_zalloc(sizeof(struct xfs_bstat), KM_SLEEP | KM_MAYFAIL);
174*2810bd68SDarrick J. Wong 	if (!bc.buf)
175*2810bd68SDarrick J. Wong 		return -ENOMEM;
176*2810bd68SDarrick J. Wong 
177*2810bd68SDarrick J. Wong 	error = xfs_bulkstat_one_int(breq->mp, NULL, breq->startino, &bc);
178*2810bd68SDarrick J. Wong 
179*2810bd68SDarrick J. Wong 	kmem_free(bc.buf);
180*2810bd68SDarrick J. Wong 
181*2810bd68SDarrick J. Wong 	/*
182*2810bd68SDarrick J. Wong 	 * If we reported one inode to userspace then we abort because we hit
183*2810bd68SDarrick J. Wong 	 * the end of the buffer.  Don't leak that back to userspace.
184*2810bd68SDarrick J. Wong 	 */
185*2810bd68SDarrick J. Wong 	if (error == XFS_IWALK_ABORT)
186*2810bd68SDarrick J. Wong 		error = 0;
187*2810bd68SDarrick J. Wong 
188*2810bd68SDarrick J. Wong 	return error;
1898b56f083SNathan Scott }
1908b56f083SNathan Scott 
1914b8fdfecSJie Liu /*
1924b8fdfecSJie Liu  * Loop over all clusters in a chunk for a given incore inode allocation btree
1934b8fdfecSJie Liu  * record.  Do a readahead if there are any allocated inodes in that cluster.
1944b8fdfecSJie Liu  */
195a211432cSDarrick J. Wong void
1964b8fdfecSJie Liu xfs_bulkstat_ichunk_ra(
1974b8fdfecSJie Liu 	struct xfs_mount		*mp,
1984b8fdfecSJie Liu 	xfs_agnumber_t			agno,
1994b8fdfecSJie Liu 	struct xfs_inobt_rec_incore	*irec)
2004b8fdfecSJie Liu {
201ef325959SDarrick J. Wong 	struct xfs_ino_geometry		*igeo = M_IGEO(mp);
2024b8fdfecSJie Liu 	xfs_agblock_t			agbno;
2034b8fdfecSJie Liu 	struct blk_plug			plug;
2044b8fdfecSJie Liu 	int				i;	/* inode chunk index */
2054b8fdfecSJie Liu 
2064b8fdfecSJie Liu 	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
2074b8fdfecSJie Liu 
2084b8fdfecSJie Liu 	blk_start_plug(&plug);
209ef325959SDarrick J. Wong 	for (i = 0;
210ef325959SDarrick J. Wong 	     i < XFS_INODES_PER_CHUNK;
211ef325959SDarrick J. Wong 	     i += igeo->inodes_per_cluster,
212ef325959SDarrick J. Wong 			agbno += igeo->blocks_per_cluster) {
213ef325959SDarrick J. Wong 		if (xfs_inobt_maskn(i, igeo->inodes_per_cluster) &
21483dcdb44SDarrick J. Wong 		    ~irec->ir_free) {
21583dcdb44SDarrick J. Wong 			xfs_btree_reada_bufs(mp, agno, agbno,
216ef325959SDarrick J. Wong 					igeo->blocks_per_cluster,
2174b8fdfecSJie Liu 					&xfs_inode_buf_ops);
2184b8fdfecSJie Liu 		}
2194b8fdfecSJie Liu 	}
2204b8fdfecSJie Liu 	blk_finish_plug(&plug);
2214b8fdfecSJie Liu }
2224b8fdfecSJie Liu 
223f3d1e587SJie Liu /*
224f3d1e587SJie Liu  * Lookup the inode chunk that the given inode lives in and then get the record
225f3d1e587SJie Liu  * if we found the chunk.  If the inode was not the last in the chunk and there
226f3d1e587SJie Liu  * are some left allocated, update the data for the pointed-to record as well as
227f3d1e587SJie Liu  * return the count of grabbed inodes.
228f3d1e587SJie Liu  */
int
xfs_bulkstat_grab_ichunk(
	struct xfs_btree_cur		*cur,	/* btree cursor */
	xfs_agino_t			agino,	/* starting inode of chunk */
	int				*icount,/* return # of inodes grabbed */
	struct xfs_inobt_rec_incore	*irec)	/* btree record */
{
	int				idx;	/* index into inode chunk */
	int				stat;
	int				error = 0;

	/* Lookup the inode chunk that this inode lives in */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
	if (error)
		return error;
	if (!stat) {
		/* No record at or before agino: nothing to grab. */
		*icount = 0;
		return error;
	}

	/* Get the record, should always work */
	error = xfs_inobt_get_rec(cur, irec, &stat);
	if (error)
		return error;
	/* Lookup said a record exists; failing to read it back is corruption.
	 * Note: this macro returns -EFSCORRUPTED if the check fails. */
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);

	/* Check if the record contains the inode in request */
	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
		/* Chunk ends before agino: nothing in it to grab. */
		*icount = 0;
		return 0;
	}

	/* First inode in this chunk strictly after the requested one. */
	idx = agino - irec->ir_startino + 1;
	if (idx < XFS_INODES_PER_CHUNK &&
	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
		int	i;

		/* We got a right chunk with some left inodes allocated at it.
		 * Grab the chunk record.  Mark all the uninteresting inodes
		 * free -- because they're before our start point.
		 */
		for (i = 0; i < idx; i++) {
			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
				irec->ir_freecount++;
		}

		irec->ir_free |= xfs_inobt_maskn(0, idx);
		*icount = irec->ir_count - irec->ir_freecount;
	}

	return 0;
}
281f3d1e587SJie Liu 
/* NOTE(review): apparently a leftover from the pre-iwalk bulkstat loop —
 * statstruct_size is not defined anywhere in this file, so any use of this
 * macro here would fail to compile.  Candidate for removal; confirm no
 * remaining users. */
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
283cd57e594SLachlan McIlroy 
284bf4a5af2SDave Chinner static int
285*2810bd68SDarrick J. Wong xfs_bulkstat_iwalk(
2861e773c49SJie Liu 	struct xfs_mount	*mp,
287*2810bd68SDarrick J. Wong 	struct xfs_trans	*tp,
288*2810bd68SDarrick J. Wong 	xfs_ino_t		ino,
289*2810bd68SDarrick J. Wong 	void			*data)
2901e773c49SJie Liu {
291*2810bd68SDarrick J. Wong 	int			error;
2921e773c49SJie Liu 
293*2810bd68SDarrick J. Wong 	error = xfs_bulkstat_one_int(mp, tp, ino, data);
294*2810bd68SDarrick J. Wong 	/* bulkstat just skips over missing inodes */
295*2810bd68SDarrick J. Wong 	if (error == -ENOENT || error == -EINVAL)
296*2810bd68SDarrick J. Wong 		return 0;
2971e773c49SJie Liu 	return error;
2981e773c49SJie Liu }
2991e773c49SJie Liu 
3001e773c49SJie Liu /*
301*2810bd68SDarrick J. Wong  * Check the incoming lastino parameter.
302*2810bd68SDarrick J. Wong  *
303*2810bd68SDarrick J. Wong  * We allow any inode value that could map to physical space inside the
304*2810bd68SDarrick J. Wong  * filesystem because if there are no inodes there, bulkstat moves on to the
305*2810bd68SDarrick J. Wong  * next chunk.  In other words, the magic agino value of zero takes us to the
306*2810bd68SDarrick J. Wong  * first chunk in the AG, and an agino value past the end of the AG takes us to
307*2810bd68SDarrick J. Wong  * the first chunk in the next AG.
308*2810bd68SDarrick J. Wong  *
309*2810bd68SDarrick J. Wong  * Therefore we can end early if the requested inode is beyond the end of the
310*2810bd68SDarrick J. Wong  * filesystem or doesn't map properly.
3111da177e4SLinus Torvalds  */
312*2810bd68SDarrick J. Wong static inline bool
313*2810bd68SDarrick J. Wong xfs_bulkstat_already_done(
314*2810bd68SDarrick J. Wong 	struct xfs_mount	*mp,
315*2810bd68SDarrick J. Wong 	xfs_ino_t		startino)
3161da177e4SLinus Torvalds {
317*2810bd68SDarrick J. Wong 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
318*2810bd68SDarrick J. Wong 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, startino);
3191da177e4SLinus Torvalds 
320*2810bd68SDarrick J. Wong 	return agno >= mp->m_sb.sb_agcount ||
321*2810bd68SDarrick J. Wong 	       startino != XFS_AGINO_TO_INO(mp, agno, agino);
322*2810bd68SDarrick J. Wong }
323*2810bd68SDarrick J. Wong 
/* Return stat information in bulk (by-inode) for the filesystem. */
int
xfs_bulkstat(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	int			error;

	/* Nothing to do if the cursor is already off the end of the fs. */
	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	/* Scratch bstat buffer reused by every xfs_bulkstat_one_int call. */
	bc.buf = kmem_zalloc(sizeof(struct xfs_bstat), KM_SLEEP | KM_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	/* Walk up to icount allocated inodes starting at startino. */
	error = xfs_iwalk(breq->mp, NULL, breq->startino, xfs_bulkstat_iwalk,
			breq->icount, &bc);

	kmem_free(bc.buf);

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The lastino pointer will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}
3601da177e4SLinus Torvalds 
361faa63e95SMichal Marek int
362faa63e95SMichal Marek xfs_inumbers_fmt(
363faa63e95SMichal Marek 	void			__user *ubuffer, /* buffer to write to */
364549fa006SJie Liu 	const struct xfs_inogrp	*buffer,	/* buffer to read from */
365faa63e95SMichal Marek 	long			count,		/* # of elements to read */
366faa63e95SMichal Marek 	long			*written)	/* # of bytes written */
367faa63e95SMichal Marek {
368faa63e95SMichal Marek 	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
369faa63e95SMichal Marek 		return -EFAULT;
370faa63e95SMichal Marek 	*written = count * sizeof(*buffer);
371faa63e95SMichal Marek 	return 0;
372faa63e95SMichal Marek }
373faa63e95SMichal Marek 
3741da177e4SLinus Torvalds /*
3751da177e4SLinus Torvalds  * Return inode number table for the filesystem.
3761da177e4SLinus Torvalds  */
int					/* error status */
xfs_inumbers(
	struct xfs_mount	*mp,/* mount point for filesystem */
	xfs_ino_t		*lastino,/* last inode returned */
	int			*count,/* size of buffer/count returned */
	void			__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf		formatter)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, *lastino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, *lastino);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_buf		*agbp = NULL;
	struct xfs_inogrp	*buffer;	/* staging array flushed in batches */
	int			bcount;		/* capacity of the staging array */
	int			left = *count;	/* records still wanted by caller */
	int			bufidx = 0;	/* next free slot in buffer */
	int			error = 0;

	*count = 0;
	/* Bail out early if the cursor doesn't map to a valid AG/inode. */
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
		return error;

	/* Stage at most a page worth of records between formatter calls. */
	bcount = min(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
	do {
		struct xfs_inobt_rec_incore	r;
		int				stat;

		/* No cursor yet for this AG: read its AGI and position one. */
		if (!agbp) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error)
				break;

			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
						    XFS_BTNUM_INO);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &stat);
			if (error)
				break;
			if (!stat)
				goto next_ag;
		}

		error = xfs_inobt_get_rec(cur, &r, &stat);
		if (error)
			break;
		if (!stat)
			goto next_ag;

		/* Remember the last inode of this chunk as the new cursor. */
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount = r.ir_count - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		/* Staging array full: flush it to the caller's buffer. */
		if (++bufidx == bcount) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (error)
				break;
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (!--left)
			break;

		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
		if (stat)
			continue;

next_ag:
		/* Finished with this AG: release cursor and AGI, move on. */
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		cur = NULL;
		xfs_buf_relse(agbp);
		agbp = NULL;
		agino = 0;
		agno++;
	} while (agno < mp->m_sb.sb_agcount);

	if (!error) {
		/* Flush any partially-filled staging array. */
		if (bufidx) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (!error)
				*count += bufidx;
		}
		/* Only advance the caller's cursor on full success. */
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}

	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (agbp)
		xfs_buf_relse(agbp);

	return error;
}
479