xref: /openbmc/linux/fs/xfs/xfs_itable.c (revision 0b61f8a4079d904b1b1d47946cca898313de8c26)
1*0b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
37b718769SNathan Scott  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
47b718769SNathan Scott  * All Rights Reserved.
51da177e4SLinus Torvalds  */
61da177e4SLinus Torvalds #include "xfs.h"
7a844f451SNathan Scott #include "xfs_fs.h"
870a9883cSDave Chinner #include "xfs_shared.h"
9a4fbe6abSDave Chinner #include "xfs_format.h"
10239880efSDave Chinner #include "xfs_log_format.h"
11239880efSDave Chinner #include "xfs_trans_resv.h"
121da177e4SLinus Torvalds #include "xfs_mount.h"
131da177e4SLinus Torvalds #include "xfs_inode.h"
14a4fbe6abSDave Chinner #include "xfs_btree.h"
151da177e4SLinus Torvalds #include "xfs_ialloc.h"
16a4fbe6abSDave Chinner #include "xfs_ialloc_btree.h"
171da177e4SLinus Torvalds #include "xfs_itable.h"
181da177e4SLinus Torvalds #include "xfs_error.h"
19f2d67614SChristoph Hellwig #include "xfs_trace.h"
2033479e05SDave Chinner #include "xfs_icache.h"
211da177e4SLinus Torvalds 
/*
 * Return stat information for one inode.
 * Return 0 if ok, else negative errno.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	/* Reject a missing buffer and filesystem-internal inode numbers. */
	if (!buffer || xfs_internal_inum(mp, ino))
		return -EINVAL;

	buf = kmem_zalloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return -ENOMEM;

	/*
	 * NOTE(review): UNTRUSTED is presumably set because the inode number
	 * originates from a userspace cookie / btree walk and must be
	 * validated; DONTCACHE presumably keeps these one-shot lookups from
	 * polluting the inode cache — confirm against xfs_iget().
	 */
	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error)
		goto out_free;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);

	dic = &ip->i_d;

	/* xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;

	/* Timestamps, link count, mode and generation come from the VFS inode. */
	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
	buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
	buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
	buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	/* extent size hint is stored in filesystem blocks; convert to bytes */
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	/* CoW extent size hint only exists on v3 (CRC-enabled) inodes. */
	if (dic->di_version == 3) {
		if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize = dic->di_cowextsize <<
					mp->m_sb.sb_blocklog;
	}

	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		/* include delayed-allocation blocks not yet on disk */
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	/* Drop the lock and reference before the (possibly faulting) copy-out. */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);
	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}
1271da177e4SLinus Torvalds 
12865fbaf24Ssandeen@sandeen.net /* Return 0 on success or positive error */
129faa63e95SMichal Marek STATIC int
130faa63e95SMichal Marek xfs_bulkstat_one_fmt(
131faa63e95SMichal Marek 	void			__user *ubuffer,
13265fbaf24Ssandeen@sandeen.net 	int			ubsize,
13365fbaf24Ssandeen@sandeen.net 	int			*ubused,
134faa63e95SMichal Marek 	const xfs_bstat_t	*buffer)
135faa63e95SMichal Marek {
13665fbaf24Ssandeen@sandeen.net 	if (ubsize < sizeof(*buffer))
1372451337dSDave Chinner 		return -ENOMEM;
138faa63e95SMichal Marek 	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
1392451337dSDave Chinner 		return -EFAULT;
14065fbaf24Ssandeen@sandeen.net 	if (ubused)
14165fbaf24Ssandeen@sandeen.net 		*ubused = sizeof(*buffer);
14265fbaf24Ssandeen@sandeen.net 	return 0;
143faa63e95SMichal Marek }
144faa63e95SMichal Marek 
1452ee4fa5cSsandeen@sandeen.net int
1462ee4fa5cSsandeen@sandeen.net xfs_bulkstat_one(
1472ee4fa5cSsandeen@sandeen.net 	xfs_mount_t	*mp,		/* mount point for filesystem */
1482ee4fa5cSsandeen@sandeen.net 	xfs_ino_t	ino,		/* inode number to get data for */
1492ee4fa5cSsandeen@sandeen.net 	void		__user *buffer,	/* buffer to place output in */
1502ee4fa5cSsandeen@sandeen.net 	int		ubsize,		/* size of buffer */
1512ee4fa5cSsandeen@sandeen.net 	int		*ubused,	/* bytes used by me */
1522ee4fa5cSsandeen@sandeen.net 	int		*stat)		/* BULKSTAT_RV_... */
1532ee4fa5cSsandeen@sandeen.net {
1542ee4fa5cSsandeen@sandeen.net 	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
1557b6259e7SDave Chinner 				    xfs_bulkstat_one_fmt, ubused, stat);
1568b56f083SNathan Scott }
1578b56f083SNathan Scott 
1584b8fdfecSJie Liu /*
1594b8fdfecSJie Liu  * Loop over all clusters in a chunk for a given incore inode allocation btree
1604b8fdfecSJie Liu  * record.  Do a readahead if there are any allocated inodes in that cluster.
1614b8fdfecSJie Liu  */
1624b8fdfecSJie Liu STATIC void
1634b8fdfecSJie Liu xfs_bulkstat_ichunk_ra(
1644b8fdfecSJie Liu 	struct xfs_mount		*mp,
1654b8fdfecSJie Liu 	xfs_agnumber_t			agno,
1664b8fdfecSJie Liu 	struct xfs_inobt_rec_incore	*irec)
1674b8fdfecSJie Liu {
1684b8fdfecSJie Liu 	xfs_agblock_t			agbno;
1694b8fdfecSJie Liu 	struct blk_plug			plug;
1704b8fdfecSJie Liu 	int				blks_per_cluster;
1714b8fdfecSJie Liu 	int				inodes_per_cluster;
1724b8fdfecSJie Liu 	int				i;	/* inode chunk index */
1734b8fdfecSJie Liu 
1744b8fdfecSJie Liu 	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
1754b8fdfecSJie Liu 	blks_per_cluster = xfs_icluster_size_fsb(mp);
1764b8fdfecSJie Liu 	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
1774b8fdfecSJie Liu 
1784b8fdfecSJie Liu 	blk_start_plug(&plug);
1794b8fdfecSJie Liu 	for (i = 0; i < XFS_INODES_PER_CHUNK;
1804b8fdfecSJie Liu 	     i += inodes_per_cluster, agbno += blks_per_cluster) {
1814b8fdfecSJie Liu 		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
1824b8fdfecSJie Liu 			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
1834b8fdfecSJie Liu 					     &xfs_inode_buf_ops);
1844b8fdfecSJie Liu 		}
1854b8fdfecSJie Liu 	}
1864b8fdfecSJie Liu 	blk_finish_plug(&plug);
1874b8fdfecSJie Liu }
1884b8fdfecSJie Liu 
/*
 * Lookup the inode chunk that the given inode lives in and then get the record
 * if we found the chunk.  If the inode was not the last in the chunk and there
 * are some left allocated, update the data for the pointed-to record as well as
 * return the count of grabbed inodes.
 */
STATIC int
xfs_bulkstat_grab_ichunk(
	struct xfs_btree_cur		*cur,	/* btree cursor */
	xfs_agino_t			agino,	/* starting inode of chunk */
	int				*icount,/* return # of inodes grabbed */
	struct xfs_inobt_rec_incore	*irec)	/* btree record */
{
	int				idx;	/* index into inode chunk */
	int				stat;
	int				error = 0;

	/* Lookup the inode chunk that this inode lives in */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
	if (error)
		return error;
	/* No record at or below agino: nothing to grab in this AG. */
	if (!stat) {
		*icount = 0;
		return error;
	}

	/* Get the record, should always work */
	error = xfs_inobt_get_rec(cur, irec, &stat);
	if (error)
		return error;
	/* A successful LE lookup must yield a record; corruption otherwise. */
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);

	/* Check if the record contains the inode in request */
	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
		*icount = 0;
		return 0;
	}

	/*
	 * idx is the first inode AFTER agino within the chunk; inodes up to
	 * and including agino were already returned on a previous call.
	 */
	idx = agino - irec->ir_startino + 1;
	if (idx < XFS_INODES_PER_CHUNK &&
	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
		int	i;

		/* We got a right chunk with some left inodes allocated at it.
		 * Grab the chunk record.  Mark all the uninteresting inodes
		 * free -- because they're before our start point.
		 */
		for (i = 0; i < idx; i++) {
			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
				irec->ir_freecount++;
		}

		irec->ir_free |= xfs_inobt_maskn(0, idx);
		*icount = irec->ir_count - irec->ir_freecount;
	}

	return 0;
}
247f3d1e587SJie Liu 
/* True while the user buffer still has room for at least one stat struct;
 * relies on a local named statstruct_size being in scope at the use site. */
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

/* Cursor tracking consumption of the user's buffer across inode chunks. */
struct xfs_bulkstat_agichunk {
	char		__user **ac_ubuffer;/* pointer into user's buffer */
	int		ac_ubleft;	/* bytes left in user's buffer */
	int		ac_ubelem;	/* spaces used in user's buffer */
};
255bf4a5af2SDave Chinner 
/*
 * Process inodes in chunk with a pointer to a formatter function
 * that will iget the inode and fill in the appropriate structure.
 *
 * Returns 0 on success (including a clean stop when the user buffer fills);
 * a non-swallowed formatter error is returned to the caller.  On return,
 * *last_agino holds the cookie of the last inode actually processed so the
 * next bulkstat call resumes after it.
 */
static int
xfs_bulkstat_ag_ichunk(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irbp,
	bulkstat_one_pf			formatter,
	size_t				statstruct_size,
	struct xfs_bulkstat_agichunk	*acp,
	xfs_agino_t			*last_agino)
{
	char				__user **ubufp = acp->ac_ubuffer;
	int				chunkidx;
	int				error = 0;
	xfs_agino_t			agino = irbp->ir_startino;

	for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
	     chunkidx++, agino++) {
		int		fmterror;
		int		ubused;

		/* inode won't fit in buffer, we are done */
		if (acp->ac_ubleft < statstruct_size)
			break;

		/* Skip if this inode is free */
		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
			continue;

		/* Get the inode and fill in a single buffer */
		ubused = statstruct_size;
		error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
				  *ubufp, acp->ac_ubleft, &ubused, &fmterror);

		/*
		 * -ENOENT/-EINVAL are expected (inode freed since the btree
		 * scan, or an internal inode — see xfs_bulkstat_one_int);
		 * anything else, or an explicit GIVEUP, aborts the walk and
		 * zeroes ac_ubleft so the caller stops too.
		 */
		if (fmterror == BULKSTAT_RV_GIVEUP ||
		    (error && error != -ENOENT && error != -EINVAL)) {
			acp->ac_ubleft = 0;
			ASSERT(error);
			break;
		}

		/* be careful not to leak error if at end of chunk */
		if (fmterror == BULKSTAT_RV_NOTHING || error) {
			error = 0;
			continue;
		}

		/* One stat struct landed in the user buffer; advance cursors. */
		*ubufp += ubused;
		acp->ac_ubleft -= ubused;
		acp->ac_ubelem++;
	}

	/*
	 * Post-update *last_agino. At this point, agino will always point one
	 * inode past the last inode we processed successfully. Hence we
	 * substract that inode when setting the *last_agino cursor so that we
	 * return the correct cookie to userspace. On the next bulkstat call,
	 * the inode under the lastino cookie will be skipped as we have already
	 * processed it here.
	 */
	*last_agino = agino - 1;

	return error;
}
3231e773c49SJie Liu 
/*
 * Return stat information in bulk (by-inode) for the filesystem.
 *
 * Operates in two phases per allocation group so that no btree/AGI locks are
 * held while igetting inodes: (1) walk the inobt and stash records with
 * allocated inodes into irbuf, issuing cluster readahead; (2) drop the
 * cursor and AGI buffer, then format the stashed chunks into the user's
 * buffer.  *lastinop is both the resume cookie in and out.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	int			nirbuf;	/* size of irbuf */
	int			ubcount; /* size of user's buffer */
	struct xfs_bulkstat_agichunk ac;
	int			error = 0;

	/*
	 * Get the last inode value, see if there's nothing to do.
	 * A cookie that doesn't round-trip through AGNO/AGINO conversion is
	 * out of range, so report completion immediately.
	 */
	agno = XFS_INO_TO_AGNO(mp, *lastinop);
	agino = XFS_INO_TO_AGINO(mp, *lastinop);
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}

	ubcount = *ubcountp; /* statstruct's */
	ac.ac_ubuffer = &ubuffer;
	ac.ac_ubleft = ubcount * statstruct_size; /* bytes */;
	ac.ac_ubelem = 0;

	*ubcountp = 0;
	*done = 0;

	/* Staging area for inobt records gathered in phase 1. */
	irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
	if (!irbuf)
		return -ENOMEM;
	nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	while (agno < mp->m_sb.sb_agcount) {
		struct xfs_inobt_rec_incore	*irbp = irbuf;
		struct xfs_inobt_rec_incore	*irbufend = irbuf + nirbuf;
		bool				end_of_ag = false;
		int				icount = 0;
		int				stat;

		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error)
			break;
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
					    XFS_BTNUM_INO);
		if (agino > 0) {
			/*
			 * In the middle of an allocation group, we need to get
			 * the remainder of the chunk we're in.
			 */
			struct xfs_inobt_rec_incore	r;

			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
			if (error)
				goto del_cursor;
			if (icount) {
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
			}
			/* Increment to the next record */
			error = xfs_btree_increment(cur, 0, &stat);
		} else {
			/* Start of ag.  Lookup the first inode chunk */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
		}
		if (error || stat == 0) {
			end_of_ag = true;
			goto del_cursor;
		}

		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			struct xfs_inobt_rec_incore	r;

			error = xfs_inobt_get_rec(cur, &r, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < r.ir_count) {
				xfs_bulkstat_ichunk_ra(mp, agno, &r);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += r.ir_count - r.ir_freecount;
			}
			error = xfs_btree_increment(cur, 0, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}
			cond_resched();
		}

		/*
		 * Drop the btree buffers and the agi buffer as we can't hold any
		 * of the locks these represent when calling iget. If there is a
		 * pending error, then we are done.
		 */
del_cursor:
		xfs_btree_del_cursor(cur, error ?
					  XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		if (error)
			break;
		/*
		 * Now format all the good inodes into the user's buffer. The
		 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
		 * for the next loop iteration.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
		     irbp++) {
			error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
					formatter, statstruct_size, &ac,
					&agino);
			if (error)
				break;

			cond_resched();
		}

		/*
		 * If we've run out of space or had a formatting error, we
		 * are now done
		 */
		if (ac.ac_ubleft < statstruct_size || error)
			break;

		/* Exhausted this AG: advance to the start of the next one. */
		if (end_of_ag) {
			agno++;
			agino = 0;
		}
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf);
	*ubcountp = ac.ac_ubelem;

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The lastino pointer will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (ac.ac_ubelem)
		error = 0;

	/*
	 * If we ran out of filesystem, lastino will point off the end of
	 * the filesystem so the next call will return immediately.
	 */
	*lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
	if (agno >= mp->m_sb.sb_agcount)
		*done = 1;

	return error;
}
5221da177e4SLinus Torvalds 
523faa63e95SMichal Marek int
524faa63e95SMichal Marek xfs_inumbers_fmt(
525faa63e95SMichal Marek 	void			__user *ubuffer, /* buffer to write to */
526549fa006SJie Liu 	const struct xfs_inogrp	*buffer,	/* buffer to read from */
527faa63e95SMichal Marek 	long			count,		/* # of elements to read */
528faa63e95SMichal Marek 	long			*written)	/* # of bytes written */
529faa63e95SMichal Marek {
530faa63e95SMichal Marek 	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
531faa63e95SMichal Marek 		return -EFAULT;
532faa63e95SMichal Marek 	*written = count * sizeof(*buffer);
533faa63e95SMichal Marek 	return 0;
534faa63e95SMichal Marek }
535faa63e95SMichal Marek 
/*
 * Return inode number table for the filesystem.
 *
 * Walks the inode allocation btree of each AG starting from the *lastino
 * cookie, batching up to a page worth of xfs_inogrp records before handing
 * them to the formatter.  On success *count is the number of records
 * returned and *lastino is updated so the next call resumes after the last
 * chunk reported.
 */
int					/* error status */
xfs_inumbers(
	struct xfs_mount	*mp,/* mount point for filesystem */
	xfs_ino_t		*lastino,/* last inode returned */
	int			*count,/* size of buffer/count returned */
	void			__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf		formatter)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, *lastino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, *lastino);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_buf		*agbp = NULL;
	struct xfs_inogrp	*buffer;
	int			bcount;
	int			left = *count;
	int			bufidx = 0;
	int			error = 0;

	*count = 0;
	/* Out-of-range cookie: nothing to do (same round-trip check as bulkstat). */
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
		return error;

	/* Batch size: at most one page of records, or fewer if caller asked. */
	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
	do {
		struct xfs_inobt_rec_incore	r;
		int				stat;

		/* No AGI held yet (first pass, or just crossed into a new AG). */
		if (!agbp) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error)
				break;

			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
						    XFS_BTNUM_INO);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &stat);
			if (error)
				break;
			if (!stat)
				goto next_ag;
		}

		error = xfs_inobt_get_rec(cur, &r, &stat);
		if (error)
			break;
		if (!stat)
			goto next_ag;

		/* Record the whole chunk; resume cookie is its last inode. */
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount = r.ir_count - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		/* Flush a full batch to userspace via the formatter. */
		if (++bufidx == bcount) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (error)
				break;
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (!--left)
			break;

		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
		if (stat)
			continue;

		/* End of this AG: release cursor/AGI and move to the next AG. */
next_ag:
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		cur = NULL;
		xfs_buf_relse(agbp);
		agbp = NULL;
		agino = 0;
		agno++;
	} while (agno < mp->m_sb.sb_agcount);

	/* Flush any partial batch and publish the resume cookie. */
	if (!error) {
		if (bufidx) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (!error)
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}

	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);

	return error;
}
642