/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

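/*
 * Filesystem-internal inodes (the realtime bitmap and summary inodes,
 * plus the quota inodes when quotas are enabled) must never be exposed
 * through the bulkstat interface.
 */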
STATIC int
xfs_internal_inum(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
		(xfs_sb_version_hasquota(&mp->m_sb) &&
		 xfs_is_quota_inode(&mp->m_sb, ino)));
}

/*
 * Return stat information for one inode.
 * Return 0 if ok, else a negative errno.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	if (!buffer || xfs_internal_inum(mp, ino))
		return -EINVAL;

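	/*
	 * KM_MAYFAIL lets this transient allocation fail under memory
	 * pressure instead of retrying forever; we just report -ENOMEM.
	 */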
	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return -ENOMEM;

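	/*
	 * The inode number came from userspace, so use UNTRUSTED to have
	 * the lookup verify it against the inode btree, and DONTCACHE so
	 * a full-filesystem scan doesn't flood the inode cache.
	 */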
	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error)
		goto out_free;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);

	dic = &ip->i_d;

	/*
	 * xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	buf->bs_atime.tv_sec = dic->di_atime.t_sec;
	buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

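	/*
	 * The device, block size and block count fields depend on the
	 * on-disk format of the data fork.
	 */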
	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);
	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}
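/*
 * Illustrative sketch only (not part of the kernel API): a caller supplies
 * a formatter matching the bulkstat_one_fmt_pf signature, along the lines
 * of
 *
 *	int my_fmt(void __user *ubuf, int ubsize, int *ubused,
 *		   const xfs_bstat_t *bstat);
 *
 * which copies *bstat out to ubuf and reports the bytes consumed in
 * *ubused.  xfs_bulkstat_one_fmt() below is the stock implementation.
 */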

/* Return 0 on success or a negative error */
STATIC int
xfs_bulkstat_one_fmt(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	if (ubsize < sizeof(*buffer))
		return -ENOMEM;
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return -EFAULT;
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_bulkstat_ichunk_ra(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irec)
{
	xfs_agblock_t			agbno;
	struct blk_plug			plug;
	int				blks_per_cluster;
	int				inodes_per_cluster;
	int				i;	/* inode chunk index */

	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
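	/*
	 * sb_inopblog is log2(inodes per block), so the shift below turns
	 * a cluster size in blocks into one in inodes.
	 */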
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;

	blk_start_plug(&plug);
	for (i = 0; i < XFS_INODES_PER_CHUNK;
	     i += inodes_per_cluster, agbno += blks_per_cluster) {
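		/*
		 * ir_free has a bit set for every free inode, so only read
		 * ahead clusters that still contain an allocated inode.
		 */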
		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
					     &xfs_inode_buf_ops);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * Look up the inode chunk that the given inode lives in and then get the
 * record if we found the chunk.  If the given inode was not the last in the
 * chunk and some inodes after it are still allocated, mark everything up to
 * and including the given inode free in the record and return the count of
 * grabbed inodes.
 */
STATIC int
xfs_bulkstat_grab_ichunk(
	struct xfs_btree_cur		*cur,	/* btree cursor */
	xfs_agino_t			agino,	/* starting inode of chunk */
	int				*icount,/* return # of inodes grabbed */
	struct xfs_inobt_rec_incore	*irec)	/* btree record */
{
	int				idx;	/* index into inode chunk */
	int				stat;
	int				error = 0;

	/* Lookup the inode chunk that this inode lives in */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
	if (error)
		return error;
	if (!stat) {
		*icount = 0;
		return error;
	}

	/* Get the record, should always work */
	error = xfs_inobt_get_rec(cur, irec, &stat);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(stat == 1);

	/* Check that the record contains the requested inode */
	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
		return -EINVAL;

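	/*
	 * idx indexes the first inode after agino itself; everything below
	 * idx lies at or before our starting point and is uninteresting.
	 */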
	idx = agino - irec->ir_startino + 1;
	if (idx < XFS_INODES_PER_CHUNK &&
	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
		int	i;

		/*
		 * We found the right chunk and some inodes after our start
		 * point are still allocated in it.  Grab the chunk record.
		 * Mark all the uninteresting inodes free because they're
		 * before our start point.
		 */
		for (i = 0; i < idx; i++) {
			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
				irec->ir_freecount++;
		}

		irec->ir_free |= xfs_inobt_maskn(0, idx);
		*icount = XFS_INODES_PER_CHUNK - irec->ir_freecount;
	}

	return 0;
}

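/* True while the user buffer still has room for another stat structure. */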
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

/*
 * Process inodes in a chunk with a pointer to a formatter function
 * that will iget the inode and fill in the appropriate structure.
 */
int
xfs_bulkstat_ag_ichunk(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irbp,
	bulkstat_one_pf			formatter,
	size_t				statstruct_size,
	struct xfs_bulkstat_agichunk	*acp)
{
	xfs_ino_t			lastino = acp->ac_lastino;
	char				__user **ubufp = acp->ac_ubuffer;
	int				ubleft = acp->ac_ubleft;
	int				ubelem = acp->ac_ubelem;
	int				chunkidx, clustidx;
	int				error = 0;
	xfs_agino_t			agino;

	for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
	     XFS_BULKSTAT_UBLEFT(ubleft) &&
	     irbp->ir_freecount < XFS_INODES_PER_CHUNK;
	     chunkidx++, clustidx++, agino++) {
		int		fmterror;	/* bulkstat formatter result */
		int		ubused;
		xfs_ino_t	ino = XFS_AGINO_TO_INO(mp, agno, agino);

		ASSERT(chunkidx < XFS_INODES_PER_CHUNK);

		/* Skip if this inode is free */
		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
			lastino = ino;
			continue;
		}

		/*
		 * Count used inodes as free so we can tell when the
		 * chunk is used up.
		 */
		irbp->ir_freecount++;

		/* Get the inode and fill in a single buffer */
		ubused = statstruct_size;
		error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror);
		if (fmterror == BULKSTAT_RV_NOTHING) {
			if (error && error != -ENOENT && error != -EINVAL) {
				ubleft = 0;
				break;
			}
			lastino = ino;
			continue;
		}
		if (fmterror == BULKSTAT_RV_GIVEUP) {
			ubleft = 0;
			ASSERT(error);
			break;
		}
		if (*ubufp)
			*ubufp += ubused;
		ubleft -= ubused;
		ubelem++;
		lastino = ino;
	}

	acp->ac_lastino = lastino;
	acp->ac_ubleft = ubleft;
	acp->ac_ubelem = ubelem;

	return error;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are no more stats to get */
{
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	int			end_of_ag; /* set if we've seen the ag end */
	int			error;	/* error code */
	int			fmterror; /* bulkstat formatter result */
	int			i;	/* loop index */
	int			icount;	/* count of inodes good in irbuf */
	size_t			irbsize; /* size of irec buffer in bytes */
	xfs_ino_t		ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino; /* last inode number returned */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
	int			ubcount; /* size of user's buffer */
	int			ubleft;	/* bytes left in user's buffer */
	char			__user *ubufp;	/* pointer into user's buffer */
	int			ubelem;	/* spaces used in user's buffer */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	ino = (xfs_ino_t)*lastinop;
	lastino = ino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}

	ubcount = *ubcountp; /* statstructs */
	ubleft = ubcount * statstruct_size; /* bytes */
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
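	/*
	 * Grab as large a zeroed record buffer as the allocator will give
	 * us, between one and four pages; the size actually obtained comes
	 * back in irbsize.
	 */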
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return -ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	rval = 0;
	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
		cond_resched();
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error)
			break;
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
					    XFS_BTNUM_INO);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		icount = 0;
		if (agino > 0) {
			/*
			 * In the middle of an allocation group, we need to get
			 * the remainder of the chunk we're in.
			 */
			struct xfs_inobt_rec_incore	r;

			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
			if (error)
				break;
			if (icount) {
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
			}
			/* Increment to the next record */
			error = xfs_btree_increment(cur, 0, &tmp);
		} else {
			/* Start of ag.  Lookup the first inode chunk */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
		}
		if (error)
			break;

		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			struct xfs_inobt_rec_incore	r;

			error = xfs_inobt_get_rec(cur, &r, &i);
			if (error || i == 0) {
				end_of_ag = 1;
				break;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
				xfs_bulkstat_ichunk_ra(mp, agno, &r);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
			error = xfs_btree_increment(cur, 0, &tmp);
			cond_resched();
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
			struct xfs_bulkstat_agichunk ac;

			ac.ac_lastino = lastino;
			ac.ac_ubuffer = &ubuffer;
			ac.ac_ubleft = ubleft;
			ac.ac_ubelem = ubelem;
			error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
					formatter, statstruct_size, &ac);
			if (error)
				rval = error;

			lastino = ac.ac_lastino;
			ubleft = ac.ac_ubleft;
			ubelem = ac.ac_ubelem;

			cond_resched();
		}
		/*
		 * Set up for the next loop iteration.
		 */
		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			} else
				agino = XFS_INO_TO_AGINO(mp, lastino);
		} else
			break;
	}
	/*
	 * Done: we've either run out of filesystem or out of room in the
	 * user buffer.
	 */
	kmem_free(irbuf);
	*ubcountp = ubelem;
	/*
	 * Found some inodes, return them now and return the error next time.
	 */
	if (ubelem)
		rval = 0;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}

int
xfs_inumbers_fmt(
	void			__user *ubuffer, /* buffer to write to */
	const struct xfs_inogrp	*buffer,	/* buffer to read from */
	long			count,		/* # of elements to read */
	long			*written)	/* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

/*
 * Return inode number table for the filesystem.
 */
int					/* error status */
xfs_inumbers(
	struct xfs_mount	*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastino, /* last inode returned */
	int			*count,	/* size of buffer/count returned */
	void			__user *ubuffer, /* buffer with inode descriptions */
	inumbers_fmt_pf		formatter)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, *lastino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, *lastino);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_buf		*agbp = NULL;
	struct xfs_inogrp	*buffer;
	int			bcount;
	int			left = *count;
	int			bufidx = 0;
	int			error = 0;

	*count = 0;
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
		return error;

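	/*
	 * Buffer at most a page worth of inode group records at a time
	 * before flushing them out through the formatter.
	 */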
	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	do {
		struct xfs_inobt_rec_incore	r;
		int				stat;

		if (!agbp) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error)
				break;

			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
						    XFS_BTNUM_INO);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &stat);
			if (error)
				break;
			if (!stat)
				goto next_ag;
		}

		error = xfs_inobt_get_rec(cur, &r, &stat);
		if (error)
			break;
		if (!stat)
			goto next_ag;

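		/*
		 * Point agino at the last inode of this chunk; *lastino is
		 * set from it below, so a subsequent call's GE lookup
		 * resumes at the next chunk.
		 */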
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount =
			XFS_INODES_PER_CHUNK - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		if (++bufidx == bcount) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (error)
				break;
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (!--left)
			break;

		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
		if (stat)
			continue;

next_ag:
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		cur = NULL;
		xfs_buf_relse(agbp);
		agbp = NULL;
		agino = 0;
		agno++;
	} while (agno < mp->m_sb.sb_agcount);

	if (!error) {
		if (bufidx) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (!error)
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}

	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);

	return error;
}
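
/*
 * Illustrative sketch only (not from this file): the ioctl layer drives
 * the bulkstat scan by carrying lastino across calls, roughly
 *
 *	xfs_ino_t	lastino = 0;
 *	int		done = 0;
 *
 *	do {
 *		int	count = nelems;
 *
 *		error = xfs_bulkstat(mp, &lastino, &count, xfs_bulkstat_one,
 *				     sizeof(xfs_bstat_t), ubuffer, &done);
 *	} while (!error && !done && count > 0);
 *
 * where nelems and ubuffer describe the userspace destination; count
 * returns the number of entries actually placed there on each pass.
 */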
664