/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 */
int
xfs_bulkstat_one_int(
        struct xfs_mount        *mp,            /* mount point for filesystem */
        xfs_ino_t               ino,            /* inode to get data for */
        void __user             *buffer,        /* buffer to place output in */
        int                     ubsize,         /* size of buffer */
        bulkstat_one_fmt_pf     formatter,      /* formatter, copy to user */
        int                     *ubused,        /* bytes used by me */
        int                     *stat)          /* BULKSTAT_RV_... */
{
        struct xfs_icdinode     *dic;           /* dinode core info pointer */
        struct xfs_inode        *ip;            /* incore inode pointer */
        struct inode            *inode;
        struct xfs_bstat        *buf;           /* return buffer */
        int                     error = 0;      /* error value */

        *stat = BULKSTAT_RV_NOTHING;

        if (!buffer || xfs_internal_inum(mp, ino))
                return -EINVAL;

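        /*
         * The bstat buffer is allocated with KM_MAYFAIL, so a failed
         * allocation is reported to the caller as -ENOMEM rather than the
         * allocator retrying indefinitely.
         */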
        buf = kmem_zalloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
        if (!buf)
                return -ENOMEM;

        error = xfs_iget(mp, NULL, ino,
                         (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
                         XFS_ILOCK_SHARED, &ip);
        if (error)
                goto out_free;

        ASSERT(ip != NULL);
        ASSERT(ip->i_imap.im_blkno != 0);
        inode = VFS_I(ip);

        dic = &ip->i_d;

        /* xfs_iget returns the following without needing
         * further change.
         */
        buf->bs_projid_lo = dic->di_projid_lo;
        buf->bs_projid_hi = dic->di_projid_hi;
        buf->bs_ino = ino;
        buf->bs_uid = dic->di_uid;
        buf->bs_gid = dic->di_gid;
        buf->bs_size = dic->di_size;

        buf->bs_nlink = inode->i_nlink;
        buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
        buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
        buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
        buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
        buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
        buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
        buf->bs_gen = inode->i_generation;
        buf->bs_mode = inode->i_mode;

        buf->bs_xflags = xfs_ip2xflags(ip);
        buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
        buf->bs_extents = dic->di_nextents;
        memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
        buf->bs_dmevmask = dic->di_dmevmask;
        buf->bs_dmstate = dic->di_dmstate;
        buf->bs_aextents = dic->di_anextents;
        buf->bs_forkoff = XFS_IFORK_BOFF(ip);

        if (dic->di_version == 3) {
                if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
                        buf->bs_cowextsize = dic->di_cowextsize <<
                                        mp->m_sb.sb_blocklog;
        }

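        /*
         * Block and device number accounting depends on the on-disk inode
         * format: device nodes carry an encoded device number, local-format
         * inodes keep their data inside the inode itself, and extent/btree
         * inodes report allocated blocks plus any delayed allocation blocks.
         */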
        switch (dic->di_format) {
        case XFS_DINODE_FMT_DEV:
                buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
                buf->bs_blksize = BLKDEV_IOSIZE;
                buf->bs_blocks = 0;
                break;
        case XFS_DINODE_FMT_LOCAL:
                buf->bs_rdev = 0;
                buf->bs_blksize = mp->m_sb.sb_blocksize;
                buf->bs_blocks = 0;
                break;
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                buf->bs_rdev = 0;
                buf->bs_blksize = mp->m_sb.sb_blocksize;
                buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
                break;
        }
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        IRELE(ip);

        error = formatter(buffer, ubsize, ubused, buf);
        if (!error)
                *stat = BULKSTAT_RV_DIDONE;

 out_free:
        kmem_free(buf);
        return error;
}

/* Return 0 on success or a negative error */
STATIC int
xfs_bulkstat_one_fmt(
        void                    __user *ubuffer,
        int                     ubsize,
        int                     *ubused,
        const xfs_bstat_t       *buffer)
{
        if (ubsize < sizeof(*buffer))
                return -ENOMEM;
        if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
                return -EFAULT;
        if (ubused)
                *ubused = sizeof(*buffer);
        return 0;
}

int
xfs_bulkstat_one(
        xfs_mount_t     *mp,            /* mount point for filesystem */
        xfs_ino_t       ino,            /* inode number to get data for */
        void            __user *buffer, /* buffer to place output in */
        int             ubsize,         /* size of buffer */
        int             *ubused,        /* bytes used by me */
        int             *stat)          /* BULKSTAT_RV_... */
{
        return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
                                    xfs_bulkstat_one_fmt, ubused, stat);
}

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_bulkstat_ichunk_ra(
        struct xfs_mount                *mp,
        xfs_agnumber_t                  agno,
        struct xfs_inobt_rec_incore     *irec)
{
        xfs_agblock_t                   agbno;
        struct blk_plug                 plug;
        int                             blks_per_cluster;
        int                             inodes_per_cluster;
        int                             i;      /* inode chunk index */

        agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
        blks_per_cluster = xfs_icluster_size_fsb(mp);
        inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;

        blk_start_plug(&plug);
        for (i = 0; i < XFS_INODES_PER_CHUNK;
             i += inodes_per_cluster, agbno += blks_per_cluster) {
                if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
                        xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
                                             &xfs_inode_buf_ops);
                }
        }
        blk_finish_plug(&plug);
}

/*
 * Look up the inode chunk that the given inode lives in and then get the
 * record if we found the chunk.  If the inode was not the last in the chunk
 * and there are some inodes left allocated after it, update the data in the
 * pointed-to record and return the count of grabbed inodes.
 */
STATIC int
xfs_bulkstat_grab_ichunk(
        struct xfs_btree_cur            *cur,   /* btree cursor */
        xfs_agino_t                     agino,  /* starting inode of chunk */
        int                             *icount,/* return # of inodes grabbed */
        struct xfs_inobt_rec_incore     *irec)  /* btree record */
{
        int                             idx;    /* index into inode chunk */
        int                             stat;
        int                             error = 0;

        /* Lookup the inode chunk that this inode lives in */
        error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
        if (error)
                return error;
        if (!stat) {
                *icount = 0;
                return error;
        }

        /* Get the record, should always work */
        error = xfs_inobt_get_rec(cur, irec, &stat);
        if (error)
                return error;
        XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);

        /* Check that the record contains the inode we are looking for */
        if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
                *icount = 0;
                return 0;
        }

        idx = agino - irec->ir_startino + 1;
        if (idx < XFS_INODES_PER_CHUNK &&
            (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
                int     i;

                /* This is the right chunk and it still has some inodes
                 * allocated beyond our starting point.  Grab the chunk
                 * record and mark every inode before our start point free,
                 * since those are not of interest here.
                 */
                for (i = 0; i < idx; i++) {
                        if (XFS_INOBT_MASK(i) & ~irec->ir_free)
                                irec->ir_freecount++;
                }

                irec->ir_free |= xfs_inobt_maskn(0, idx);
                *icount = irec->ir_count - irec->ir_freecount;
        }

        return 0;
}

#define XFS_BULKSTAT_UBLEFT(ubleft)     ((ubleft) >= statstruct_size)

struct xfs_bulkstat_agichunk {
        char            __user **ac_ubuffer;/* pointer into user's buffer */
        int             ac_ubleft;      /* bytes left in user's buffer */
        int             ac_ubelem;      /* spaces used in user's buffer */
};

/*
 * Process the inodes in an inode chunk, using the supplied formatter function
 * to iget each inode and fill in the appropriate structure.
 */
static int
xfs_bulkstat_ag_ichunk(
        struct xfs_mount                *mp,
        xfs_agnumber_t                  agno,
        struct xfs_inobt_rec_incore     *irbp,
        bulkstat_one_pf                 formatter,
        size_t                          statstruct_size,
        struct xfs_bulkstat_agichunk    *acp,
        xfs_agino_t                     *last_agino)
{
        char                            __user **ubufp = acp->ac_ubuffer;
        int                             chunkidx;
        int                             error = 0;
        xfs_agino_t                     agino = irbp->ir_startino;

        for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
             chunkidx++, agino++) {
                int             fmterror;
                int             ubused;

                /* inode won't fit in buffer, we are done */
                if (acp->ac_ubleft < statstruct_size)
                        break;

                /* Skip if this inode is free */
                if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
                        continue;

                /* Get the inode and fill in a single buffer */
                ubused = statstruct_size;
                error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
                                  *ubufp, acp->ac_ubleft, &ubused, &fmterror);

                if (fmterror == BULKSTAT_RV_GIVEUP ||
                    (error && error != -ENOENT && error != -EINVAL)) {
                        acp->ac_ubleft = 0;
                        ASSERT(error);
                        break;
                }

                /* be careful not to leak error if at end of chunk */
                if (fmterror == BULKSTAT_RV_NOTHING || error) {
                        error = 0;
                        continue;
                }

                *ubufp += ubused;
                acp->ac_ubleft -= ubused;
                acp->ac_ubelem++;
        }

        /*
         * Post-update *last_agino.  At this point, agino will always point
         * one inode past the last inode we processed successfully.  Hence we
         * subtract that inode when setting the *last_agino cursor so that we
         * return the correct cookie to userspace.  On the next bulkstat call,
         * the inode under the lastino cookie will be skipped as we have
         * already processed it here.
         */
        *last_agino = agino - 1;

        return error;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int                                     /* error status */
xfs_bulkstat(
        xfs_mount_t             *mp,            /* mount point for filesystem */
        xfs_ino_t               *lastinop,      /* last inode returned */
        int                     *ubcountp,      /* size of buffer/count returned */
        bulkstat_one_pf         formatter,      /* func that'd fill a single buf */
        size_t                  statstruct_size,/* sizeof struct filling */
        char                    __user *ubuffer,/* buffer with inode stats */
        int                     *done)          /* set to 1 when there are no more stats to get */
{
        xfs_buf_t               *agbp;          /* agi header buffer */
        xfs_agino_t             agino;          /* inode # in allocation group */
        xfs_agnumber_t          agno;           /* allocation group number */
        xfs_btree_cur_t         *cur;           /* btree cursor for ialloc btree */
        xfs_inobt_rec_incore_t  *irbuf;         /* start of irec buffer */
        int                     nirbuf;         /* size of irbuf */
        int                     ubcount;        /* size of user's buffer */
        struct xfs_bulkstat_agichunk ac;
        int                     error = 0;

        /*
         * Get the last inode value, see if there's nothing to do.
         */
        agno = XFS_INO_TO_AGNO(mp, *lastinop);
        agino = XFS_INO_TO_AGINO(mp, *lastinop);
        if (agno >= mp->m_sb.sb_agcount ||
            *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
                *done = 1;
                *ubcountp = 0;
                return 0;
        }

        ubcount = *ubcountp; /* statstruct's */
        ac.ac_ubuffer = &ubuffer;
        ac.ac_ubleft = ubcount * statstruct_size; /* bytes */
        ac.ac_ubelem = 0;

        *ubcountp = 0;
        *done = 0;

        irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
        if (!irbuf)
                return -ENOMEM;
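        /*
         * The inobt records are staged in irbuf so that the AGI buffer and
         * the btree cursor can be dropped before the inodes are formatted;
         * iget cannot be called while those locks are held.
         */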
        nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);

        /*
         * Loop over the allocation groups, starting from the last
         * inode returned; 0 means start of the allocation group.
         */
        while (agno < mp->m_sb.sb_agcount) {
                struct xfs_inobt_rec_incore     *irbp = irbuf;
                struct xfs_inobt_rec_incore     *irbufend = irbuf + nirbuf;
                bool                            end_of_ag = false;
                int                             icount = 0;
                int                             stat;

                error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
                if (error)
                        break;
                /*
                 * Allocate and initialize a btree cursor for ialloc btree.
                 */
                cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
                                            XFS_BTNUM_INO);
                if (agino > 0) {
                        /*
                         * In the middle of an allocation group, we need to get
                         * the remainder of the chunk we're in.
                         */
                        struct xfs_inobt_rec_incore     r;

                        error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
                        if (error)
                                goto del_cursor;
                        if (icount) {
                                irbp->ir_startino = r.ir_startino;
                                irbp->ir_holemask = r.ir_holemask;
                                irbp->ir_count = r.ir_count;
                                irbp->ir_freecount = r.ir_freecount;
                                irbp->ir_free = r.ir_free;
                                irbp++;
                        }
                        /* Increment to the next record */
                        error = xfs_btree_increment(cur, 0, &stat);
                } else {
                        /* Start of ag.  Lookup the first inode chunk */
                        error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
                }
                if (error || stat == 0) {
                        end_of_ag = true;
                        goto del_cursor;
                }

                /*
                 * Loop through inode btree records in this ag,
                 * until we run out of inodes or space in the buffer.
                 */
                while (irbp < irbufend && icount < ubcount) {
                        struct xfs_inobt_rec_incore     r;

                        error = xfs_inobt_get_rec(cur, &r, &stat);
                        if (error || stat == 0) {
                                end_of_ag = true;
                                goto del_cursor;
                        }

                        /*
                         * If this chunk has any allocated inodes, save it.
                         * Also start read-ahead now for this chunk.
                         */
                        if (r.ir_freecount < r.ir_count) {
                                xfs_bulkstat_ichunk_ra(mp, agno, &r);
                                irbp->ir_startino = r.ir_startino;
                                irbp->ir_holemask = r.ir_holemask;
                                irbp->ir_count = r.ir_count;
                                irbp->ir_freecount = r.ir_freecount;
                                irbp->ir_free = r.ir_free;
                                irbp++;
                                icount += r.ir_count - r.ir_freecount;
                        }
                        error = xfs_btree_increment(cur, 0, &stat);
                        if (error || stat == 0) {
                                end_of_ag = true;
                                goto del_cursor;
                        }
                        cond_resched();
                }

                /*
                 * Drop the btree buffers and the agi buffer as we can't hold
                 * any of the locks these represent when calling iget.  If
                 * there is a pending error, then we are done.
                 */
del_cursor:
                xfs_btree_del_cursor(cur, error ?
                                     XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
                xfs_buf_relse(agbp);
                if (error)
                        break;
                /*
                 * Now format all the good inodes into the user's buffer.  The
                 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
                 * for the next loop iteration.
                 */
                irbufend = irbp;
                for (irbp = irbuf;
                     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
                     irbp++) {
                        error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
                                        formatter, statstruct_size, &ac,
                                        &agino);
                        if (error)
                                break;

                        cond_resched();
                }

                /*
                 * If we've run out of space or had a formatting error, we
                 * are now done.
                 */
                if (ac.ac_ubleft < statstruct_size || error)
                        break;

                if (end_of_ag) {
                        agno++;
                        agino = 0;
                }
        }
        /*
         * Done, we're either out of filesystem or space to put the data.
         */
        kmem_free(irbuf);
        *ubcountp = ac.ac_ubelem;

        /*
         * We found some inodes, so clear the error status and return them.
         * The lastino pointer will point directly at the inode that triggered
         * any error that occurred, so on the next call the error will be
         * triggered again and propagated to userspace as there will be no
         * formatted inodes in the buffer.
         */
        if (ac.ac_ubelem)
                error = 0;

        /*
         * If we ran out of filesystem, lastino will point off the end of
         * the filesystem so the next call will return immediately.
         */
        *lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
        if (agno >= mp->m_sb.sb_agcount)
                *done = 1;

        return error;
}

int
xfs_inumbers_fmt(
        void                    __user *ubuffer, /* buffer to write to */
        const struct xfs_inogrp *buffer,        /* buffer to read from */
        long                    count,          /* # of elements to read */
        long                    *written)       /* # of bytes written */
{
        if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
                return -EFAULT;
        *written = count * sizeof(*buffer);
        return 0;
}

/*
 * Return inode number table for the filesystem.
 */
int                                     /* error status */
xfs_inumbers(
        struct xfs_mount        *mp,            /* mount point for filesystem */
        xfs_ino_t               *lastino,       /* last inode returned */
        int                     *count,         /* size of buffer/count returned */
        void                    __user *ubuffer,/* buffer with inode descriptions */
        inumbers_fmt_pf         formatter)
{
        xfs_agnumber_t          agno = XFS_INO_TO_AGNO(mp, *lastino);
        xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, *lastino);
        struct xfs_btree_cur    *cur = NULL;
        struct xfs_buf          *agbp = NULL;
        struct xfs_inogrp       *buffer;
        int                     bcount;
        int                     left = *count;
        int                     bufidx = 0;
        int                     error = 0;

        *count = 0;
        if (agno >= mp->m_sb.sb_agcount ||
            *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
                return error;

        bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
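        /*
         * Stage up to a page worth of xfs_inogrp records in a kernel buffer
         * and push them out to userspace through the formatter whenever the
         * buffer fills up or the walk finishes.
         */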
        buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
        do {
                struct xfs_inobt_rec_incore     r;
                int                             stat;

                if (!agbp) {
                        error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
                        if (error)
                                break;

                        cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
                                                    XFS_BTNUM_INO);
                        error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
                                                 &stat);
                        if (error)
                                break;
                        if (!stat)
                                goto next_ag;
                }

                error = xfs_inobt_get_rec(cur, &r, &stat);
                if (error)
                        break;
                if (!stat)
                        goto next_ag;

                agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
                buffer[bufidx].xi_startino =
                        XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
                buffer[bufidx].xi_alloccount = r.ir_count - r.ir_freecount;
                buffer[bufidx].xi_allocmask = ~r.ir_free;
                if (++bufidx == bcount) {
                        long    written;

                        error = formatter(ubuffer, buffer, bufidx, &written);
                        if (error)
                                break;
                        ubuffer += written;
                        *count += bufidx;
                        bufidx = 0;
                }
                if (!--left)
                        break;

                error = xfs_btree_increment(cur, 0, &stat);
                if (error)
                        break;
                if (stat)
                        continue;

next_ag:
                xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                cur = NULL;
                xfs_buf_relse(agbp);
                agbp = NULL;
                agino = 0;
                agno++;
        } while (agno < mp->m_sb.sb_agcount);

        if (!error) {
                if (bufidx) {
                        long    written;

                        error = formatter(ubuffer, buffer, bufidx, &written);
                        if (!error)
                                *count += bufidx;
                }
                *lastino = XFS_AGINO_TO_INO(mp, agno, agino);
        }

        kmem_free(buffer);
        if (cur)
                xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
                                           XFS_BTREE_NOERROR));
        if (agbp)
                xfs_buf_relse(agbp);

        return error;
}