/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

STATIC int
xfs_internal_inum(
        xfs_mount_t     *mp,
        xfs_ino_t       ino)
{
        return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
                (xfs_sb_version_hasquota(&mp->m_sb) &&
                 xfs_is_quota_inode(&mp->m_sb, ino)));
}

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 */
int
xfs_bulkstat_one_int(
        struct xfs_mount        *mp,            /* mount point for filesystem */
        xfs_ino_t               ino,            /* inode to get data for */
        void __user             *buffer,        /* buffer to place output in */
        int                     ubsize,         /* size of buffer */
        bulkstat_one_fmt_pf     formatter,      /* formatter, copy to user */
        int                     *ubused,        /* bytes used by me */
        int                     *stat)          /* BULKSTAT_RV_... */
{
        struct xfs_icdinode     *dic;           /* dinode core info pointer */
        struct xfs_inode        *ip;            /* incore inode pointer */
        struct inode            *inode;
        struct xfs_bstat        *buf;           /* return buffer */
        int                     error = 0;      /* error value */

        *stat = BULKSTAT_RV_NOTHING;

        if (!buffer || xfs_internal_inum(mp, ino))
                return -EINVAL;

        buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
        if (!buf)
                return -ENOMEM;

        error = xfs_iget(mp, NULL, ino,
                         (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
                         XFS_ILOCK_SHARED, &ip);
        if (error)
                goto out_free;

        ASSERT(ip != NULL);
        ASSERT(ip->i_imap.im_blkno != 0);
        inode = VFS_I(ip);

        dic = &ip->i_d;

        /* xfs_iget returns the following without needing
         * further change.
         */
        buf->bs_projid_lo = dic->di_projid_lo;
        buf->bs_projid_hi = dic->di_projid_hi;
        buf->bs_ino = ino;
        buf->bs_mode = dic->di_mode;
        buf->bs_uid = dic->di_uid;
        buf->bs_gid = dic->di_gid;
        buf->bs_size = dic->di_size;

        buf->bs_nlink = inode->i_nlink;
        buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
        buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
        buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
        buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
        buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
        buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
        buf->bs_gen = inode->i_generation;

        buf->bs_xflags = xfs_ip2xflags(ip);
        buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
        buf->bs_extents = dic->di_nextents;
        memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
        buf->bs_dmevmask = dic->di_dmevmask;
        buf->bs_dmstate = dic->di_dmstate;
        buf->bs_aextents = dic->di_anextents;
        buf->bs_forkoff = XFS_IFORK_BOFF(ip);

        switch (dic->di_format) {
        case XFS_DINODE_FMT_DEV:
                buf->bs_rdev = ip->i_df.if_u2.if_rdev;
                buf->bs_blksize = BLKDEV_IOSIZE;
                buf->bs_blocks = 0;
                break;
        case XFS_DINODE_FMT_LOCAL:
        case XFS_DINODE_FMT_UUID:
                buf->bs_rdev = 0;
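                /*
                 * Local and UUID format forks live entirely inside the inode
                 * literal area, so there are no data blocks to report.
                 */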
                buf->bs_blksize = mp->m_sb.sb_blocksize;
                buf->bs_blocks = 0;
                break;
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                buf->bs_rdev = 0;
                buf->bs_blksize = mp->m_sb.sb_blocksize;
                buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
                break;
        }
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        IRELE(ip);

        error = formatter(buffer, ubsize, ubused, buf);
        if (!error)
                *stat = BULKSTAT_RV_DIDONE;

out_free:
        kmem_free(buf);
        return error;
}

/* Return 0 on success or a negative errno */
STATIC int
xfs_bulkstat_one_fmt(
        void                    __user *ubuffer,
        int                     ubsize,
        int                     *ubused,
        const xfs_bstat_t       *buffer)
{
        if (ubsize < sizeof(*buffer))
                return -ENOMEM;
        if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
                return -EFAULT;
        if (ubused)
                *ubused = sizeof(*buffer);
        return 0;
}

int
xfs_bulkstat_one(
        xfs_mount_t     *mp,            /* mount point for filesystem */
        xfs_ino_t       ino,            /* inode number to get data for */
        void __user     *buffer,        /* buffer to place output in */
        int             ubsize,         /* size of buffer */
        int             *ubused,        /* bytes used by me */
        int             *stat)          /* BULKSTAT_RV_... */
{
        return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
                                    xfs_bulkstat_one_fmt, ubused, stat);
}

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
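 * The readahead is purely speculative: it pulls the inode cluster buffers
 * into the buffer cache ahead of time so that the xfs_iget() calls issued
 * later by the formatter do not each stall on a synchronous read.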
 */
STATIC void
xfs_bulkstat_ichunk_ra(
        struct xfs_mount                *mp,
        xfs_agnumber_t                  agno,
        struct xfs_inobt_rec_incore     *irec)
{
        xfs_agblock_t                   agbno;
        struct blk_plug                 plug;
        int                             blks_per_cluster;
        int                             inodes_per_cluster;
        int                             i;      /* inode chunk index */

        agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
        blks_per_cluster = xfs_icluster_size_fsb(mp);
        inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;

        blk_start_plug(&plug);
        for (i = 0; i < XFS_INODES_PER_CHUNK;
             i += inodes_per_cluster, agbno += blks_per_cluster) {
                if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
                        xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
                                             &xfs_inode_buf_ops);
                }
        }
        blk_finish_plug(&plug);
}

/*
 * Look up the inode chunk that the given inode lives in and then get the
 * record if we found the chunk.  If the inode is not the last one in the
 * chunk and some of the inodes after it are still allocated, update the
 * record so the earlier inodes appear free and return the count of inodes
 * grabbed.
 */
STATIC int
xfs_bulkstat_grab_ichunk(
        struct xfs_btree_cur            *cur,   /* btree cursor */
        xfs_agino_t                     agino,  /* starting inode of chunk */
        int                             *icount,/* return # of inodes grabbed */
        struct xfs_inobt_rec_incore     *irec)  /* btree record */
{
        int                             idx;    /* index into inode chunk */
        int                             stat;
        int                             error = 0;

        /* Lookup the inode chunk that this inode lives in */
        error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
        if (error)
                return error;
        if (!stat) {
                *icount = 0;
                return error;
        }

        /* Get the record, should always work */
        error = xfs_inobt_get_rec(cur, irec, &stat);
        if (error)
                return error;
        XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);

        /* Check if the record contains the inode in request */
        if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
                *icount = 0;
                return 0;
        }

        idx = agino - irec->ir_startino + 1;
        if (idx < XFS_INODES_PER_CHUNK &&
            (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
                int     i;

                /*
                 * The chunk we found still has some inodes allocated beyond
                 * the requested starting inode.  Grab the chunk record and
                 * mark all the uninteresting inodes free, because they sit
                 * before our start point.
                 */
                for (i = 0; i < idx; i++) {
                        if (XFS_INOBT_MASK(i) & ~irec->ir_free)
                                irec->ir_freecount++;
                }

                irec->ir_free |= xfs_inobt_maskn(0, idx);
                *icount = irec->ir_count - irec->ir_freecount;
        }

        return 0;
}

#define XFS_BULKSTAT_UBLEFT(ubleft)     ((ubleft) >= statstruct_size)

struct xfs_bulkstat_agichunk {
        char            __user **ac_ubuffer;    /* pointer into user's buffer */
        int             ac_ubleft;              /* bytes left in user's buffer */
        int             ac_ubelem;              /* spaces used in user's buffer */
};

/*
 * Process inodes in chunk with a pointer to a formatter function
 * that will iget the inode and fill in the appropriate structure.
 */
static int
xfs_bulkstat_ag_ichunk(
        struct xfs_mount                *mp,
        xfs_agnumber_t                  agno,
        struct xfs_inobt_rec_incore     *irbp,
        bulkstat_one_pf                 formatter,
        size_t                          statstruct_size,
        struct xfs_bulkstat_agichunk    *acp,
        xfs_agino_t                     *last_agino)
{
        char                            __user **ubufp = acp->ac_ubuffer;
        int                             chunkidx;
        int                             error = 0;
        xfs_agino_t                     agino = irbp->ir_startino;

        for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
             chunkidx++, agino++) {
                int             fmterror;
                int             ubused;

                /* inode won't fit in buffer, we are done */
                if (acp->ac_ubleft < statstruct_size)
                        break;

                /* Skip if this inode is free */
                if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
                        continue;

                /* Get the inode and fill in a single buffer */
                ubused = statstruct_size;
                error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
                                  *ubufp, acp->ac_ubleft, &ubused, &fmterror);

                if (fmterror == BULKSTAT_RV_GIVEUP ||
                    (error && error != -ENOENT && error != -EINVAL)) {
                        acp->ac_ubleft = 0;
                        ASSERT(error);
                        break;
                }

                /* be careful not to leak error if at end of chunk */
                if (fmterror == BULKSTAT_RV_NOTHING || error) {
                        error = 0;
                        continue;
                }

                *ubufp += ubused;
                acp->ac_ubleft -= ubused;
                acp->ac_ubelem++;
        }

        /*
         * Post-update *last_agino.  At this point, agino will always point
         * one inode past the last inode we processed successfully.  Hence we
         * subtract that inode when setting the *last_agino cursor so that we
         * return the correct cookie to userspace.  On the next bulkstat call,
         * the inode under the lastino cookie will be skipped as we have
         * already processed it here.
         */
        *last_agino = agino - 1;

        return error;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int                                     /* error status */
xfs_bulkstat(
        xfs_mount_t             *mp,    /* mount point for filesystem */
        xfs_ino_t               *lastinop, /* last inode returned */
        int                     *ubcountp, /* size of buffer/count returned */
        bulkstat_one_pf         formatter, /* func that'd fill a single buf */
        size_t                  statstruct_size, /* sizeof struct filling */
        char                    __user *ubuffer, /* buffer with inode stats */
        int                     *done)  /* 1 if there are no more stats to get */
{
        xfs_buf_t               *agbp;  /* agi header buffer */
        xfs_agino_t             agino;  /* inode # in allocation group */
        xfs_agnumber_t          agno;   /* allocation group number */
        xfs_btree_cur_t         *cur;   /* btree cursor for ialloc btree */
        size_t                  irbsize; /* size of irec buffer in bytes */
        xfs_inobt_rec_incore_t  *irbuf; /* start of irec buffer */
        int                     nirbuf; /* size of irbuf */
        int                     ubcount; /* size of user's buffer */
        struct xfs_bulkstat_agichunk ac;
        int                     error = 0;

        /*
         * Get the last inode value, see if there's nothing to do.
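         * A lastino cookie that does not decode back to the same agno/agino
         * pair, or that points past the last allocation group, means there
         * is nothing left to return.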
         */
        agno = XFS_INO_TO_AGNO(mp, *lastinop);
        agino = XFS_INO_TO_AGINO(mp, *lastinop);
        if (agno >= mp->m_sb.sb_agcount ||
            *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
                *done = 1;
                *ubcountp = 0;
                return 0;
        }

        ubcount = *ubcountp; /* statstruct's */
        ac.ac_ubuffer = &ubuffer;
        ac.ac_ubleft = ubcount * statstruct_size; /* bytes */
        ac.ac_ubelem = 0;

        *ubcountp = 0;
        *done = 0;

        irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
        if (!irbuf)
                return -ENOMEM;

        nirbuf = irbsize / sizeof(*irbuf);

        /*
         * Loop over the allocation groups, starting from the last
         * inode returned; 0 means start of the allocation group.
         */
        while (agno < mp->m_sb.sb_agcount) {
                struct xfs_inobt_rec_incore     *irbp = irbuf;
                struct xfs_inobt_rec_incore     *irbufend = irbuf + nirbuf;
                bool                            end_of_ag = false;
                int                             icount = 0;
                int                             stat;

                error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
                if (error)
                        break;
                /*
                 * Allocate and initialize a btree cursor for ialloc btree.
                 */
                cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
                                            XFS_BTNUM_INO);
                if (agino > 0) {
                        /*
                         * In the middle of an allocation group, we need to get
                         * the remainder of the chunk we're in.
                         */
                        struct xfs_inobt_rec_incore     r;

                        error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
                        if (error)
                                goto del_cursor;
                        if (icount) {
                                irbp->ir_startino = r.ir_startino;
                                irbp->ir_holemask = r.ir_holemask;
                                irbp->ir_count = r.ir_count;
                                irbp->ir_freecount = r.ir_freecount;
                                irbp->ir_free = r.ir_free;
                                irbp++;
                        }
                        /* Increment to the next record */
                        error = xfs_btree_increment(cur, 0, &stat);
                } else {
                        /* Start of ag.  Lookup the first inode chunk */
                        error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
                }
                if (error || stat == 0) {
                        end_of_ag = true;
                        goto del_cursor;
                }

                /*
                 * Loop through inode btree records in this ag,
                 * until we run out of inodes or space in the buffer.
                 */
                while (irbp < irbufend && icount < ubcount) {
                        struct xfs_inobt_rec_incore     r;

                        error = xfs_inobt_get_rec(cur, &r, &stat);
                        if (error || stat == 0) {
                                end_of_ag = true;
                                goto del_cursor;
                        }

                        /*
                         * If this chunk has any allocated inodes, save it.
                         * Also start read-ahead now for this chunk.
                         */
                        if (r.ir_freecount < r.ir_count) {
                                xfs_bulkstat_ichunk_ra(mp, agno, &r);
                                irbp->ir_startino = r.ir_startino;
                                irbp->ir_holemask = r.ir_holemask;
                                irbp->ir_count = r.ir_count;
                                irbp->ir_freecount = r.ir_freecount;
                                irbp->ir_free = r.ir_free;
                                irbp++;
                                icount += r.ir_count - r.ir_freecount;
                        }
                        error = xfs_btree_increment(cur, 0, &stat);
                        if (error || stat == 0) {
                                end_of_ag = true;
                                goto del_cursor;
                        }
                        cond_resched();
                }

                /*
                 * Drop the btree buffers and the agi buffer as we can't hold
                 * any of the locks these represent when calling iget. If
                 * there is a pending error, then we are done.
                 */
del_cursor:
                xfs_btree_del_cursor(cur, error ?
                                          XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
                xfs_buf_relse(agbp);
                if (error)
                        break;
                /*
                 * Now format all the good inodes into the user's buffer. The
                 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
                 * for the next loop iteration.
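                 * If we run out of space in the user's buffer partway through,
                 * that agino value becomes the lastino cookie handed back to
                 * userspace, so the next call resumes where this one stopped.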
                 */
                irbufend = irbp;
                for (irbp = irbuf;
                     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
                     irbp++) {
                        error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
                                        formatter, statstruct_size, &ac,
                                        &agino);
                        if (error)
                                break;

                        cond_resched();
                }

                /*
                 * If we've run out of space or had a formatting error, we
                 * are now done.
                 */
                if (ac.ac_ubleft < statstruct_size || error)
                        break;

                if (end_of_ag) {
                        agno++;
                        agino = 0;
                }
        }
        /*
         * Done, we're either out of filesystem or space to put the data.
         */
        kmem_free(irbuf);
        *ubcountp = ac.ac_ubelem;

        /*
         * We found some inodes, so clear the error status and return them.
         * The lastino pointer will point directly at the inode that triggered
         * any error that occurred, so on the next call the error will be
         * triggered again and propagated to userspace as there will be no
         * formatted inodes in the buffer.
         */
        if (ac.ac_ubelem)
                error = 0;

        /*
         * If we ran out of filesystem, lastino will point off the end of
         * the filesystem so the next call will return immediately.
         */
        *lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
        if (agno >= mp->m_sb.sb_agcount)
                *done = 1;

        return error;
}

int
xfs_inumbers_fmt(
        void                    __user *ubuffer, /* buffer to write to */
        const struct xfs_inogrp *buffer,        /* buffer to read from */
        long                    count,          /* # of elements to read */
        long                    *written)       /* # of bytes written */
{
        if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
                return -EFAULT;
        *written = count * sizeof(*buffer);
        return 0;
}

/*
 * Return inode number table for the filesystem.
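 * Only the inode allocation btree records are walked here; no inodes are
 * brought into memory, which makes this considerably cheaper than bulkstat.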
 */
int                                     /* error status */
xfs_inumbers(
        struct xfs_mount        *mp,            /* mount point for filesystem */
        xfs_ino_t               *lastino,       /* last inode returned */
        int                     *count,         /* size of buffer/count returned */
        void __user             *ubuffer,       /* buffer with inode descriptions */
        inumbers_fmt_pf         formatter)
{
        xfs_agnumber_t          agno = XFS_INO_TO_AGNO(mp, *lastino);
        xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, *lastino);
        struct xfs_btree_cur    *cur = NULL;
        struct xfs_buf          *agbp = NULL;
        struct xfs_inogrp       *buffer;
        int                     bcount;
        int                     left = *count;
        int                     bufidx = 0;
        int                     error = 0;

        *count = 0;
        if (agno >= mp->m_sb.sb_agcount ||
            *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
                return error;

        bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
        buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
        do {
                struct xfs_inobt_rec_incore     r;
                int                             stat;

                if (!agbp) {
                        error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
                        if (error)
                                break;

                        cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
                                                    XFS_BTNUM_INO);
                        error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
                                                 &stat);
                        if (error)
                                break;
                        if (!stat)
                                goto next_ag;
                }

                error = xfs_inobt_get_rec(cur, &r, &stat);
                if (error)
                        break;
                if (!stat)
                        goto next_ag;

                agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
                buffer[bufidx].xi_startino =
                        XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
                buffer[bufidx].xi_alloccount = r.ir_count - r.ir_freecount;
                buffer[bufidx].xi_allocmask = ~r.ir_free;
                if (++bufidx == bcount) {
                        long    written;

                        error = formatter(ubuffer, buffer, bufidx, &written);
                        if (error)
                                break;
                        ubuffer += written;
                        *count += bufidx;
                        bufidx = 0;
                }
                if (!--left)
                        break;

                error = xfs_btree_increment(cur, 0, &stat);
                if (error)
                        break;
                if (stat)
                        continue;

next_ag:
                xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                cur = NULL;
                xfs_buf_relse(agbp);
                agbp = NULL;
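                /*
                 * Advance to the first inode of the next AG; a fresh AGI
                 * buffer and btree cursor are set up at the top of the loop.
                 */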
                agino = 0;
                agno++;
        } while (agno < mp->m_sb.sb_agcount);

        if (!error) {
                if (bufidx) {
                        long    written;

                        error = formatter(ubuffer, buffer, bufidx, &written);
                        if (!error)
                                *count += bufidx;
                }
                *lastino = XFS_AGINO_TO_INO(mp, agno, agino);
        }

        kmem_free(buffer);
        if (cur)
                xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
                                           XFS_BTREE_NOERROR));
        if (agbp)
                xfs_buf_relse(agbp);

        return error;
}