/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

STATIC int
xfs_internal_inum(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
		(xfs_sb_version_hasquota(&mp->m_sb) &&
		 xfs_is_quota_inode(&mp->m_sb, ino)));
}

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	if (!buffer || xfs_internal_inum(mp, ino))
		return -EINVAL;

	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return -ENOMEM;

	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error)
		goto out_free;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);

	dic = &ip->i_d;

	/* xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	buf->bs_atime.tv_sec = dic->di_atime.t_sec;
	buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
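	/*
	 * Extent and btree format inodes report their on-disk block count
	 * plus any delayed allocation blocks that have not been written
	 * back yet.
	 */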
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);
	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}

/* Return 0 on success or a negative errno */
STATIC int
xfs_bulkstat_one_fmt(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	if (ubsize < sizeof(*buffer))
		return -ENOMEM;
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return -EFAULT;
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void __user	*buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}

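/*
 * Note: xfs_bulkstat_one() is the bulkstat_one_pf formatter that the native
 * bulkstat ioctl path typically passes to xfs_bulkstat() below; callers that
 * need a different user-visible layout supply their own formatter instead.
 */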

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_bulkstat_ichunk_ra(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irec)
{
	xfs_agblock_t			agbno;
	struct blk_plug			plug;
	int				blks_per_cluster;
	int				inodes_per_cluster;
	int				i;	/* inode chunk index */

	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;

	blk_start_plug(&plug);
	for (i = 0; i < XFS_INODES_PER_CHUNK;
	     i += inodes_per_cluster, agbno += blks_per_cluster) {
		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
					     &xfs_inode_buf_ops);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * Lookup the inode chunk that the given inode lives in and then get the record
 * if we found the chunk.  If the inode was not the last in the chunk and there
 * are some still allocated after it, update the pointed-to record and return
 * the count of grabbed inodes.
 */
STATIC int
xfs_bulkstat_grab_ichunk(
	struct xfs_btree_cur		*cur,	/* btree cursor */
	xfs_agino_t			agino,	/* starting inode of chunk */
	int				*icount,/* return # of inodes grabbed */
	struct xfs_inobt_rec_incore	*irec)	/* btree record */
{
	int				idx;	/* index into inode chunk */
	int				stat;
	int				error = 0;

	/* Lookup the inode chunk that this inode lives in */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
	if (error)
		return error;
	if (!stat) {
		*icount = 0;
		return error;
	}

	/* Get the record, should always work */
	error = xfs_inobt_get_rec(cur, irec, &stat);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);

	/* Check if the record contains the requested inode */
	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
		*icount = 0;
		return 0;
	}

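	/*
	 * agino is the last inode the previous call already returned, so the
	 * first chunk index we still care about is the one just after it.
	 */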
	idx = agino - irec->ir_startino + 1;
	if (idx < XFS_INODES_PER_CHUNK &&
	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
		int	i;

		/*
		 * We found the right chunk and it still has some inodes
		 * allocated at or beyond our start point.  Grab the chunk
		 * record and mark all the uninteresting inodes free -- they
		 * sit before our start point.
		 */
		for (i = 0; i < idx; i++) {
			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
				irec->ir_freecount++;
		}

		irec->ir_free |= xfs_inobt_maskn(0, idx);
		*icount = irec->ir_count - irec->ir_freecount;
	}

	return 0;
}

#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

struct xfs_bulkstat_agichunk {
	char		__user **ac_ubuffer;/* pointer into user's buffer */
	int		ac_ubleft;	/* bytes left in user's buffer */
	int		ac_ubelem;	/* spaces used in user's buffer */
};

/*
 * Process inodes in chunk with a pointer to a formatter function
 * that will iget the inode and fill in the appropriate structure.
 */
static int
xfs_bulkstat_ag_ichunk(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irbp,
	bulkstat_one_pf			formatter,
	size_t				statstruct_size,
	struct xfs_bulkstat_agichunk	*acp,
	xfs_agino_t			*last_agino)
{
	char				__user **ubufp = acp->ac_ubuffer;
	int				chunkidx;
	int				error = 0;
	xfs_agino_t			agino = irbp->ir_startino;

	for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
	     chunkidx++, agino++) {
		int		fmterror;
		int		ubused;

		/* inode won't fit in buffer, we are done */
		if (acp->ac_ubleft < statstruct_size)
			break;

		/* Skip if this inode is free */
		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
			continue;

		/* Get the inode and fill in a single buffer */
		ubused = statstruct_size;
		error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
				  *ubufp, acp->ac_ubleft, &ubused, &fmterror);

		if (fmterror == BULKSTAT_RV_GIVEUP ||
		    (error && error != -ENOENT && error != -EINVAL)) {
			acp->ac_ubleft = 0;
			ASSERT(error);
			break;
		}

		/* be careful not to leak error if at end of chunk */
		if (fmterror == BULKSTAT_RV_NOTHING || error) {
			error = 0;
			continue;
		}

		*ubufp += ubused;
		acp->ac_ubleft -= ubused;
		acp->ac_ubelem++;
	}

	/*
	 * Post-update *last_agino.  At this point, agino will always point one
	 * inode past the last inode we processed successfully.  Hence we
	 * subtract that inode when setting the *last_agino cursor so that we
	 * return the correct cookie to userspace.  On the next bulkstat call,
	 * the inode under the lastino cookie will be skipped as we have already
	 * processed it here.
	 */
	*last_agino = agino - 1;

	return error;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_ino_t		*lastinop,	/* last inode returned */
	int			*ubcountp,	/* size of buffer/count returned */
	bulkstat_one_pf		formatter,	/* func that'd fill a single buf */
	size_t			statstruct_size,/* sizeof struct filling */
	char			__user *ubuffer,/* buffer with inode stats */
	int			*done)		/* 1 if there are no more stats to get */
{
	xfs_buf_t		*agbp;		/* agi header buffer */
	xfs_agino_t		agino;		/* inode # in allocation group */
	xfs_agnumber_t		agno;		/* allocation group number */
	xfs_btree_cur_t		*cur;		/* btree cursor for ialloc btree */
	size_t			irbsize;	/* size of irec buffer in bytes */
	xfs_inobt_rec_incore_t	*irbuf;		/* start of irec buffer */
	int			nirbuf;		/* size of irbuf */
	int			ubcount;	/* size of user's buffer */
	struct xfs_bulkstat_agichunk ac;
	int			error = 0;

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	agno = XFS_INO_TO_AGNO(mp, *lastinop);
	agino = XFS_INO_TO_AGINO(mp, *lastinop);
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}

	ubcount = *ubcountp; /* statstruct's */
	ac.ac_ubuffer = &ubuffer;
	ac.ac_ubleft = ubcount * statstruct_size; /* bytes */
	ac.ac_ubelem = 0;

	*ubcountp = 0;
	*done = 0;

	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return -ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	while (agno < mp->m_sb.sb_agcount) {
		struct xfs_inobt_rec_incore	*irbp = irbuf;
		struct xfs_inobt_rec_incore	*irbufend = irbuf + nirbuf;
		bool				end_of_ag = false;
		int				icount = 0;
		int				stat;

		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error)
			break;
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
					    XFS_BTNUM_INO);
		if (agino > 0) {
			/*
			 * In the middle of an allocation group, we need to get
			 * the remainder of the chunk we're in.
			 */
			struct xfs_inobt_rec_incore	r;

			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
			if (error)
				goto del_cursor;
			if (icount) {
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
			}
			/* Increment to the next record */
			error = xfs_btree_increment(cur, 0, &stat);
		} else {
			/* Start of ag.  Lookup the first inode chunk */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
		}
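		/*
		 * If the lookup or the increment above failed, or found no
		 * further records, we are done with this AG.
		 */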
		if (error || stat == 0) {
			end_of_ag = true;
			goto del_cursor;
		}

		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			struct xfs_inobt_rec_incore	r;

			error = xfs_inobt_get_rec(cur, &r, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < r.ir_count) {
				xfs_bulkstat_ichunk_ra(mp, agno, &r);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += r.ir_count - r.ir_freecount;
			}
			error = xfs_btree_increment(cur, 0, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}
			cond_resched();
		}

		/*
		 * Drop the btree buffers and the agi buffer as we can't hold any
		 * of the locks these represent when calling iget.  If there is a
		 * pending error, then we are done.
		 */
del_cursor:
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		if (error)
			break;
		/*
		 * Now format all the good inodes into the user's buffer.  The
		 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
		 * for the next loop iteration.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
		     irbp++) {
			error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
					formatter, statstruct_size, &ac,
					&agino);
			if (error)
				break;

			cond_resched();
		}

		/*
		 * If we've run out of space or had a formatting error, we
		 * are now done.
		 */
		if (ac.ac_ubleft < statstruct_size || error)
			break;

		if (end_of_ag) {
			agno++;
			agino = 0;
		}
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf);
	*ubcountp = ac.ac_ubelem;

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The lastino pointer will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (ac.ac_ubelem)
		error = 0;

	/*
	 * If we ran out of filesystem, lastino will point off the end of
	 * the filesystem so the next call will return immediately.
	 */
	*lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
	if (agno >= mp->m_sb.sb_agcount)
		*done = 1;

	return error;
}

int
xfs_inumbers_fmt(
	void			__user *ubuffer, /* buffer to write to */
	const struct xfs_inogrp	*buffer,	/* buffer to read from */
	long			count,		/* # of elements to read */
	long			*written)	/* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

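/*
 * Note: xfs_inumbers_fmt() is the inumbers_fmt_pf formatter that the
 * inode-group (FSINUMBERS) ioctl path typically hands to xfs_inumbers()
 * below to copy xfs_inogrp records out to userspace.
 */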

/*
 * Return inode number table for the filesystem.
 */
int					/* error status */
xfs_inumbers(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		*lastino,	/* last inode returned */
	int			*count,		/* size of buffer/count returned */
	void			__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf		formatter)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, *lastino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, *lastino);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_buf		*agbp = NULL;
	struct xfs_inogrp	*buffer;
	int			bcount;
	int			left = *count;
	int			bufidx = 0;
	int			error = 0;

	*count = 0;
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
		return error;

	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	do {
		struct xfs_inobt_rec_incore	r;
		int				stat;

		if (!agbp) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error)
				break;

			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
						    XFS_BTNUM_INO);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &stat);
			if (error)
				break;
			if (!stat)
				goto next_ag;
		}

		error = xfs_inobt_get_rec(cur, &r, &stat);
		if (error)
			break;
		if (!stat)
			goto next_ag;

		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount = r.ir_count - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		if (++bufidx == bcount) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (error)
				break;
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (!--left)
			break;

		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
		if (stat)
			continue;

next_ag:
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		cur = NULL;
		xfs_buf_relse(agbp);
		agbp = NULL;
		agino = 0;
		agno++;
	} while (agno < mp->m_sb.sb_agcount);

	if (!error) {
		if (bufidx) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (!error)
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}

	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);

	return error;
}