/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * Return true if @ino is one of the filesystem's internal metadata inodes:
 * the realtime bitmap inode, the realtime summary inode, or (when the
 * superblock has the quota feature bit) one of the quota inodes.  These
 * are never reported to userspace by bulkstat.
 */
STATIC int
xfs_internal_inum(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
		(xfs_sb_version_hasquota(&mp->m_sb) &&
		 xfs_is_quota_inode(&mp->m_sb, ino)));
}

/*
 * Return stat information for one inode.
 *
 * The inode number comes (directly or indirectly) from userspace, so the
 * lookup is done with XFS_IGET_UNTRUSTED (validate the number against the
 * inode btree) and XFS_IGET_DONTCACHE (don't pollute the inode cache with
 * a whole-filesystem scan).  A kernel-side struct xfs_bstat is filled from
 * the incore dinode and handed to @formatter to copy out to @buffer.
 *
 * Return 0 if ok, else positive errno.  *stat is set to
 * BULKSTAT_RV_DIDONE when the formatter consumed the record and
 * BULKSTAT_RV_NOTHING on any failure.  *ubused (when non-NULL and filled
 * by the formatter) reports bytes consumed in @buffer.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	/* internal metadata inodes are never reported */
	if (!buffer || xfs_internal_inum(mp, ino))
		return XFS_ERROR(EINVAL);

	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return XFS_ERROR(ENOMEM);

	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error) {
		*stat = BULKSTAT_RV_NOTHING;
		goto out_free;
	}

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);

	dic = &ip->i_d;

	/*
	 * xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	buf->bs_atime.tv_sec = dic->di_atime.t_sec;
	buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	/* di_extsize is in filesystem blocks; report the hint in bytes */
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	/* rdev/blksize/blocks depend on the data fork format */
	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		/* include delalloc blocks not yet converted to extents */
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	/*
	 * Drop the inode lock and reference before the copy-out: the
	 * formatter may fault on the user buffer.
	 */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);

	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}

/*
 * Default bulkstat formatter: copy one xfs_bstat record to the user
 * buffer.  Return 0 on success or positive error.
 */
STATIC int
xfs_bulkstat_one_fmt(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	if (ubsize < sizeof(*buffer))
		return XFS_ERROR(ENOMEM);
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return XFS_ERROR(EFAULT);
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

/*
 * Convenience wrapper around xfs_bulkstat_one_int() using the default
 * copy-to-user formatter; this is the bulkstat_one_pf passed to
 * xfs_bulkstat() by the ioctl paths.
 */
int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void __user	*buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}

/*
 * True while at least one more statstruct still fits in the user buffer.
 * Note: relies on a local named statstruct_size being in scope.
 */
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 *
 * *lastinop is an in/out cursor: on entry, the last inode returned by the
 * previous call (0 to start); on exit, where the next call should resume.
 * *ubcountp is in/out as well: requested record count in, records written
 * out.  *done is set to 1 when the scan has passed the end of the
 * filesystem.  Returns 0 or a positive errno; if any records were
 * formatted, errors are suppressed (rval reset to 0) so the caller
 * consumes the partial batch and hits the error on the next call.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_agblock_t		agbno = 0; /* allocation group block number */
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	int			chunkidx; /* current index into inode chunk */
	int			clustidx; /* current index into inode cluster */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	int			end_of_ag; /* set if we've seen the ag end */
	int			error;	/* error code */
	int			fmterror; /* bulkstat formatter result */
	int			i;	/* loop index */
	int			icount;	/* count of inodes good in irbuf */
	size_t			irbsize; /* size of irec buffer in bytes */
	xfs_ino_t		ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino; /* last inode number returned */
	int			nbcluster; /* # of blocks in a cluster */
	int			nicluster; /* # of inodes in a cluster */
	int			nimask;	/* mask for inode clusters */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
	int			ubcount; /* size of user's buffer */
	int			ubleft;	/* bytes left in user's buffer */
	char			__user *ubufp; /* pointer into user's buffer */
	int			ubelem;	/* spaces used in user's buffer */
	int			ubused;	/* bytes used by formatter */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 * An agno past the end, or an inode number that doesn't round-trip
	 * through the AG/agino decomposition, means the cursor is off the
	 * end of the filesystem (or garbage): report done with no records.
	 */
	ino = (xfs_ino_t)*lastinop;
	lastino = ino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}
	if (!ubcountp || *ubcountp <= 0) {
		return EINVAL;
	}
	ubcount = *ubcountp; /* statstruct's */
	ubleft = ubcount * statstruct_size; /* bytes */
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	/* geometry of an inode cluster, for readahead sizing below */
	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
		mp->m_sb.sb_inopblock :
		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	rval = 0;
	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
		cond_resched();
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.
			 */
			agno++;
			agino = 0;
			continue;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		/*
		 * If we're returning in the middle of an allocation group,
		 * we need to get the remainder of the chunk we're in.
		 */
		if (agino > 0) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Lookup the inode chunk that this inode lives in.
			 */
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
						 &tmp);
			if (!error &&	/* no I/O error */
			    tmp &&	/* lookup succeeded */
			    /* got the record, should always work */
			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
			    i == 1 &&
			    /* this is the right chunk */
			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
			    /* lastino was not last in chunk */
			    (chunkidx = agino - r.ir_startino + 1) <
				    XFS_INODES_PER_CHUNK &&
			    /* there are some left allocated */
			    xfs_inobt_maskn(chunkidx,
				    XFS_INODES_PER_CHUNK - chunkidx) &
				    ~r.ir_free) {
				/*
				 * Grab the chunk record.  Mark all the
				 * uninteresting inodes (because they're
				 * before our start point) free -- that
				 * keeps the formatting loop below from
				 * re-reporting inodes we already returned.
				 */
				for (i = 0; i < chunkidx; i++) {
					if (XFS_INOBT_MASK(i) & ~r.ir_free)
						r.ir_freecount++;
				}
				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
			} else {
				/*
				 * If any of those tests failed, bump the
				 * inode number (just in case).
				 */
				agino++;
				icount = 0;
			}
			/*
			 * In any case, increment to the next record.
			 */
			if (!error)
				error = xfs_btree_increment(cur, 0, &tmp);
		} else {
			/*
			 * Start of ag.  Lookup the first inode chunk.
			 */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
			icount = 0;
		}
		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Loop as long as we're unable to read the
			 * inode btree: skip ahead a chunk at a time and
			 * retry the lookup until it succeeds or we run
			 * off the end of the AG.
			 */
			while (error) {
				agino += XFS_INODES_PER_CHUNK;
				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
						be32_to_cpu(agi->agi_length))
					break;
				error = xfs_inobt_lookup(cur, agino,
							 XFS_LOOKUP_GE, &tmp);
				cond_resched();
			}
			/*
			 * If ran off the end of the ag either with an error,
			 * or the normal way, set end and stop collecting.
			 */
			if (error) {
				end_of_ag = 1;
				break;
			}

			error = xfs_inobt_get_rec(cur, &r, &i);
			if (error || i == 0) {
				end_of_ag = 1;
				break;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
				struct blk_plug	plug;
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.
				 */
				blk_start_plug(&plug);
				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
				for (chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += nicluster,
				     agbno += nbcluster) {
					if (xfs_inobt_maskn(chunkidx, nicluster)
					    & ~r.ir_free)
						xfs_btree_reada_bufs(mp, agno,
							agbno, nbcluster,
							&xfs_inode_buf_ops);
				}
				blk_finish_plug(&plug);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
			error = xfs_btree_increment(cur, 0, &tmp);
			cond_resched();
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
			/*
			 * Now process this chunk of inodes.
			 */
			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
			     XFS_BULKSTAT_UBLEFT(ubleft) &&
				irbp->ir_freecount < XFS_INODES_PER_CHUNK;
			     chunkidx++, clustidx++, agino++) {
				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);

				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				/*
				 * Skip if this inode is free.
				 */
				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
					lastino = ino;
					continue;
				}
				/*
				 * Count used inodes as free so we can tell
				 * when the chunk is used up.
				 */
				irbp->ir_freecount++;

				/*
				 * Get the inode and fill in a single buffer.
				 */
				ubused = statstruct_size;
				error = formatter(mp, ino, ubufp, ubleft,
						  &ubused, &fmterror);
				if (fmterror == BULKSTAT_RV_NOTHING) {
					/*
					 * ENOENT/EINVAL mean the inode went
					 * away (or is internal) between the
					 * btree scan and the iget; skip it.
					 * Anything else aborts the batch.
					 */
					if (error && error != ENOENT &&
					    error != EINVAL) {
						ubleft = 0;
						rval = error;
						break;
					}
					lastino = ino;
					continue;
				}
				if (fmterror == BULKSTAT_RV_GIVEUP) {
					ubleft = 0;
					ASSERT(error);
					rval = error;
					break;
				}
				if (ubufp)
					ubufp += ubused;
				ubleft -= ubused;
				ubelem++;
				lastino = ino;
			}

			cond_resched();
		}
		/*
		 * Set up for the next loop iteration.
		 */
		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			} else
				agino = XFS_INO_TO_AGINO(mp, lastino);
		} else
			break;
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf);
	*ubcountp = ubelem;
	/*
	 * Found some inodes, return them now and return the error next time.
	 */
	if (ubelem)
		rval = 0;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 * Special case for non-sequential one inode bulkstat.
 *
 * Tries the fast path (direct xfs_bulkstat_one on *lastinop) first, and
 * falls back to a full one-record xfs_bulkstat() scan starting just below
 * the requested inode if that fails.
 */
int					/* error status */
xfs_bulkstat_single(
	xfs_mount_t	*mp,	/* mount point for filesystem */
	xfs_ino_t	*lastinop, /* inode to return */
	char __user	*buffer, /* buffer with inode stats */
	int		*done)	/* 1 if there are more stats to get */
{
	int		count;	/* count value for bulkstat call */
	int		error;	/* return value */
	xfs_ino_t	ino;	/* filesystem inode number */
	int		res;	/* result from bs1 */

	/*
	 * note that requesting valid inode numbers which are not allocated
	 * to inodes will most likely cause xfs_imap_to_bp to generate warning
	 * messages about bad magic numbers. This is ok. The fact that
	 * the inode isn't actually an inode is handled by the
	 * error check below. Done this way to make the usual case faster
	 * at the expense of the error case.
	 */

	ino = *lastinop;
	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
				 NULL, &res);
	if (error) {
		/*
		 * Special case way failed, do it the "long" way
		 * to see if that works.  Back the cursor up one so the
		 * scan's LOOKUP includes the requested inode itself.
		 */
		(*lastinop)--;
		count = 1;
		if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
				 sizeof(xfs_bstat_t), buffer, done))
			return error;
		if (count == 0 || (xfs_ino_t)*lastinop != ino)
			return error == EFSCORRUPTED ?
			       XFS_ERROR(EINVAL) : error;
		else
			return 0;
	}
	*done = 0;
	return 0;
}

/*
 * Copy @count xfs_inogrp records to the user buffer and report the byte
 * count written through *written.
 *
 * NOTE(review): returns -EFAULT (negative) on copy failure, unlike the
 * positive XFS_ERROR() convention used elsewhere in this file; the only
 * in-file caller (xfs_inumbers) just tests for nonzero, so the sign is
 * currently harmless -- confirm against external callers of this
 * inumbers_fmt_pf before relying on it.
 */
int
xfs_inumbers_fmt(
	void			__user *ubuffer, /* buffer to write to */
	const xfs_inogrp_t	*buffer,	/* buffer to read from */
	long			count,		/* # of elements to read */
	long			*written)	/* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

/*
 * Return inode number table for the filesystem.
 *
 * Walks the inode allocation btrees AG by AG, emitting one xfs_inogrp
 * (chunk start inode, allocated count, allocation mask) per inode chunk.
 * *lastino/*count are in/out cursors just like xfs_bulkstat's.  Records
 * are batched in a kernel buffer and flushed through @formatter whenever
 * it fills, and once more at the end.
 *
 * The loop keeps cur/agbp as optional state: both are NULL whenever the
 * AGI buffer and cursor are not held, and every error path releases and
 * NULLs them before continuing so the top of the loop re-reads the AGI.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,	/* mount point for filesystem */
	xfs_ino_t	*lastino, /* last inode returned */
	int		*count,	/* size of buffer/count returned */
	void		__user *ubuffer, /* buffer with inode descriptions */
	inumbers_fmt_pf	formatter)
{
	xfs_buf_t		*agbp;
	xfs_agino_t		agino;
	xfs_agnumber_t		agno;
	int			bcount;
	xfs_inogrp_t		*buffer;
	int			bufidx;
	xfs_btree_cur_t		*cur;
	int			error;
	xfs_inobt_rec_incore_t	r;
	int			i;
	xfs_ino_t		ino;
	int			left;
	int			tmp;

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	/* batch at most one page worth of records at a time */
	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		if (agbp == NULL) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		error = xfs_inobt_get_rec(cur, &r, &i);
		if (error || i == 0) {
			/* end of this AG (or read error): move to the next */
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		/* remember the last inode of this chunk for *lastino */
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount =
			XFS_INODES_PER_CHUNK - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		bufidx++;
		left--;
		if (bufidx == bcount) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written)) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_btree_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	if (!error) {
		/* flush any partially filled batch */
		if (bufidx) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);
	return error;
}