/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

/*
 * Find the buffer associated with the given inode map.  We do basic
 * validation checks on the buffer once it has been retrieved from
 * disk.
 */
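/*
 * Illustrative note (not from the original source): in DEBUG builds the
 * validation loop below checks every inode in the buffer.  For a
 * hypothetical 8192-byte inode buffer holding 256-byte inodes
 * (sb_inodelog == 8), ni = BBTOB(im_len) >> 8 == 32, so all 32 inodes
 * get checked; non-DEBUG builds check only the first one.
 */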
STATIC int
xfs_imap_to_bp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	struct xfs_imap	*imap,
	xfs_buf_t	**bpp,
	uint		buf_flags,
	uint		iget_flags)
{
	int		error;
	int		i;
	int		ni;
	xfs_buf_t	*bp;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp);
	if (error) {
		if (error != EAGAIN) {
			xfs_warn(mp,
				"%s: xfs_trans_read_buf() returned error %d.",
				__func__, error);
		} else {
			ASSERT(buf_flags & XBF_TRYLOCK);
		}
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the
	 * buffer (if DEBUG kernel) or just the first inode otherwise.
	 */
#ifdef DEBUG
	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC &&
			    XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (iget_flags & XFS_IGET_UNTRUSTED) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
			XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
						XFS_ERRLEVEL_HIGH, mp, dip);
#ifdef DEBUG
			xfs_emerg(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)imap->im_blkno, i,
				be16_to_cpu(dip->di_magic));
			ASSERT(0);
#endif
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good.
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);

	*bpp = bp;
	return 0;
}

/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
int
xfs_inotobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	int		*offset,
	uint		imap_flags)
{
	struct xfs_imap	imap;
	xfs_buf_t	*bp;
	int		error;

	imap.im_blkno = 0;
	error = xfs_imap(mp, tp, ino, &imap, imap_flags);
	if (error)
		return error;

	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
	if (error)
		return error;

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	*offset = imap.im_boffset;
	return 0;
}
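/*
 * Illustrative sketch (not part of the original code): the lookup
 * helpers above and below both funnel through xfs_imap_to_bp():
 *
 *	xfs_inotobp():	ino --> xfs_imap() --> imap --> buffer + offset
 *	xfs_itobp():	reuses the imap cached in ip->i_imap, skipping
 *			the inode btree lookup entirely
 */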
/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dipp parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * The inode is expected to have already been mapped to its buffer and
 * read in once, thus we can use the mapping information stored in the
 * inode rather than calling xfs_imap().  This allows us to avoid the
 * overhead of looking at the inode btree for small block file systems
 * (see xfs_imap()).
 */
int
xfs_itobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	uint		buf_flags)
{
	xfs_buf_t	*bp;
	int		error;

	ASSERT(ip->i_imap.im_blkno != 0);

	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);
	if (error)
		return error;

	if (!bp) {
		ASSERT(buf_flags & XBF_TRYLOCK);
		ASSERT(tp == NULL);
		*bpp = NULL;
		return EAGAIN;
	}

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
	*bpp = bp;
	return 0;
}

/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error;
	xfs_fsize_t		di_size;
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	error = 0;

	if (unlikely(be32_to_cpu(dip->di_nextents) +
		     be16_to_cpu(dip->di_anextents) >
		     be64_to_cpu(dip->di_nblocks))) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_nextents) +
			      be16_to_cpu(dip->di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
		     !ip->i_mount->m_rtdev_targp)) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, has realtime flag set.",
			ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_size = 0;
		ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = be64_to_cpu(dip->di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
	ip->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	switch (dip->di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);

		if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
			xfs_warn(ip->i_mount,
				"corrupt inode %Lu (bad attr fork size %Ld).",
				(unsigned long long) ip->i_ino,
				(long long) size);
			XFS_CORRUPTION_ERROR("xfs_iformat(8)",
					     XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}

		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}
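/*
 * Illustrative layout sketch (an assumption drawn from the checks
 * above, not a normative diagram): the literal area of the on-disk
 * inode is split at di_forkoff (in 8-byte units) into the data fork
 * and an optional attribute fork:
 *
 *	+-- inode core --+---- data fork ----+---- attr fork ----+
 *	                 ^                   ^
 *	          XFS_DFORK_DPTR      XFS_DFORK_APTR
 *
 * XFS_IFORK_DSIZE()/XFS_IFORK_ASIZE() give the two sizes, which is why
 * if_ext_max above is simply the fork size divided by
 * sizeof(xfs_bmbt_rec_t).
 */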
/*
 * The file is in-lined in the on-disk inode.  If it fits into
 * if_inline_data, then copy it there, otherwise allocate a buffer
 * for it and copy the data there.  Either way, set if_data to point
 * at the data.  If we allocate a buffer for the data, make sure that
 * its size is a multiple of 4 and record the real size in
 * if_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount,
	"corrupt inode %Lu (bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}

/*
 * The file consists of a set of extents all of which fit into the
 * on-disk inode.  If there are few enough extents to fit into
 * if_inline_ext, then copy them there.  Otherwise allocate a buffer
 * for them and copy them into it.  Either way, set if_extents to
 * point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = get_unaligned_be64(&dp->l0);
			ep->l1 = get_unaligned_be64(&dp->l1);
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}
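/*
 * Summary (illustrative, not from the original source) of the three
 * on-disk fork formats handled by the xfs_iformat_*() helpers:
 *
 *	XFS_DINODE_FMT_LOCAL:	fork data stored inline in the inode
 *	XFS_DINODE_FMT_EXTENTS:	extent records stored inline in the inode
 *	XFS_DINODE_FMT_BTREE:	only the bmap btree root is inline; the
 *				extent records are read later, on demand,
 *				by xfs_iread_extents()
 */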
/*
 * The file has too many extents to fit into the inode, so they are
 * in B-tree format.  Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The if_extents field will remain NULL
 * until all of the extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = be16_to_cpu(dfp->bb_numrecs);

	/*
	 * blow out if -- fork has less extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
	    || XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(ip->i_mount, dfp,
			 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
			 ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}

STATIC void
xfs_dinode_from_disk(
	xfs_icdinode_t	*to,
	xfs_dinode_t	*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);
}

void
xfs_dinode_to_disk(
	xfs_dinode_t	*to,
	xfs_icdinode_t	*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
}

STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
			       XBF_LOCK, iget_flags);
	if (error)
		return error;
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) {
#ifdef DEBUG
		xfs_alert(mp,
			"%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
			__func__, be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC);
#endif /* DEBUG */
		error = XFS_ERROR(EINVAL);
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat(ip, dip);
		if (error)  {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
		/*
		 * Initialize the per-fork minima and maxima for a new
		 * inode here.  xfs_iformat will do it for old inodes.
		 */
		ip->i_df.if_ext_max =
			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	}

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format.  We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		xfs_set_projid(ip, 0);
	}

	ip->i_delayed_blks = 0;
	ip->i_size = ip->i_d.di_size;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}
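/*
 * Illustrative caller pattern (a sketch, not code from this file):
 * extents for a btree-format fork are pulled in lazily, so users of
 * the extent list typically do something like:
 *
 *	ifp = XFS_IFORK_PTR(ip, whichfork);
 *	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 *		error = xfs_iread_extents(tp, ip, whichfork);
 *		if (error)
 *			return error;
 *	}
 */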
/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
	return 0;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and log
 * its initial contents.  In this case, ialloc_context would be set to
 * NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation.  Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.  In this
 * case, therefore, we will set call_again to true and return.  The
 * caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
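/*
 * A minimal sketch of the two-phase protocol described above
 * (hypothetical caller, simplified; error handling omitted):
 *
 *	error = xfs_ialloc(tp, dp, mode, 1, 0, prid, okalloc,
 *			   &ialloc_context, &call_again, &ip);
 *	if (call_again) {
 *		(commit tp while holding ialloc_context, start a fresh
 *		 transaction with a new reservation, rejoin the inode)
 *		error = xfs_ialloc(tp, dp, mode, 1, 0, prid, okalloc,
 *				   &ialloc_context, &call_again, &ip);
 *	}
 */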
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	mode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	boolean_t	*call_again,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	timespec_t	tv;
	int		filestreams = 0;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, call_again, &ino);
	if (error)
		return error;
	if (*call_again || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	ip->i_d.di_mode = (__uint16_t)mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid();
	ip->i_d.di_gid = current_fsgid();
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a
	 * version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	nanotime(&tv);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if ((mode & S_IFMT) == S_IFDIR) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if ((mode & S_IFMT) == S_IFREG) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (error < 0)
			return -error;
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}

	*ipp = ip;
	return 0;
}

/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
#ifdef DEBUG
void
xfs_isize_check(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	map_first;
	int		nimaps;
	xfs_bmbt_irec_t	imaps[2];

	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
		return;

	if (XFS_IS_REALTIME_INODE(ip))
		return;

	if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
		return;

	nimaps = 2;
	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 * an error.
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			 (XFS_B_TO_FSB(mp,
				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
			  map_first),
			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
			 NULL))
		return;
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#endif	/* DEBUG */

/*
 * Calculate the last possible buffered byte in a file.  This must
 * include data that was buffered beyond the EOF by the write code.
 * This also needs to deal with overflowing the xfs_fsize_t type
 * which can happen for sizes near the limit.
 *
 * We also need to take into account any blocks beyond the EOF.  It
 * may be the case that they were buffered by a write which failed.
 * In that case the pages will still be in memory, but the inode size
 * will never have been updated.
 */
STATIC xfs_fsize_t
xfs_file_last_byte(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_fsize_t	last_byte;
	xfs_fileoff_t	last_block;
	xfs_fileoff_t	size_last_block;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));

	mp = ip->i_mount;
	/*
	 * Only check for blocks beyond the EOF if the extents have
	 * been read in.  This eliminates the need for the inode lock,
	 * and it also saves us from looking when it really isn't
	 * necessary.
	 */
	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		error = xfs_bmap_last_offset(NULL, ip, &last_block,
					     XFS_DATA_FORK);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		if (error) {
			last_block = 0;
		}
	} else {
		last_block = 0;
	}
	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);

	last_byte = XFS_FSB_TO_B(mp, last_block);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	last_byte += (1 << mp->m_writeio_log);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	return last_byte;
}
/*
 * Start the truncation of the file to new_size.  The new size
 * must be smaller than the current size.  This routine will
 * clear the buffer and page caches of file data in the removed
 * range, and xfs_itruncate_finish() will remove the underlying
 * disk blocks.
 *
 * The inode must have its I/O lock locked EXCLUSIVELY, and it
 * must NOT have the inode lock held at all.  This is because we're
 * calling into the buffer/page cache code and we can't hold the
 * inode lock when we do so.
 *
 * We need to wait for any direct I/Os in flight to complete before we
 * proceed with the truncate.  This is needed to prevent the extents
 * being read or written by the direct I/Os from being removed while
 * the I/O is in flight as there is no other method of synchronising
 * direct I/O with the truncate operation.  Also, because we hold
 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
 * started until the truncate completes and drops the lock.  Essentially,
 * the xfs_ioend_wait() call forms an I/O barrier that provides strict
 * ordering between direct I/Os and the truncate operation.
 *
 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
 * in the case that the caller is locking things out of order and
 * may not be able to call xfs_itruncate_finish() with the inode lock
 * held without dropping the I/O lock.  If the caller must drop the
 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
 * must be called again with all the same restrictions as the initial
 * call.
 */
int
xfs_itruncate_start(
	xfs_inode_t	*ip,
	uint		flags,
	xfs_fsize_t	new_size)
{
	xfs_fsize_t	last_byte;
	xfs_off_t	toss_start;
	xfs_mount_t	*mp;
	int		error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
	       (flags == XFS_ITRUNC_MAYBE));

	mp = ip->i_mount;

	/* wait for the completion of any pending DIOs */
	if (new_size == 0 || new_size < ip->i_size)
		xfs_ioend_wait(ip);

	/*
	 * Call toss_pages or flushinval_pages to get rid of pages
	 * overlapping the region being removed.  We have to use
	 * the less efficient flushinval_pages in the case that the
	 * caller may not be able to finish the truncate without
	 * dropping the inode's I/O lock.  Make sure
	 * to catch any pages brought in by buffers overlapping
	 * the EOF by searching out beyond the isize by our
	 * block size.  We round new_size up to a block boundary
	 * so that we don't toss things on the same block as
	 * new_size but before it.
	 *
	 * Before calling toss_pages or flushinval_pages, make sure to
	 * call remapf() over the same region if the file is mapped.
	 * This frees up mapped file references to the pages in the
	 * given range and for the flushinval_pages case it ensures
	 * that we get the latest mapped changes flushed out.
	 */
	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	toss_start = XFS_FSB_TO_B(mp, toss_start);
	if (toss_start < 0) {
		/*
		 * The place to start tossing is beyond our maximum
		 * file size, so there is no way that the data extended
		 * out there.
		 */
		return 0;
	}
	last_byte = xfs_file_last_byte(ip);
	trace_xfs_itruncate_start(ip, new_size, flags, toss_start, last_byte);
	if (last_byte > toss_start) {
		if (flags & XFS_ITRUNC_DEFINITE) {
			xfs_tosspages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		} else {
			error = xfs_flushinval_pages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		}
	}

#ifdef DEBUG
	if (new_size == 0) {
		ASSERT(VN_CACHED(VFS_I(ip)) == 0);
	}
#endif
	return error;
}
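/*
 * Illustrative call sequence (a sketch assembled from the descriptions
 * here, not code from this file; the transaction type and error
 * handling are simplified assumptions):
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, new_size);
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
 *	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
 *				  XFS_TRANS_PERM_LOG_RES,
 *				  XFS_ITRUNCATE_LOG_COUNT);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip);
 *	error = xfs_itruncate_finish(&tp, ip, new_size, XFS_DATA_FORK, 1);
 *	xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 */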
/*
 * Shrink the file to the given new_size.  The new size must be smaller than
 * the current size.  This will free up the underlying blocks in the removed
 * range after a call to xfs_itruncate_start() or xfs_atruncate_start().
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * The fork parameter must be either XFS_ATTR_FORK or XFS_DATA_FORK, and it
 * indicates the fork which is to be truncated.  For the attribute fork we only
 * support truncation to size 0.
 *
 * We use the sync parameter to indicate whether or not the first transaction
 * we perform might have to be synchronous.  For the attr fork, it needs to be
 * so if the unlink of the inode is not yet known to be permanent in the log.
 * This keeps us from freeing and reusing the blocks of the attribute fork
 * before the unlink of the inode becomes permanent.
 *
 * For the data fork, we normally have to run synchronously if we're being
 * called out of the inactive path or we're being called out of the create path
 * where we're truncating an existing file.  Either way, the truncate needs to
 * be sync so blocks don't reappear in the file with altered data in case of a
 * crash.  wsync filesystems can run the first case async because anything that
 * shrinks the inode has to run sync so by the time we're called here from
 * inactive, the inode size is permanently set to 0.
 *
 * Calls from the truncate path always need to be sync unless we're in a wsync
 * filesystem and the file has already been unlinked.
 *
 * The caller is responsible for correctly setting the sync parameter.  It gets
 * too hard for us to guess here which path we're being called out of just
 * based on inode state.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_finish(
	xfs_trans_t	**tp,
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	int		fork,
	int		sync)
{
	xfs_fsblock_t	first_block;
	xfs_fileoff_t	first_unmap_block;
	xfs_fileoff_t	last_block;
	xfs_filblks_t	unmap_len = 0;
	xfs_mount_t	*mp;
	xfs_trans_t	*ntp;
	int		done;
	int		committed;
	xfs_bmap_free_t	free_list;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT(*tp != NULL);
	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_transp == *tp);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);

	ntp = *tp;
	mp = (ntp)->t_mountp;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	/*
	 * We only support truncating the entire attribute fork.
	 */
	if (fork == XFS_ATTR_FORK) {
		new_size = 0LL;
	}
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	trace_xfs_itruncate_finish_start(ip, new_size);

	/*
	 * The first thing we do is set the size to new_size permanently
	 * on disk.  This way we don't have to worry about anyone ever
	 * being able to look at the data being freed even in the face
	 * of a crash.
	 * What we're getting around here is the case where we free a
	 * block, it is allocated to another file, it is written to, and
	 * then we crash.  If the new data gets written to the file but
	 * the log buffers containing the free and reallocation don't,
	 * then we'd end up with garbage in the blocks being freed.  As
	 * long as we make the new_size permanent before actually freeing
	 * any blocks it doesn't matter if they get written to.
	 *
	 * The callers must signal into us whether or not the size
	 * setting here must be synchronous.  There are a few cases
	 * where it doesn't have to be synchronous.  Those cases
	 * occur if the file is unlinked and we know the unlink is
	 * permanent or if the blocks being truncated are guaranteed
	 * to be beyond the inode eof (regardless of the link count)
	 * and the eof value is permanent.  Both of these cases occur
	 * only on wsync-mounted filesystems.  In those cases, we're
	 * guaranteed that no user will ever see the data in the blocks
	 * that are being truncated so the truncate can run async.
	 * In the free beyond eof case, the file may wind up with
	 * more blocks allocated to it than it needs if we crash
	 * and that won't get fixed until the next time the file
	 * is re-opened and closed but that's ok as that shouldn't
	 * be too many blocks.
	 *
	 * However, we can't just make all wsync xactions run async
	 * because there's one call out of the create path that needs
	 * to run sync where it's truncating an existing file to size
	 * 0 whose size is > 0.
	 *
	 * It's probably possible to come up with a test in this
	 * routine that would correctly distinguish all the above
	 * cases from the values of the function parameters and the
	 * inode state but for sanity's sake, I've decided to let the
	 * layers above just tell us.  It's simpler to correctly figure
	 * out in the layer above exactly under what conditions we
	 * can run async and I think it's easier for others to read and
	 * follow the logic in case something has to be changed.
	 * cscope is your friend -- rcc.
	 *
	 * The attribute fork is much simpler.
	 *
	 * For the attribute fork we allow the caller to tell us whether
	 * the unlink of the inode that led to this call is yet permanent
	 * in the on disk log.  If it is not and we will be freeing extents
	 * in this inode then we make the first transaction synchronous
	 * to make sure that the unlink is permanent by the time we free
	 * the blocks.
	 */
	if (fork == XFS_DATA_FORK) {
		if (ip->i_d.di_nextents > 0) {
			/*
			 * If we are not changing the file size then do
			 * not update the on-disk file size - we may be
			 * called from xfs_inactive_free_eofblocks().  If we
			 * update the on-disk file size and then the system
			 * crashes before the contents of the file are
			 * flushed to disk then the files may be full of
			 * holes (ie NULL files bug).
			 */
			if (ip->i_size != new_size) {
				ip->i_d.di_size = new_size;
				ip->i_size = new_size;
				xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
			}
		}
	} else if (sync) {
		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
		if (ip->i_d.di_anextents > 0)
			xfs_trans_set_sync(ntp);
	}
	ASSERT(fork == XFS_DATA_FORK ||
		(fork == XFS_ATTR_FORK &&
			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	ASSERT(first_unmap_block <= last_block);
	done = 0;
	if (last_block == first_unmap_block) {
		done = 1;
	} else {
		unmap_len = last_block - first_unmap_block + 1;
	}
	while (!done) {
		/*
		 * Free up to XFS_ITRUNC_MAX_EXTENTS.  xfs_bunmapi()
		 * will tell us whether it freed the entire range or
		 * not.  If this is a synchronous mount (wsync),
		 * then we can tell bunmapi to keep all the
		 * transactions asynchronous since the unlink
		 * transaction that made this inode inactive has
		 * already hit the disk.  There's no danger of
		 * the freed blocks being reused, there being a
		 * crash, and the reused blocks suddenly reappearing
		 * in this file with garbage in them once recovery
		 * runs.
		 */
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(ntp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(fork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error) {
			/*
			 * If the bunmapi call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(tp, &free_list, &committed);
		ntp = *tp;
		if (committed)
			xfs_trans_ijoin(ntp, ip);

		if (error) {
			/*
			 * If the bmap finish call encounters an error, return
			 * to the caller where the transaction can be properly
			 * aborted.  We just need to make sure we're not
			 * holding any resources that we were not when we came
			 * in.
			 *
			 * Aborting from this point might lose some blocks in
			 * the file system, but oh well.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(ntp);
		error = xfs_trans_commit(*tp, 0);
		*tp = ntp;

		xfs_trans_ijoin(ntp, ip);

		if (error)
			return error;
		/*
		 * transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(ntp->t_ticket);
		error = xfs_trans_reserve(ntp, 0,
					  XFS_ITRUNCATE_LOG_RES(mp), 0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			return error;
	}
	/*
	 * Only update the size in the case of the data fork, but
	 * always re-log the inode so that our permanent transaction
	 * can keep on rolling it forward in the log.
	 */
	if (fork == XFS_DATA_FORK) {
		xfs_isize_check(mp, ip, new_size);
		/*
		 * If we are not changing the file size then do
		 * not update the on-disk file size - we may be
		 * called from xfs_inactive_free_eofblocks().  If we
		 * update the on-disk file size and then the system
		 * crashes before the contents of the file are
		 * flushed to disk then the files may be full of
		 * holes (ie NULL files bug).
		 */
		if (ip->i_size != new_size) {
			ip->i_d.di_size = new_size;
			ip->i_size = new_size;
		}
	}
	xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
	ASSERT((new_size != 0) ||
	       (fork == XFS_ATTR_FORK) ||
	       (ip->i_delayed_blks == 0));
	ASSERT((new_size != 0) ||
	       (fork == XFS_ATTR_FORK) ||
	       (ip->i_d.di_nextents == 0));
	trace_xfs_itruncate_finish_end(ip, new_size);
	return 0;
}

/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
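/*
 * Illustrative picture of the on-disk structure manipulated here and
 * in xfs_iunlink_remove() below (a sketch, not from the original
 * source):
 *
 *	agi_unlinked[agino % XFS_AGI_UNLINKED_BUCKETS]
 *	    --> inode A  (di_next_unlinked)
 *	            --> inode B  (di_next_unlinked)
 *	                    --> NULLAGINO
 *
 * xfs_iunlink() pushes the new inode at the head of its bucket's list;
 * xfs_iunlink_remove() unlinks either the head or, after walking the
 * chain, an interior inode.
 */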
1726 */ 1727 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); 1728 if (error) 1729 return error; 1730 1731 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); 1732 /* both on-disk, don't endian flip twice */ 1733 dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; 1734 offset = ip->i_imap.im_boffset + 1735 offsetof(xfs_dinode_t, di_next_unlinked); 1736 xfs_trans_inode_buf(tp, ibp); 1737 xfs_trans_log_buf(tp, ibp, offset, 1738 (offset + sizeof(xfs_agino_t) - 1)); 1739 xfs_inobp_check(mp, ibp); 1740 } 1741 1742 /* 1743 * Point the bucket head pointer at the inode being inserted. 1744 */ 1745 ASSERT(agino != 0); 1746 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); 1747 offset = offsetof(xfs_agi_t, agi_unlinked) + 1748 (sizeof(xfs_agino_t) * bucket_index); 1749 xfs_trans_log_buf(tp, agibp, offset, 1750 (offset + sizeof(xfs_agino_t) - 1)); 1751 return 0; 1752 } 1753 1754 /* 1755 * Pull the on-disk inode from the AGI unlinked list. 1756 */ 1757 STATIC int 1758 xfs_iunlink_remove( 1759 xfs_trans_t *tp, 1760 xfs_inode_t *ip) 1761 { 1762 xfs_ino_t next_ino; 1763 xfs_mount_t *mp; 1764 xfs_agi_t *agi; 1765 xfs_dinode_t *dip; 1766 xfs_buf_t *agibp; 1767 xfs_buf_t *ibp; 1768 xfs_agnumber_t agno; 1769 xfs_agino_t agino; 1770 xfs_agino_t next_agino; 1771 xfs_buf_t *last_ibp; 1772 xfs_dinode_t *last_dip = NULL; 1773 short bucket_index; 1774 int offset, last_offset = 0; 1775 int error; 1776 1777 mp = tp->t_mountp; 1778 agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 1779 1780 /* 1781 * Get the agi buffer first. It ensures lock ordering 1782 * on the list. 1783 */ 1784 error = xfs_read_agi(mp, tp, agno, &agibp); 1785 if (error) 1786 return error; 1787 1788 agi = XFS_BUF_TO_AGI(agibp); 1789 1790 /* 1791 * Get the index into the agi hash table for the 1792 * list this inode will go on. 1793 */ 1794 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1795 ASSERT(agino != 0); 1796 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1797 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO); 1798 ASSERT(agi->agi_unlinked[bucket_index]); 1799 1800 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { 1801 /* 1802 * We're at the head of the list. Get the inode's 1803 * on-disk buffer to see if there is anyone after us 1804 * on the list. Only modify our next pointer if it 1805 * is not already NULLAGINO. This saves us the overhead 1806 * of dealing with the buffer when there is no need to 1807 * change it. 1808 */ 1809 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); 1810 if (error) { 1811 xfs_warn(mp, "%s: xfs_itobp() returned error %d.", 1812 __func__, error); 1813 return error; 1814 } 1815 next_agino = be32_to_cpu(dip->di_next_unlinked); 1816 ASSERT(next_agino != 0); 1817 if (next_agino != NULLAGINO) { 1818 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 1819 offset = ip->i_imap.im_boffset + 1820 offsetof(xfs_dinode_t, di_next_unlinked); 1821 xfs_trans_inode_buf(tp, ibp); 1822 xfs_trans_log_buf(tp, ibp, offset, 1823 (offset + sizeof(xfs_agino_t) - 1)); 1824 xfs_inobp_check(mp, ibp); 1825 } else { 1826 xfs_trans_brelse(tp, ibp); 1827 } 1828 /* 1829 * Point the bucket head pointer at the next inode. 1830 */ 1831 ASSERT(next_agino != 0); 1832 ASSERT(next_agino != agino); 1833 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); 1834 offset = offsetof(xfs_agi_t, agi_unlinked) + 1835 (sizeof(xfs_agino_t) * bucket_index); 1836 xfs_trans_log_buf(tp, agibp, offset, 1837 (offset + sizeof(xfs_agino_t) - 1)); 1838 } else { 1839 /* 1840 * We need to search the list for the inode being freed. 
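 *
 * (Editorial note: the bucket is a singly linked list threaded
 * through the on-disk di_next_unlinked fields, so the walk below has
 * to carry the predecessor along in last_ibp/last_dip. Removing 7
 * from head -> 99 -> 12 -> 7 -> 3, say, rewrites 12's
 * di_next_unlinked from 7 to 3.)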
1841 */ 1842 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 1843 last_ibp = NULL; 1844 while (next_agino != agino) { 1845 /* 1846 * If the last inode wasn't the one pointing to 1847 * us, then release its buffer since we're not 1848 * going to do anything with it. 1849 */ 1850 if (last_ibp != NULL) { 1851 xfs_trans_brelse(tp, last_ibp); 1852 } 1853 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); 1854 error = xfs_inotobp(mp, tp, next_ino, &last_dip, 1855 &last_ibp, &last_offset, 0); 1856 if (error) { 1857 xfs_warn(mp, 1858 "%s: xfs_inotobp() returned error %d.", 1859 __func__, error); 1860 return error; 1861 } 1862 next_agino = be32_to_cpu(last_dip->di_next_unlinked); 1863 ASSERT(next_agino != NULLAGINO); 1864 ASSERT(next_agino != 0); 1865 } 1866 /* 1867 * Now last_ibp points to the buffer previous to us on 1868 * the unlinked list. Pull us from the list. 1869 */ 1870 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); 1871 if (error) { 1872 xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.", 1873 __func__, error); 1874 return error; 1875 } 1876 next_agino = be32_to_cpu(dip->di_next_unlinked); 1877 ASSERT(next_agino != 0); 1878 ASSERT(next_agino != agino); 1879 if (next_agino != NULLAGINO) { 1880 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 1881 offset = ip->i_imap.im_boffset + 1882 offsetof(xfs_dinode_t, di_next_unlinked); 1883 xfs_trans_inode_buf(tp, ibp); 1884 xfs_trans_log_buf(tp, ibp, offset, 1885 (offset + sizeof(xfs_agino_t) - 1)); 1886 xfs_inobp_check(mp, ibp); 1887 } else { 1888 xfs_trans_brelse(tp, ibp); 1889 } 1890 /* 1891 * Point the previous inode on the list to the next inode. 1892 */ 1893 last_dip->di_next_unlinked = cpu_to_be32(next_agino); 1894 ASSERT(next_agino != 0); 1895 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked); 1896 xfs_trans_inode_buf(tp, last_ibp); 1897 xfs_trans_log_buf(tp, last_ibp, offset, 1898 (offset + sizeof(xfs_agino_t) - 1)); 1899 xfs_inobp_check(mp, last_ibp); 1900 } 1901 return 0; 1902 } 1903 1904 /* 1905 * A big issue when freeing the inode cluster is that we _cannot_ skip any 1906 * inodes that are in memory - they all must be marked stale and attached to 1907 * the cluster buffer. 1908 */ 1909 STATIC void 1910 xfs_ifree_cluster( 1911 xfs_inode_t *free_ip, 1912 xfs_trans_t *tp, 1913 xfs_ino_t inum) 1914 { 1915 xfs_mount_t *mp = free_ip->i_mount; 1916 int blks_per_cluster; 1917 int nbufs; 1918 int ninodes; 1919 int i, j; 1920 xfs_daddr_t blkno; 1921 xfs_buf_t *bp; 1922 xfs_inode_t *ip; 1923 xfs_inode_log_item_t *iip; 1924 xfs_log_item_t *lip; 1925 struct xfs_perag *pag; 1926 1927 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum)); 1928 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { 1929 blks_per_cluster = 1; 1930 ninodes = mp->m_sb.sb_inopblock; 1931 nbufs = XFS_IALLOC_BLOCKS(mp); 1932 } else { 1933 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) / 1934 mp->m_sb.sb_blocksize; 1935 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock; 1936 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster; 1937 } 1938 1939 for (j = 0; j < nbufs; j++, inum += ninodes) { 1940 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 1941 XFS_INO_TO_AGBNO(mp, inum)); 1942 1943 /* 1944 * We obtain and lock the backing buffer first in the process 1945 * here, as we have to ensure that any dirty inode that we 1946 * can't get the flush lock on is attached to the buffer.
1947 * If we scan the in-memory inodes first, then buffer IO can 1948 * complete before we get a lock on it, and hence we may fail 1949 * to mark all the active inodes on the buffer stale. 1950 */ 1951 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 1952 mp->m_bsize * blks_per_cluster, 1953 XBF_LOCK); 1954 1955 /* 1956 * Walk the inodes already attached to the buffer and mark them 1957 * stale. These will all have the flush locks held, so an 1958 * in-memory inode walk can't lock them. By marking them all 1959 * stale first, we will not attempt to lock them in the loop 1960 * below as the XFS_ISTALE flag will be set. 1961 */ 1962 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); 1963 while (lip) { 1964 if (lip->li_type == XFS_LI_INODE) { 1965 iip = (xfs_inode_log_item_t *)lip; 1966 ASSERT(iip->ili_logged == 1); 1967 lip->li_cb = xfs_istale_done; 1968 xfs_trans_ail_copy_lsn(mp->m_ail, 1969 &iip->ili_flush_lsn, 1970 &iip->ili_item.li_lsn); 1971 xfs_iflags_set(iip->ili_inode, XFS_ISTALE); 1972 } 1973 lip = lip->li_bio_list; 1974 } 1975 1976 1977 /* 1978 * For each inode in memory attempt to add it to the inode 1979 * buffer and set it up for being staled on buffer IO 1980 * completion. This is safe as we've locked out tail pushing 1981 * and flushing by locking the buffer. 1982 * 1983 * We have already marked every inode that was part of a 1984 * transaction stale above, which means there is no point in 1985 * even trying to lock them. 1986 */ 1987 for (i = 0; i < ninodes; i++) { 1988 retry: 1989 rcu_read_lock(); 1990 ip = radix_tree_lookup(&pag->pag_ici_root, 1991 XFS_INO_TO_AGINO(mp, (inum + i))); 1992 1993 /* Inode not in memory, nothing to do */ 1994 if (!ip) { 1995 rcu_read_unlock(); 1996 continue; 1997 } 1998 1999 /* 2000 * because this is an RCU protected lookup, we could 2001 * find a recently freed or even reallocated inode 2002 * during the lookup. We need to check under the 2003 * i_flags_lock for a valid inode here. Skip it if it 2004 * is not valid, the wrong inode or stale. 2005 */ 2006 spin_lock(&ip->i_flags_lock); 2007 if (ip->i_ino != inum + i || 2008 __xfs_iflags_test(ip, XFS_ISTALE)) { 2009 spin_unlock(&ip->i_flags_lock); 2010 rcu_read_unlock(); 2011 continue; 2012 } 2013 spin_unlock(&ip->i_flags_lock); 2014 2015 /* 2016 * Don't try to lock/unlock the current inode, but we 2017 * _cannot_ skip the other inodes that we did not find 2018 * in the list attached to the buffer and are not 2019 * already marked stale. If we can't lock it, back off 2020 * and retry. 2021 */ 2022 if (ip != free_ip && 2023 !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2024 rcu_read_unlock(); 2025 delay(1); 2026 goto retry; 2027 } 2028 rcu_read_unlock(); 2029 2030 xfs_iflock(ip); 2031 xfs_iflags_set(ip, XFS_ISTALE); 2032 2033 /* 2034 * we don't need to attach clean inodes or those only 2035 * with unlogged changes (which we throw away, anyway). 
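 *
 * (Editorial note: "unlogged changes" here means i_update_core style
 * dirtiness -- e.g. timestamp updates -- that was never put into a
 * transaction; since the whole cluster is being freed and the buffer
 * invalidated, there is nothing worth writing back, so it is simply
 * discarded.)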
2036 */ 2037 iip = ip->i_itemp; 2038 if (!iip || xfs_inode_clean(ip)) { 2039 ASSERT(ip != free_ip); 2040 ip->i_update_core = 0; 2041 xfs_ifunlock(ip); 2042 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2043 continue; 2044 } 2045 2046 iip->ili_last_fields = iip->ili_format.ilf_fields; 2047 iip->ili_format.ilf_fields = 0; 2048 iip->ili_logged = 1; 2049 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 2050 &iip->ili_item.li_lsn); 2051 2052 xfs_buf_attach_iodone(bp, xfs_istale_done, 2053 &iip->ili_item); 2054 2055 if (ip != free_ip) 2056 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2057 } 2058 2059 xfs_trans_stale_inode_buf(tp, bp); 2060 xfs_trans_binval(tp, bp); 2061 } 2062 2063 xfs_perag_put(pag); 2064 } 2065 2066 /* 2067 * This is called to return an inode to the inode free list. 2068 * The inode should already be truncated to 0 length and have 2069 * no pages associated with it. This routine also assumes that 2070 * the inode is already a part of the transaction. 2071 * 2072 * The on-disk copy of the inode will have been added to the list 2073 * of unlinked inodes in the AGI. We need to remove the inode from 2074 * that list atomically with respect to freeing it here. 2075 */ 2076 int 2077 xfs_ifree( 2078 xfs_trans_t *tp, 2079 xfs_inode_t *ip, 2080 xfs_bmap_free_t *flist) 2081 { 2082 int error; 2083 int delete; 2084 xfs_ino_t first_ino; 2085 xfs_dinode_t *dip; 2086 xfs_buf_t *ibp; 2087 2088 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2089 ASSERT(ip->i_transp == tp); 2090 ASSERT(ip->i_d.di_nlink == 0); 2091 ASSERT(ip->i_d.di_nextents == 0); 2092 ASSERT(ip->i_d.di_anextents == 0); 2093 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) || 2094 ((ip->i_d.di_mode & S_IFMT) != S_IFREG)); 2095 ASSERT(ip->i_d.di_nblocks == 0); 2096 2097 /* 2098 * Pull the on-disk inode from the AGI unlinked list. 2099 */ 2100 error = xfs_iunlink_remove(tp, ip); 2101 if (error != 0) { 2102 return error; 2103 } 2104 2105 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino); 2106 if (error != 0) { 2107 return error; 2108 } 2109 ip->i_d.di_mode = 0; /* mark incore inode as free */ 2110 ip->i_d.di_flags = 0; 2111 ip->i_d.di_dmevmask = 0; 2112 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ 2113 ip->i_df.if_ext_max = 2114 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 2115 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; 2116 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 2117 /* 2118 * Bump the generation count so no one will be confused 2119 * by reincarnations of this inode. 2120 */ 2121 ip->i_d.di_gen++; 2122 2123 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2124 2125 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK); 2126 if (error) 2127 return error; 2128 2129 /* 2130 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat 2131 * from picking up this inode when it is reclaimed (its incore state 2132 * initialized but not flushed to disk yet). The in-core di_mode is 2133 * already cleared and a corresponding transaction logged. 2134 * The hack here just synchronizes the in-core to on-disk 2135 * di_mode value in advance before the actual inode sync to disk. 2136 * This is OK because the inode is already unlinked and would never 2137 * change its di_mode again for this inode generation. 2138 * This is a temporary hack that would require a proper fix 2139 * in the future.
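 *
 * (Sketch of the window being papered over, as I read it: bulkstat
 * reads di_mode straight out of the inode buffer, so without the
 * store below it could observe the stale non-zero mode of this freed
 * inode until the next inode flush writes the logged core back.)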
2140 */ 2141 dip->di_mode = 0; 2142 2143 if (delete) { 2144 xfs_ifree_cluster(ip, tp, first_ino); 2145 } 2146 2147 return 0; 2148 } 2149 2150 /* 2151 * Reallocate the space for if_broot based on the number of records 2152 * being added or deleted as indicated in rec_diff. Move the records 2153 * and pointers in if_broot to fit the new size. When shrinking this 2154 * will eliminate holes between the records and pointers created by 2155 * the caller. When growing this will create holes to be filled in 2156 * by the caller. 2157 * 2158 * The caller must not request to add more records than would fit in 2159 * the on-disk inode root. If the if_broot is currently NULL, then 2160 * one will be allocated if we are adding records. The caller must also 2161 * not request that the number of records go below zero, although 2162 * it can go to zero. 2163 * 2164 * ip -- the inode whose if_broot area is changing 2165 * rec_diff -- the change in the number of records, positive or negative, 2166 * requested for the if_broot array. 2167 */ 2168 void 2169 xfs_iroot_realloc( 2170 xfs_inode_t *ip, 2171 int rec_diff, 2172 int whichfork) 2173 { 2174 struct xfs_mount *mp = ip->i_mount; 2175 int cur_max; 2176 xfs_ifork_t *ifp; 2177 struct xfs_btree_block *new_broot; 2178 int new_max; 2179 size_t new_size; 2180 char *np; 2181 char *op; 2182 2183 /* 2184 * Handle the degenerate case quietly. 2185 */ 2186 if (rec_diff == 0) { 2187 return; 2188 } 2189 2190 ifp = XFS_IFORK_PTR(ip, whichfork); 2191 if (rec_diff > 0) { 2192 /* 2193 * If there wasn't any memory allocated before, just 2194 * allocate it now and get out. 2195 */ 2196 if (ifp->if_broot_bytes == 0) { 2197 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); 2198 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS); 2199 ifp->if_broot_bytes = (int)new_size; 2200 return; 2201 } 2202 2203 /* 2204 * If there is already an existing if_broot, then we need 2205 * to realloc() it and shift the pointers to their new 2206 * location. The records don't change location because 2207 * they are kept butted up against the btree block header. 2208 */ 2209 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); 2210 new_max = cur_max + rec_diff; 2211 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2212 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size, 2213 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ 2214 KM_SLEEP | KM_NOFS); 2215 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2216 ifp->if_broot_bytes); 2217 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2218 (int)new_size); 2219 ifp->if_broot_bytes = (int)new_size; 2220 ASSERT(ifp->if_broot_bytes <= 2221 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2222 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t)); 2223 return; 2224 } 2225 2226 /* 2227 * rec_diff is less than 0. In this case, we are shrinking the 2228 * if_broot buffer. It must already exist. If we go to zero 2229 * records, just get rid of the root and clear the status bit. 2230 */ 2231 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); 2232 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); 2233 new_max = cur_max + rec_diff; 2234 ASSERT(new_max >= 0); 2235 if (new_max > 0) 2236 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2237 else 2238 new_size = 0; 2239 if (new_size > 0) { 2240 new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS); 2241 /* 2242 * First copy over the btree block header.
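 *
 * (Layout reminder, editorial: an incore broot buffer is
 *
 *	[ struct xfs_btree_block | records ... | pointers ... ]
 *
 * with the records kept butted up against the header and the pointer
 * array floating at the end of the buffer, which is why the header,
 * the records and the pointers are copied as three separate steps in
 * the shrink path below.)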
2243 */ 2244 memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN); 2245 } else { 2246 new_broot = NULL; 2247 ifp->if_flags &= ~XFS_IFBROOT; 2248 } 2249 2250 /* 2251 * Only copy the records and pointers if there are any. 2252 */ 2253 if (new_max > 0) { 2254 /* 2255 * First copy the records. 2256 */ 2257 op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1); 2258 np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1); 2259 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t)); 2260 2261 /* 2262 * Then copy the pointers. 2263 */ 2264 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2265 ifp->if_broot_bytes); 2266 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1, 2267 (int)new_size); 2268 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); 2269 } 2270 kmem_free(ifp->if_broot); 2271 ifp->if_broot = new_broot; 2272 ifp->if_broot_bytes = (int)new_size; 2273 ASSERT(ifp->if_broot_bytes <= 2274 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2275 return; 2276 } 2277 2278 2279 /* 2280 * This is called when the amount of space needed for if_data 2281 * is increased or decreased. The change in size is indicated by 2282 * the number of bytes that need to be added or deleted in the 2283 * byte_diff parameter. 2284 * 2285 * If the amount of space needed has decreased below the size of the 2286 * inline buffer, then switch to using the inline buffer. Otherwise, 2287 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer 2288 * to what is needed. 2289 * 2290 * ip -- the inode whose if_data area is changing 2291 * byte_diff -- the change in the number of bytes, positive or negative, 2292 * requested for the if_data array. 2293 */ 2294 void 2295 xfs_idata_realloc( 2296 xfs_inode_t *ip, 2297 int byte_diff, 2298 int whichfork) 2299 { 2300 xfs_ifork_t *ifp; 2301 int new_size; 2302 int real_size; 2303 2304 if (byte_diff == 0) { 2305 return; 2306 } 2307 2308 ifp = XFS_IFORK_PTR(ip, whichfork); 2309 new_size = (int)ifp->if_bytes + byte_diff; 2310 ASSERT(new_size >= 0); 2311 2312 if (new_size == 0) { 2313 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2314 kmem_free(ifp->if_u1.if_data); 2315 } 2316 ifp->if_u1.if_data = NULL; 2317 real_size = 0; 2318 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) { 2319 /* 2320 * If the valid extents/data can fit in if_inline_ext/data, 2321 * copy them from the malloc'd vector and free it. 2322 */ 2323 if (ifp->if_u1.if_data == NULL) { 2324 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2325 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2326 ASSERT(ifp->if_real_bytes != 0); 2327 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data, 2328 new_size); 2329 kmem_free(ifp->if_u1.if_data); 2330 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2331 } 2332 real_size = 0; 2333 } else { 2334 /* 2335 * Stuck with malloc/realloc. 2336 * For inline data, the underlying buffer must be 2337 * a multiple of 4 bytes in size so that it can be 2338 * logged and stay on word boundaries. We enforce 2339 * that here. 2340 */ 2341 real_size = roundup(new_size, 4); 2342 if (ifp->if_u1.if_data == NULL) { 2343 ASSERT(ifp->if_real_bytes == 0); 2344 ifp->if_u1.if_data = kmem_alloc(real_size, 2345 KM_SLEEP | KM_NOFS); 2346 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2347 /* 2348 * Only do the realloc if the underlying size 2349 * is really changing. 
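 *
 * (Worked example, editorial: new_size values of 9 through 12 all
 * round up to a 12 byte allocation, so growing if_bytes from 9 to 11
 * leaves if_real_bytes at 12 and takes the no-realloc path below.)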
2350 */ 2351 if (ifp->if_real_bytes != real_size) { 2352 ifp->if_u1.if_data = 2353 kmem_realloc(ifp->if_u1.if_data, 2354 real_size, 2355 ifp->if_real_bytes, 2356 KM_SLEEP | KM_NOFS); 2357 } 2358 } else { 2359 ASSERT(ifp->if_real_bytes == 0); 2360 ifp->if_u1.if_data = kmem_alloc(real_size, 2361 KM_SLEEP | KM_NOFS); 2362 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, 2363 ifp->if_bytes); 2364 } 2365 } 2366 ifp->if_real_bytes = real_size; 2367 ifp->if_bytes = new_size; 2368 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2369 } 2370 2371 void 2372 xfs_idestroy_fork( 2373 xfs_inode_t *ip, 2374 int whichfork) 2375 { 2376 xfs_ifork_t *ifp; 2377 2378 ifp = XFS_IFORK_PTR(ip, whichfork); 2379 if (ifp->if_broot != NULL) { 2380 kmem_free(ifp->if_broot); 2381 ifp->if_broot = NULL; 2382 } 2383 2384 /* 2385 * If the format is local, then we can't have an extents 2386 * array so just look for an inline data array. If we're 2387 * not local then we may or may not have an extents list, 2388 * so check and free it up if we do. 2389 */ 2390 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 2391 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) && 2392 (ifp->if_u1.if_data != NULL)) { 2393 ASSERT(ifp->if_real_bytes != 0); 2394 kmem_free(ifp->if_u1.if_data); 2395 ifp->if_u1.if_data = NULL; 2396 ifp->if_real_bytes = 0; 2397 } 2398 } else if ((ifp->if_flags & XFS_IFEXTENTS) && 2399 ((ifp->if_flags & XFS_IFEXTIREC) || 2400 ((ifp->if_u1.if_extents != NULL) && 2401 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) { 2402 ASSERT(ifp->if_real_bytes != 0); 2403 xfs_iext_destroy(ifp); 2404 } 2405 ASSERT(ifp->if_u1.if_extents == NULL || 2406 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); 2407 ASSERT(ifp->if_real_bytes == 0); 2408 if (whichfork == XFS_ATTR_FORK) { 2409 kmem_zone_free(xfs_ifork_zone, ip->i_afp); 2410 ip->i_afp = NULL; 2411 } 2412 } 2413 2414 /* 2415 * This is called to unpin an inode. The caller must have the inode locked 2416 * in at least shared mode so that the buffer cannot be subsequently pinned 2417 * once someone is waiting for it to be unpinned. 2418 */ 2419 static void 2420 xfs_iunpin_nowait( 2421 struct xfs_inode *ip) 2422 { 2423 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2424 2425 trace_xfs_inode_unpin_nowait(ip, _RET_IP_); 2426 2427 /* Give the log a push to start the unpinning I/O */ 2428 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0); 2429 2430 } 2431 2432 void 2433 xfs_iunpin_wait( 2434 struct xfs_inode *ip) 2435 { 2436 if (xfs_ipincount(ip)) { 2437 xfs_iunpin_nowait(ip); 2438 wait_event(ip->i_ipin_wait, (xfs_ipincount(ip) == 0)); 2439 } 2440 } 2441 2442 /* 2443 * xfs_iextents_copy() 2444 * 2445 * This is called to copy the REAL extents (as opposed to the delayed 2446 * allocation extents) from the inode into the given buffer. It 2447 * returns the number of bytes copied into the buffer. 2448 * 2449 * If there are no delayed allocation extents, then we can just 2450 * memcpy() the extents into the buffer. Otherwise, we need to 2451 * examine each extent in turn and skip those which are delayed. 
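 *
 * (Editorial note: a delayed allocation extent has no disk mapping
 * yet -- its start block is a nullstartblock() sentinel rather than a
 * real xfs_fsblock_t -- so it must never be written into the on-disk
 * fork; that is what the isnullstartblock() test in the copy loop
 * below is for.)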
2452 */ 2453 int 2454 xfs_iextents_copy( 2455 xfs_inode_t *ip, 2456 xfs_bmbt_rec_t *dp, 2457 int whichfork) 2458 { 2459 int copied; 2460 int i; 2461 xfs_ifork_t *ifp; 2462 int nrecs; 2463 xfs_fsblock_t start_block; 2464 2465 ifp = XFS_IFORK_PTR(ip, whichfork); 2466 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2467 ASSERT(ifp->if_bytes > 0); 2468 2469 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2470 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork); 2471 ASSERT(nrecs > 0); 2472 2473 /* 2474 * There may be delayed allocation extents in the 2475 * inode, so copy the extents one at a time and skip 2476 * the delayed ones. There must be at least one 2477 * non-delayed extent. 2478 */ 2479 copied = 0; 2480 for (i = 0; i < nrecs; i++) { 2481 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); 2482 start_block = xfs_bmbt_get_startblock(ep); 2483 if (isnullstartblock(start_block)) { 2484 /* 2485 * It's a delayed allocation extent, so skip it. 2486 */ 2487 continue; 2488 } 2489 2490 /* Translate to on disk format */ 2491 put_unaligned(cpu_to_be64(ep->l0), &dp->l0); 2492 put_unaligned(cpu_to_be64(ep->l1), &dp->l1); 2493 dp++; 2494 copied++; 2495 } 2496 ASSERT(copied != 0); 2497 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip)); 2498 2499 return (copied * (uint)sizeof(xfs_bmbt_rec_t)); 2500 } 2501 2502 /* 2503 * Each of the following cases stores data into the same region 2504 * of the on-disk inode, so only one of them can be valid at 2505 * any given time. While it is possible to have conflicting formats 2506 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is 2507 * in EXTENTS format, this can only happen when the fork has 2508 * changed formats after being modified but before being flushed. 2509 * In these cases, the format always takes precedence, because the 2510 * format indicates the current state of the fork. 2511 */ 2512 /*ARGSUSED*/ 2513 STATIC void 2514 xfs_iflush_fork( 2515 xfs_inode_t *ip, 2516 xfs_dinode_t *dip, 2517 xfs_inode_log_item_t *iip, 2518 int whichfork, 2519 xfs_buf_t *bp) 2520 { 2521 char *cp; 2522 xfs_ifork_t *ifp; 2523 xfs_mount_t *mp; 2524 #ifdef XFS_TRANS_DEBUG 2525 int first; 2526 #endif 2527 static const short brootflag[2] = 2528 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 2529 static const short dataflag[2] = 2530 { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; 2531 static const short extflag[2] = 2532 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 2533 2534 if (!iip) 2535 return; 2536 ifp = XFS_IFORK_PTR(ip, whichfork); 2537 /* 2538 * This can happen if we gave up in iformat in an error path, 2539 * for the attribute fork.
2540 */ 2541 if (!ifp) { 2542 ASSERT(whichfork == XFS_ATTR_FORK); 2543 return; 2544 } 2545 cp = XFS_DFORK_PTR(dip, whichfork); 2546 mp = ip->i_mount; 2547 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 2548 case XFS_DINODE_FMT_LOCAL: 2549 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) && 2550 (ifp->if_bytes > 0)) { 2551 ASSERT(ifp->if_u1.if_data != NULL); 2552 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2553 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); 2554 } 2555 break; 2556 2557 case XFS_DINODE_FMT_EXTENTS: 2558 ASSERT((ifp->if_flags & XFS_IFEXTENTS) || 2559 !(iip->ili_format.ilf_fields & extflag[whichfork])); 2560 if ((iip->ili_format.ilf_fields & extflag[whichfork]) && 2561 (ifp->if_bytes > 0)) { 2562 ASSERT(xfs_iext_get_ext(ifp, 0)); 2563 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); 2564 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, 2565 whichfork); 2566 } 2567 break; 2568 2569 case XFS_DINODE_FMT_BTREE: 2570 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) && 2571 (ifp->if_broot_bytes > 0)) { 2572 ASSERT(ifp->if_broot != NULL); 2573 ASSERT(ifp->if_broot_bytes <= 2574 (XFS_IFORK_SIZE(ip, whichfork) + 2575 XFS_BROOT_SIZE_ADJ)); 2576 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes, 2577 (xfs_bmdr_block_t *)cp, 2578 XFS_DFORK_SIZE(dip, mp, whichfork)); 2579 } 2580 break; 2581 2582 case XFS_DINODE_FMT_DEV: 2583 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { 2584 ASSERT(whichfork == XFS_DATA_FORK); 2585 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev); 2586 } 2587 break; 2588 2589 case XFS_DINODE_FMT_UUID: 2590 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { 2591 ASSERT(whichfork == XFS_DATA_FORK); 2592 memcpy(XFS_DFORK_DPTR(dip), 2593 &ip->i_df.if_u2.if_uuid, 2594 sizeof(uuid_t)); 2595 } 2596 break; 2597 2598 default: 2599 ASSERT(0); 2600 break; 2601 } 2602 } 2603 2604 STATIC int 2605 xfs_iflush_cluster( 2606 xfs_inode_t *ip, 2607 xfs_buf_t *bp) 2608 { 2609 xfs_mount_t *mp = ip->i_mount; 2610 struct xfs_perag *pag; 2611 unsigned long first_index, mask; 2612 unsigned long inodes_per_cluster; 2613 int ilist_size; 2614 xfs_inode_t **ilist; 2615 xfs_inode_t *iq; 2616 int nr_found; 2617 int clcount = 0; 2618 int bufwasdelwri; 2619 int i; 2620 2621 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 2622 2623 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog; 2624 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); 2625 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); 2626 if (!ilist) 2627 goto out_put; 2628 2629 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); 2630 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; 2631 rcu_read_lock(); 2632 /* really need a gang lookup range call here */ 2633 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, 2634 first_index, inodes_per_cluster); 2635 if (nr_found == 0) 2636 goto out_free; 2637 2638 for (i = 0; i < nr_found; i++) { 2639 iq = ilist[i]; 2640 if (iq == ip) 2641 continue; 2642 2643 /* 2644 * because this is an RCU protected lookup, we could find a 2645 * recently freed or even reallocated inode during the lookup. 2646 * We need to check under the i_flags_lock for a valid inode 2647 * here. Skip it if it is not valid or the wrong inode. 
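 *
 * (Editorial note: this is the standard RCU lookup pattern -- the
 * radix tree walk itself is only protected by rcu_read_lock(), so
 * the inode number has to be re-validated under i_flags_lock before
 * the inode is trusted. Note that the checks below belong to iq,
 * the inode the lookup returned, not to the inode being flushed.)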
2648 */ 2649 spin_lock(&iq->i_flags_lock); 2650 if (!iq->i_ino || 2651 (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) { 2652 spin_unlock(&iq->i_flags_lock); 2653 continue; 2654 } 2655 spin_unlock(&iq->i_flags_lock); 2656 2657 /* 2658 * Do an un-protected check to see if the inode is dirty and 2659 * is a candidate for flushing. These checks will be repeated 2660 * later after the appropriate locks are acquired. 2661 */ 2662 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0) 2663 continue; 2664 2665 /* 2666 * Try to get locks. If any are unavailable or it is pinned, 2667 * then this inode cannot be flushed and is skipped. 2668 */ 2669 2670 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) 2671 continue; 2672 if (!xfs_iflock_nowait(iq)) { 2673 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2674 continue; 2675 } 2676 if (xfs_ipincount(iq)) { 2677 xfs_ifunlock(iq); 2678 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2679 continue; 2680 } 2681 2682 /* 2683 * arriving here means that this inode can be flushed. First 2684 * re-check that it's dirty before flushing. 2685 */ 2686 if (!xfs_inode_clean(iq)) { 2687 int error; 2688 error = xfs_iflush_int(iq, bp); 2689 if (error) { 2690 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2691 goto cluster_corrupt_out; 2692 } 2693 clcount++; 2694 } else { 2695 xfs_ifunlock(iq); 2696 } 2697 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2698 } 2699 2700 if (clcount) { 2701 XFS_STATS_INC(xs_icluster_flushcnt); 2702 XFS_STATS_ADD(xs_icluster_flushinode, clcount); 2703 } 2704 2705 out_free: 2706 rcu_read_unlock(); 2707 kmem_free(ilist); 2708 out_put: 2709 xfs_perag_put(pag); 2710 return 0; 2711 2712 2713 cluster_corrupt_out: 2714 /* 2715 * Corruption detected in the clustering loop. Invalidate the 2716 * inode buffer and shut down the filesystem. 2717 */ 2718 rcu_read_unlock(); 2719 /* 2720 * Clean up the buffer. If it was B_DELWRI, just release it -- 2721 * brelse can handle it with no problems. If not, shut down the 2722 * filesystem before releasing the buffer. 2723 */ 2724 bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp); 2725 if (bufwasdelwri) 2726 xfs_buf_relse(bp); 2727 2728 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 2729 2730 if (!bufwasdelwri) { 2731 /* 2732 * Just like incore_relse: if we have b_iodone functions, 2733 * mark the buffer as an error and call them. Otherwise 2734 * mark it as stale and brelse. 2735 */ 2736 if (XFS_BUF_IODONE_FUNC(bp)) { 2737 XFS_BUF_UNDONE(bp); 2738 XFS_BUF_STALE(bp); 2739 XFS_BUF_ERROR(bp,EIO); 2740 xfs_buf_ioend(bp, 0); 2741 } else { 2742 XFS_BUF_STALE(bp); 2743 xfs_buf_relse(bp); 2744 } 2745 } 2746 2747 /* 2748 * Unlocks the flush lock 2749 */ 2750 xfs_iflush_abort(iq); 2751 kmem_free(ilist); 2752 xfs_perag_put(pag); 2753 return XFS_ERROR(EFSCORRUPTED); 2754 } 2755 2756 /* 2757 * xfs_iflush() will write a modified inode's changes out to the 2758 * inode's on disk home. The caller must have the inode lock held 2759 * in at least shared mode and the inode flush completion must be 2760 * active as well. The inode lock will still be held upon return from 2761 * the call and the caller is free to unlock it. 2762 * The inode flush will be completed when the inode reaches the disk. 2763 * The flags indicate how the inode's buffer should be written out.
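 *
 * (Sketch of the calling convention described above, editorial and
 * simplified:
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	if (xfs_iflock_nowait(ip))
 *		error = xfs_iflush(ip, SYNC_WAIT);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *
 * note that the caller takes the flush lock but does not drop it;
 * that happens in xfs_iflush_done()/xfs_istale_done() when the
 * buffer I/O completes, or in the early-return paths below.)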
2764 */ 2765 int 2766 xfs_iflush( 2767 xfs_inode_t *ip, 2768 uint flags) 2769 { 2770 xfs_inode_log_item_t *iip; 2771 xfs_buf_t *bp; 2772 xfs_dinode_t *dip; 2773 xfs_mount_t *mp; 2774 int error; 2775 2776 XFS_STATS_INC(xs_iflush_count); 2777 2778 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2779 ASSERT(!completion_done(&ip->i_flush)); 2780 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 2781 ip->i_d.di_nextents > ip->i_df.if_ext_max); 2782 2783 iip = ip->i_itemp; 2784 mp = ip->i_mount; 2785 2786 /* 2787 * We can't flush the inode until it is unpinned, so wait for it if we 2788 * are allowed to block. We know no one new can pin it, because we are 2789 * holding the inode lock shared and you need to hold it exclusively to 2790 * pin the inode. 2791 * 2792 * If we are not allowed to block, force the log out asynchronously so 2793 * that when we come back the inode will be unpinned. If other inodes 2794 * in the same cluster are dirty, they will probably write the inode 2795 * out for us if they occur after the log force completes. 2796 */ 2797 if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) { 2798 xfs_iunpin_nowait(ip); 2799 xfs_ifunlock(ip); 2800 return EAGAIN; 2801 } 2802 xfs_iunpin_wait(ip); 2803 2804 /* 2805 * For stale inodes we cannot rely on the backing buffer remaining 2806 * stale in cache for the remaining life of the stale inode and so 2807 * xfs_itobp() below may give us a buffer that no longer contains 2808 * inodes below. We have to check this after ensuring the inode is 2809 * unpinned so that it is safe to reclaim the stale inode after the 2810 * flush call. 2811 */ 2812 if (xfs_iflags_test(ip, XFS_ISTALE)) { 2813 xfs_ifunlock(ip); 2814 return 0; 2815 } 2816 2817 /* 2818 * This may have been unpinned because the filesystem is shutting 2819 * down forcibly. If that's the case we must not write this inode 2820 * to disk, because the log record didn't make it to disk! 2821 */ 2822 if (XFS_FORCED_SHUTDOWN(mp)) { 2823 ip->i_update_core = 0; 2824 if (iip) 2825 iip->ili_format.ilf_fields = 0; 2826 xfs_ifunlock(ip); 2827 return XFS_ERROR(EIO); 2828 } 2829 2830 /* 2831 * Get the buffer containing the on-disk inode. 2832 */ 2833 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 2834 (flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK); 2835 if (error || !bp) { 2836 xfs_ifunlock(ip); 2837 return error; 2838 } 2839 2840 /* 2841 * First flush out the inode that xfs_iflush was called with. 2842 */ 2843 error = xfs_iflush_int(ip, bp); 2844 if (error) 2845 goto corrupt_out; 2846 2847 /* 2848 * If the buffer is pinned then push on the log now so we won't 2849 * get stuck waiting in the write for too long. 
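 *
 * (Editorial note: a pinned buffer still backs changes whose log
 * records have not yet reached disk, so it cannot be written until
 * that log I/O completes; the xfs_log_force() below kicks the log
 * I/O off early rather than letting the write path block on the
 * unpin.)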
2850 */ 2851 if (XFS_BUF_ISPINNED(bp)) 2852 xfs_log_force(mp, 0); 2853 2854 /* 2855 * inode clustering: 2856 * see if other inodes can be gathered into this write 2857 */ 2858 error = xfs_iflush_cluster(ip, bp); 2859 if (error) 2860 goto cluster_corrupt_out; 2861 2862 if (flags & SYNC_WAIT) 2863 error = xfs_bwrite(mp, bp); 2864 else 2865 xfs_bdwrite(mp, bp); 2866 return error; 2867 2868 corrupt_out: 2869 xfs_buf_relse(bp); 2870 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 2871 cluster_corrupt_out: 2872 /* 2873 * Unlocks the flush lock 2874 */ 2875 xfs_iflush_abort(ip); 2876 return XFS_ERROR(EFSCORRUPTED); 2877 } 2878 2879 2880 STATIC int 2881 xfs_iflush_int( 2882 xfs_inode_t *ip, 2883 xfs_buf_t *bp) 2884 { 2885 xfs_inode_log_item_t *iip; 2886 xfs_dinode_t *dip; 2887 xfs_mount_t *mp; 2888 #ifdef XFS_TRANS_DEBUG 2889 int first; 2890 #endif 2891 2892 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2893 ASSERT(!completion_done(&ip->i_flush)); 2894 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 2895 ip->i_d.di_nextents > ip->i_df.if_ext_max); 2896 2897 iip = ip->i_itemp; 2898 mp = ip->i_mount; 2899 2900 /* set *dip = inode's place in the buffer */ 2901 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); 2902 2903 /* 2904 * Clear i_update_core before copying out the data. 2905 * This is for coordination with our timestamp updates 2906 * that don't hold the inode lock. They will always 2907 * update the timestamps BEFORE setting i_update_core, 2908 * so if we clear i_update_core after they set it we 2909 * are guaranteed to see their updates to the timestamps. 2910 * I believe that this depends on strongly ordered memory 2911 * semantics, but we have that. We use the SYNCHRONIZE 2912 * macro to make sure that the compiler does not reorder 2913 * the i_update_core access below the data copy below. 2914 */ 2915 ip->i_update_core = 0; 2916 SYNCHRONIZE(); 2917 2918 /* 2919 * Make sure to get the latest timestamps from the Linux inode. 
2920 */ 2921 xfs_synchronize_times(ip); 2922 2923 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC, 2924 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 2925 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2926 "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p", 2927 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); 2928 goto corrupt_out; 2929 } 2930 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, 2931 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { 2932 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2933 "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x", 2934 __func__, ip->i_ino, ip, ip->i_d.di_magic); 2935 goto corrupt_out; 2936 } 2937 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 2938 if (XFS_TEST_ERROR( 2939 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 2940 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 2941 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 2942 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2943 "%s: Bad regular inode %Lu, ptr 0x%p", 2944 __func__, ip->i_ino, ip); 2945 goto corrupt_out; 2946 } 2947 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 2948 if (XFS_TEST_ERROR( 2949 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 2950 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 2951 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 2952 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 2953 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2954 "%s: Bad directory inode %Lu, ptr 0x%p", 2955 __func__, ip->i_ino, ip); 2956 goto corrupt_out; 2957 } 2958 } 2959 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 2960 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 2961 XFS_RANDOM_IFLUSH_5)) { 2962 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2963 "%s: detected corrupt incore inode %Lu, " 2964 "total extents = %d, nblocks = %Ld, ptr 0x%p", 2965 __func__, ip->i_ino, 2966 ip->i_d.di_nextents + ip->i_d.di_anextents, 2967 ip->i_d.di_nblocks, ip); 2968 goto corrupt_out; 2969 } 2970 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 2971 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 2972 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2973 "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 2974 __func__, ip->i_ino, ip->i_d.di_forkoff, ip); 2975 goto corrupt_out; 2976 } 2977 /* 2978 * bump the flush iteration count, used to detect flushes which 2979 * postdate a log record during recovery. 2980 */ 2981 2982 ip->i_d.di_flushiter++; 2983 2984 /* 2985 * Copy the dirty parts of the inode into the on-disk 2986 * inode. We always copy out the core of the inode, 2987 * because if the inode is dirty at all the core must 2988 * be. 2989 */ 2990 xfs_dinode_to_disk(dip, &ip->i_d); 2991 2992 /* Wrap, we never let the log put out DI_MAX_FLUSH */ 2993 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 2994 ip->i_d.di_flushiter = 0; 2995 2996 /* 2997 * If this is really an old format inode and the superblock version 2998 * has not been updated to support only new format inodes, then 2999 * convert back to the old inode format. If the superblock version 3000 * has been updated, then make the conversion permanent. 3001 */ 3002 ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb)); 3003 if (ip->i_d.di_version == 1) { 3004 if (!xfs_sb_version_hasnlink(&mp->m_sb)) { 3005 /* 3006 * Convert it back. 3007 */ 3008 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); 3009 dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink); 3010 } else { 3011 /* 3012 * The superblock version has already been bumped, 3013 * so just make the conversion to the new inode 3014 * format permanent. 
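 *
 * (Background, editorial: version 1 dinodes keep the link count in
 * the 16 bit di_onlink field; version 2 moved it to the 32 bit
 * di_nlink field and reused the di_onlink/di_pad area for project
 * quota support, which is why both are zeroed when the conversion
 * is made permanent below.)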
3015 */ 3016 ip->i_d.di_version = 2; 3017 dip->di_version = 2; 3018 ip->i_d.di_onlink = 0; 3019 dip->di_onlink = 0; 3020 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 3021 memset(&(dip->di_pad[0]), 0, 3022 sizeof(dip->di_pad)); 3023 ASSERT(xfs_get_projid(ip) == 0); 3024 } 3025 } 3026 3027 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp); 3028 if (XFS_IFORK_Q(ip)) 3029 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); 3030 xfs_inobp_check(mp, bp); 3031 3032 /* 3033 * We've recorded everything logged in the inode, so we'd 3034 * like to clear the ilf_fields bits so we don't log and 3035 * flush things unnecessarily. However, we can't stop 3036 * logging all this information until the data we've copied 3037 * into the disk buffer is written to disk. If we did we might 3038 * overwrite the copy of the inode in the log with all the 3039 * data after re-logging only part of it, and in the face of 3040 * a crash we wouldn't have all the data we need to recover. 3041 * 3042 * What we do is move the bits to the ili_last_fields field. 3043 * When logging the inode, these bits are moved back to the 3044 * ilf_fields field. In the xfs_iflush_done() routine we 3045 * clear ili_last_fields, since we know that the information 3046 * those bits represent is permanently on disk. As long as 3047 * the flush completes before the inode is logged again, then 3048 * both ilf_fields and ili_last_fields will be cleared. 3049 * 3050 * We can play with the ilf_fields bits here, because the inode 3051 * lock must be held exclusively in order to set bits there 3052 * and the flush lock protects the ili_last_fields bits. 3053 * Set ili_logged so the flush done 3054 * routine can tell whether or not to look in the AIL. 3055 * Also, store the current LSN of the inode so that we can tell 3056 * whether the item has moved in the AIL from xfs_iflush_done(). 3057 * In order to read the lsn we need the AIL lock, because 3058 * it is a 64 bit value that cannot be read atomically. 3059 */ 3060 if (iip != NULL && iip->ili_format.ilf_fields != 0) { 3061 iip->ili_last_fields = iip->ili_format.ilf_fields; 3062 iip->ili_format.ilf_fields = 0; 3063 iip->ili_logged = 1; 3064 3065 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 3066 &iip->ili_item.li_lsn); 3067 3068 /* 3069 * Attach the function xfs_iflush_done to the inode's 3070 * buffer. This will remove the inode from the AIL 3071 * and unlock the inode's flush lock when the inode is 3072 * completely written to disk. 3073 */ 3074 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); 3075 3076 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); 3077 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); 3078 } else { 3079 /* 3080 * We're flushing an inode which is not in the AIL and has 3081 * not been logged but has i_update_core set. For this 3082 * case we can use a B_DELWRI flush and immediately drop 3083 * the inode flush lock because we can avoid the whole 3084 * AIL state thing. It's OK to drop the flush lock now, 3085 * because we've already locked the buffer and to do anything 3086 * you really need both. 3087 */ 3088 if (iip != NULL) { 3089 ASSERT(iip->ili_logged == 0); 3090 ASSERT(iip->ili_last_fields == 0); 3091 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0); 3092 } 3093 xfs_ifunlock(ip); 3094 } 3095 3096 return 0; 3097 3098 corrupt_out: 3099 return XFS_ERROR(EFSCORRUPTED); 3100 } 3101 3102 /* 3103 * Return a pointer to the extent record at file index idx. 
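 *
 * (Editorial summary of the three incore extent layouts the
 * branches below have to cope with: a small inline array inside the
 * fork, a single directly allocated array reached through
 * if_u1.if_extents, or -- once XFS_IFEXTIREC is set -- an
 * indirection array of xfs_ext_irec_t entries, each pointing at a
 * buffer of up to XFS_LINEAR_EXTS records.)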
3104 */ 3105 xfs_bmbt_rec_host_t * 3106 xfs_iext_get_ext( 3107 xfs_ifork_t *ifp, /* inode fork pointer */ 3108 xfs_extnum_t idx) /* index of target extent */ 3109 { 3110 ASSERT(idx >= 0); 3111 ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)); 3112 3113 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { 3114 return ifp->if_u1.if_ext_irec->er_extbuf; 3115 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3116 xfs_ext_irec_t *erp; /* irec pointer */ 3117 int erp_idx = 0; /* irec index */ 3118 xfs_extnum_t page_idx = idx; /* ext index in target list */ 3119 3120 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3121 return &erp->er_extbuf[page_idx]; 3122 } else if (ifp->if_bytes) { 3123 return &ifp->if_u1.if_extents[idx]; 3124 } else { 3125 return NULL; 3126 } 3127 } 3128 3129 /* 3130 * Insert new item(s) into the extent records for incore inode 3131 * fork 'ifp'. 'count' new items are inserted at index 'idx'. 3132 */ 3133 void 3134 xfs_iext_insert( 3135 xfs_inode_t *ip, /* incore inode pointer */ 3136 xfs_extnum_t idx, /* starting index of new items */ 3137 xfs_extnum_t count, /* number of inserted items */ 3138 xfs_bmbt_irec_t *new, /* items to insert */ 3139 int state) /* type of extent conversion */ 3140 { 3141 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df; 3142 xfs_extnum_t i; /* extent record index */ 3143 3144 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_); 3145 3146 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3147 xfs_iext_add(ifp, idx, count); 3148 for (i = idx; i < idx + count; i++, new++) 3149 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new); 3150 } 3151 3152 /* 3153 * This is called when the amount of space required for incore file 3154 * extents needs to be increased. The ext_diff parameter stores the 3155 * number of new extents being added and the idx parameter contains 3156 * the extent index where the new extents will be added. If the new 3157 * extents are being appended, then we just need to (re)allocate and 3158 * initialize the space. Otherwise, if the new extents are being 3159 * inserted into the middle of the existing entries, a bit more work 3160 * is required to make room for the new extents to be inserted. The 3161 * caller is responsible for filling in the new extent entries upon 3162 * return. 3163 */ 3164 void 3165 xfs_iext_add( 3166 xfs_ifork_t *ifp, /* inode fork pointer */ 3167 xfs_extnum_t idx, /* index to begin adding exts */ 3168 int ext_diff) /* number of extents to add */ 3169 { 3170 int byte_diff; /* new bytes being added */ 3171 int new_size; /* size of extents after adding */ 3172 xfs_extnum_t nextents; /* number of extents in file */ 3173 3174 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3175 ASSERT((idx >= 0) && (idx <= nextents)); 3176 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t); 3177 new_size = ifp->if_bytes + byte_diff; 3178 /* 3179 * If the new number of extents (nextents + ext_diff) 3180 * fits inside the inode, then continue to use the inline 3181 * extent buffer. 3182 */ 3183 if (nextents + ext_diff <= XFS_INLINE_EXTS) { 3184 if (idx < nextents) { 3185 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff], 3186 &ifp->if_u2.if_inline_ext[idx], 3187 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 3188 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff); 3189 } 3190 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 3191 ifp->if_real_bytes = 0; 3192 } 3193 /* 3194 * Otherwise use a linear (direct) extent list. 
3195 * If the extents are currently inside the inode, 3196 * xfs_iext_realloc_direct will switch us from 3197 * inline to direct extent allocation mode. 3198 */ 3199 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) { 3200 xfs_iext_realloc_direct(ifp, new_size); 3201 if (idx < nextents) { 3202 memmove(&ifp->if_u1.if_extents[idx + ext_diff], 3203 &ifp->if_u1.if_extents[idx], 3204 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 3205 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff); 3206 } 3207 } 3208 /* Indirection array */ 3209 else { 3210 xfs_ext_irec_t *erp; 3211 int erp_idx = 0; 3212 int page_idx = idx; 3213 3214 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS); 3215 if (ifp->if_flags & XFS_IFEXTIREC) { 3216 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1); 3217 } else { 3218 xfs_iext_irec_init(ifp); 3219 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3220 erp = ifp->if_u1.if_ext_irec; 3221 } 3222 /* Extents fit in target extent page */ 3223 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) { 3224 if (page_idx < erp->er_extcount) { 3225 memmove(&erp->er_extbuf[page_idx + ext_diff], 3226 &erp->er_extbuf[page_idx], 3227 (erp->er_extcount - page_idx) * 3228 sizeof(xfs_bmbt_rec_t)); 3229 memset(&erp->er_extbuf[page_idx], 0, byte_diff); 3230 } 3231 erp->er_extcount += ext_diff; 3232 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3233 } 3234 /* Insert a new extent page */ 3235 else if (erp) { 3236 xfs_iext_add_indirect_multi(ifp, 3237 erp_idx, page_idx, ext_diff); 3238 } 3239 /* 3240 * If extent(s) are being appended to the last page in 3241 * the indirection array and the new extent(s) don't fit 3242 * in the page, then erp is NULL and erp_idx is set to 3243 * the next index needed in the indirection array. 3244 */ 3245 else { 3246 int count = ext_diff; 3247 3248 while (count) { 3249 erp = xfs_iext_irec_new(ifp, erp_idx); 3250 erp->er_extcount = count; 3251 count -= MIN(count, (int)XFS_LINEAR_EXTS); 3252 if (count) { 3253 erp_idx++; 3254 } 3255 } 3256 } 3257 } 3258 ifp->if_bytes = new_size; 3259 } 3260 3261 /* 3262 * This is called when incore extents are being added to the indirection 3263 * array and the new extents do not fit in the target extent list. The 3264 * erp_idx parameter contains the irec index for the target extent list 3265 * in the indirection array, and the idx parameter contains the extent 3266 * index within the list. The number of extents being added is stored 3267 * in the count parameter. 
3268 * 3269 * |-------| |-------| 3270 * | | | | idx - number of extents before idx 3271 * | idx | | count | 3272 * | | | | count - number of extents being inserted at idx 3273 * |-------| |-------| 3274 * | count | | nex2 | nex2 - number of extents after idx + count 3275 * |-------| |-------| 3276 */ 3277 void 3278 xfs_iext_add_indirect_multi( 3279 xfs_ifork_t *ifp, /* inode fork pointer */ 3280 int erp_idx, /* target extent irec index */ 3281 xfs_extnum_t idx, /* index within target list */ 3282 int count) /* new extents being added */ 3283 { 3284 int byte_diff; /* new bytes being added */ 3285 xfs_ext_irec_t *erp; /* pointer to irec entry */ 3286 xfs_extnum_t ext_diff; /* number of extents to add */ 3287 xfs_extnum_t ext_cnt; /* new extents still needed */ 3288 xfs_extnum_t nex2; /* extents after idx + count */ 3289 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */ 3290 int nlists; /* number of irec's (lists) */ 3291 3292 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3293 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3294 nex2 = erp->er_extcount - idx; 3295 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3296 3297 /* 3298 * Save second part of target extent list 3299 * (all extents past idx). */ 3300 if (nex2) { 3301 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3302 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS); 3303 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); 3304 erp->er_extcount -= nex2; 3305 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); 3306 memset(&erp->er_extbuf[idx], 0, byte_diff); 3307 } 3308 3309 /* 3310 * Add the new extents to the end of the target 3311 * list, then allocate new irec record(s) and 3312 * extent buffer(s) as needed to store the rest 3313 * of the new extents. 3314 */ 3315 ext_cnt = count; 3316 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount); 3317 if (ext_diff) { 3318 erp->er_extcount += ext_diff; 3319 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3320 ext_cnt -= ext_diff; 3321 } 3322 while (ext_cnt) { 3323 erp_idx++; 3324 erp = xfs_iext_irec_new(ifp, erp_idx); 3325 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS); 3326 erp->er_extcount = ext_diff; 3327 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3328 ext_cnt -= ext_diff; 3329 } 3330 3331 /* Add nex2 extents back to indirection array */ 3332 if (nex2) { 3333 xfs_extnum_t ext_avail; 3334 int i; 3335 3336 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3337 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; 3338 i = 0; 3339 /* 3340 * If nex2 extents fit in the current page, append 3341 * nex2_ep after the new extents. 3342 */ 3343 if (nex2 <= ext_avail) { 3344 i = erp->er_extcount; 3345 } 3346 /* 3347 * Otherwise, check if space is available in the 3348 * next page. 3349 */ 3350 else if ((erp_idx < nlists - 1) && 3351 (nex2 <= (ext_avail = XFS_LINEAR_EXTS - 3352 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) { 3353 erp_idx++; 3354 erp++; 3355 /* Create a hole for nex2 extents */ 3356 memmove(&erp->er_extbuf[nex2], erp->er_extbuf, 3357 erp->er_extcount * sizeof(xfs_bmbt_rec_t)); 3358 } 3359 /* 3360 * Final choice, create a new extent page for 3361 * nex2 extents. 3362 */ 3363 else { 3364 erp_idx++; 3365 erp = xfs_iext_irec_new(ifp, erp_idx); 3366 } 3367 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); 3368 kmem_free(nex2_ep); 3369 erp->er_extcount += nex2; 3370 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); 3371 } 3372 } 3373 3374 /* 3375 * This is called when the amount of space required for incore file 3376 * extents needs to be decreased.
The ext_diff parameter stores the 3377 * number of extents to be removed and the idx parameter contains 3378 * the extent index where the extents will be removed from. 3379 * 3380 * If the amount of space needed has decreased below the linear 3381 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous 3382 * extent array. Otherwise, use kmem_realloc() to adjust the 3383 * size to what is needed. 3384 */ 3385 void 3386 xfs_iext_remove( 3387 xfs_inode_t *ip, /* incore inode pointer */ 3388 xfs_extnum_t idx, /* index to begin removing exts */ 3389 int ext_diff, /* number of extents to remove */ 3390 int state) /* type of extent conversion */ 3391 { 3392 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df; 3393 xfs_extnum_t nextents; /* number of extents in file */ 3394 int new_size; /* size of extents after removal */ 3395 3396 trace_xfs_iext_remove(ip, idx, state, _RET_IP_); 3397 3398 ASSERT(ext_diff > 0); 3399 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3400 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); 3401 3402 if (new_size == 0) { 3403 xfs_iext_destroy(ifp); 3404 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3405 xfs_iext_remove_indirect(ifp, idx, ext_diff); 3406 } else if (ifp->if_real_bytes) { 3407 xfs_iext_remove_direct(ifp, idx, ext_diff); 3408 } else { 3409 xfs_iext_remove_inline(ifp, idx, ext_diff); 3410 } 3411 ifp->if_bytes = new_size; 3412 } 3413 3414 /* 3415 * This removes ext_diff extents from the inline buffer, beginning 3416 * at extent index idx. 3417 */ 3418 void 3419 xfs_iext_remove_inline( 3420 xfs_ifork_t *ifp, /* inode fork pointer */ 3421 xfs_extnum_t idx, /* index to begin removing exts */ 3422 int ext_diff) /* number of extents to remove */ 3423 { 3424 int nextents; /* number of extents in file */ 3425 3426 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 3427 ASSERT(idx < XFS_INLINE_EXTS); 3428 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3429 ASSERT(((nextents - ext_diff) > 0) && 3430 (nextents - ext_diff) < XFS_INLINE_EXTS); 3431 3432 if (idx + ext_diff < nextents) { 3433 memmove(&ifp->if_u2.if_inline_ext[idx], 3434 &ifp->if_u2.if_inline_ext[idx + ext_diff], 3435 (nextents - (idx + ext_diff)) * 3436 sizeof(xfs_bmbt_rec_t)); 3437 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff], 3438 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 3439 } else { 3440 memset(&ifp->if_u2.if_inline_ext[idx], 0, 3441 ext_diff * sizeof(xfs_bmbt_rec_t)); 3442 } 3443 } 3444 3445 /* 3446 * This removes ext_diff extents from a linear (direct) extent list, 3447 * beginning at extent index idx. If the extents are being removed 3448 * from the end of the list (ie. truncate) then we just need to re- 3449 * allocate the list to remove the extra space. Otherwise, if the 3450 * extents are being removed from the middle of the existing extent 3451 * entries, then we first need to move the extent records beginning 3452 * at idx + ext_diff up in the list to overwrite the records being 3453 * removed, then remove the extra space via kmem_realloc. 
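 *
 * (Worked example, editorial: removing 2 extents at index 1 from
 * [A B C D E] memmoves D and E down over B and C to give
 * [A D E D E], after which the two trailing slots are zeroed and
 * the list is shrunk via xfs_iext_realloc_direct().)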
3454 */ 3455 void 3456 xfs_iext_remove_direct( 3457 xfs_ifork_t *ifp, /* inode fork pointer */ 3458 xfs_extnum_t idx, /* index to begin removing exts */ 3459 int ext_diff) /* number of extents to remove */ 3460 { 3461 xfs_extnum_t nextents; /* number of extents in file */ 3462 int new_size; /* size of extents after removal */ 3463 3464 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 3465 new_size = ifp->if_bytes - 3466 (ext_diff * sizeof(xfs_bmbt_rec_t)); 3467 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3468 3469 if (new_size == 0) { 3470 xfs_iext_destroy(ifp); 3471 return; 3472 } 3473 /* Move extents up in the list (if needed) */ 3474 if (idx + ext_diff < nextents) { 3475 memmove(&ifp->if_u1.if_extents[idx], 3476 &ifp->if_u1.if_extents[idx + ext_diff], 3477 (nextents - (idx + ext_diff)) * 3478 sizeof(xfs_bmbt_rec_t)); 3479 } 3480 memset(&ifp->if_u1.if_extents[nextents - ext_diff], 3481 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 3482 /* 3483 * Reallocate the direct extent list. If the extents 3484 * will fit inside the inode then xfs_iext_realloc_direct 3485 * will switch from direct to inline extent allocation 3486 * mode for us. 3487 */ 3488 xfs_iext_realloc_direct(ifp, new_size); 3489 ifp->if_bytes = new_size; 3490 } 3491 3492 /* 3493 * This is called when incore extents are being removed from the 3494 * indirection array and the extents being removed span multiple extent 3495 * buffers. The idx parameter contains the file extent index where we 3496 * want to begin removing extents, and the count parameter contains 3497 * how many extents need to be removed. 3498 * 3499 * |-------| |-------| 3500 * | nex1 | | | nex1 - number of extents before idx 3501 * |-------| | count | 3502 * | | | | count - number of extents being removed at idx 3503 * | count | |-------| 3504 * | | | nex2 | nex2 - number of extents after idx + count 3505 * |-------| |-------| 3506 */ 3507 void 3508 xfs_iext_remove_indirect( 3509 xfs_ifork_t *ifp, /* inode fork pointer */ 3510 xfs_extnum_t idx, /* index to begin removing extents */ 3511 int count) /* number of extents to remove */ 3512 { 3513 xfs_ext_irec_t *erp; /* indirection array pointer */ 3514 int erp_idx = 0; /* indirection array index */ 3515 xfs_extnum_t ext_cnt; /* extents left to remove */ 3516 xfs_extnum_t ext_diff; /* extents to remove in current list */ 3517 xfs_extnum_t nex1; /* number of extents before idx */ 3518 xfs_extnum_t nex2; /* extents after idx + count */ 3519 int page_idx = idx; /* index in target extent list */ 3520 3521 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3522 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3523 ASSERT(erp != NULL); 3524 nex1 = page_idx; 3525 ext_cnt = count; 3526 while (ext_cnt) { 3527 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0); 3528 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1)); 3529 /* 3530 * Check for deletion of entire list; 3531 * xfs_iext_irec_remove() updates extent offsets. 
/*
 * Create, destroy, or resize a linear (direct) block of extents.
 */
void
xfs_iext_realloc_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new size of extents */
{
	int		rnew_size;	/* real new size of extents */

	rnew_size = new_size;

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
		 (new_size != ifp->if_real_bytes)));

	/* Free extent records */
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	}
	/* Resize direct extent list and zero any new bytes */
	else if (ifp->if_real_bytes) {
		/* Check if extents will fit inside the inode */
		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
			xfs_iext_direct_to_inline(ifp, new_size /
				(uint)sizeof(xfs_bmbt_rec_t));
			ifp->if_bytes = new_size;
			return;
		}
		if (!is_power_of_2(new_size))
			rnew_size = roundup_pow_of_two(new_size);
		if (rnew_size != ifp->if_real_bytes) {
			ifp->if_u1.if_extents =
				kmem_realloc(ifp->if_u1.if_extents,
						rnew_size,
						ifp->if_real_bytes, KM_NOFS);
		}
		if (rnew_size > ifp->if_real_bytes) {
			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
				(uint)sizeof(xfs_bmbt_rec_t)], 0,
				rnew_size - ifp->if_real_bytes);
		}
	}
	/*
	 * Switch from the inline extent buffer to a direct
	 * extent list.  Be sure to include the inline extent
	 * bytes in new_size.
	 */
	else {
		new_size += ifp->if_bytes;
		/*
		 * Recompute the real size from the grown new_size;
		 * reusing the stale rnew_size would undersize the
		 * allocation whenever the grown new_size is already
		 * a power of two.
		 */
		rnew_size = new_size;
		if (!is_power_of_2(new_size))
			rnew_size = roundup_pow_of_two(new_size);
		xfs_iext_inline_to_direct(ifp, rnew_size);
	}
	ifp->if_real_bytes = rnew_size;
	ifp->if_bytes = new_size;
}

/*
 * Switch from linear (direct) extent records to inline buffer.
 */
void
xfs_iext_direct_to_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	nextents)	/* number of extents in file */
{
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(nextents <= XFS_INLINE_EXTS);
	/*
	 * The inline buffer was zeroed when we switched
	 * from inline to direct extent allocation mode,
	 * so we don't need to clear it here.
	 */
	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
		nextents * sizeof(xfs_bmbt_rec_t));
	kmem_free(ifp->if_u1.if_extents);
	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	ifp->if_real_bytes = 0;
}
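/*
 * Illustrative sketch (not part of this file): the power-of-two
 * sizing policy xfs_iext_realloc_direct() applies to the direct
 * list.  is_power_of_2()/roundup_pow_of_two() mimic the kernel
 * helpers from <linux/log2.h> so the demo builds in userspace.
 */
#if 0
#include <stdio.h>

static int
is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static unsigned long
roundup_pow_of_two(unsigned long n)
{
	unsigned long	r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int
main(void)
{
	unsigned long	sizes[] = { 48, 64, 100, 256 };	/* byte counts */
	int		i;

	for (i = 0; i < 4; i++) {
		unsigned long	rnew = sizes[i];

		if (!is_power_of_2(rnew))
			rnew = roundup_pow_of_two(rnew);
		printf("new_size %lu -> allocate %lu\n", sizes[i], rnew);
	}
	/* 48 -> 64, 64 -> 64, 100 -> 128, 256 -> 256 */
	return 0;
}
#endif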
/*
 * Switch from inline buffer to linear (direct) extent records.
 * new_size should already be rounded up to the next power of 2
 * by the caller (when appropriate), so use new_size as it is.
 * However, since new_size may be rounded up, we can't update
 * if_bytes here.  It is the caller's responsibility to update
 * if_bytes upon return.
 */
void
xfs_iext_inline_to_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new extent list size, in bytes */
{
	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
	memset(ifp->if_u1.if_extents, 0, new_size);
	if (ifp->if_bytes) {
		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
			ifp->if_bytes);
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_real_bytes = new_size;
}

/*
 * Resize an extent indirection array to new_size bytes.
 */
STATIC void
xfs_iext_realloc_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new indirection array size */
{
	int		nlists;		/* number of irec's (ex lists) */
	int		size;		/* current indirection array size */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	size = nlists * sizeof(xfs_ext_irec_t);
	ASSERT(ifp->if_real_bytes);
	ASSERT((new_size >= 0) && (new_size != size));
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else {
		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
			kmem_realloc(ifp->if_u1.if_ext_irec,
				new_size, size, KM_NOFS);
	}
}

/*
 * Switch from indirection array to linear (direct) extent allocations.
 */
STATIC void
xfs_iext_indirect_to_direct(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		size;		/* size of file extents */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);
	size = nextents * sizeof(xfs_bmbt_rec_t);

	xfs_iext_irec_compact_pages(ifp);
	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);

	ep = ifp->if_u1.if_ext_irec->er_extbuf;
	kmem_free(ifp->if_u1.if_ext_irec);
	ifp->if_flags &= ~XFS_IFEXTIREC;
	ifp->if_u1.if_extents = ep;
	ifp->if_bytes = size;
	if (nextents < XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, size);
	}
}

/*
 * Free incore file extents.
 */
void
xfs_iext_destroy(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	if (ifp->if_flags & XFS_IFEXTIREC) {
		int	erp_idx;
		int	nlists;

		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
			xfs_iext_irec_remove(ifp, erp_idx);
		}
		ifp->if_flags &= ~XFS_IFEXTIREC;
	} else if (ifp->if_real_bytes) {
		kmem_free(ifp->if_u1.if_extents);
	} else if (ifp->if_bytes) {
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_u1.if_extents = NULL;
	ifp->if_real_bytes = 0;
	ifp->if_bytes = 0;
}
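/*
 * Illustrative sketch (not part of this file): the three storage
 * states xfs_iext_destroy() above distinguishes, modelled as a tiny
 * userspace struct.  The field and flag names are stand-ins for the
 * xfs_ifork_t members, not the real layout.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FLAG_INDIRECT	0x1	/* stand-in for XFS_IFEXTIREC */
#define INLINE_BYTES	32	/* stand-in inline buffer size */

struct fork {
	int	flags;
	int	real_bytes;		/* heap allocation size */
	char	*extents;		/* heap buffer, if any */
	char	inline_ext[INLINE_BYTES]; /* in-struct buffer */
	int	bytes;			/* bytes of valid records */
};

static void
fork_destroy(struct fork *f)
{
	if (f->flags & FLAG_INDIRECT) {
		/* The real code walks the irec array freeing each page. */
		printf("free indirection pages\n");
	} else if (f->real_bytes) {
		free(f->extents);	/* direct list lives on the heap */
		printf("free direct list\n");
	} else if (f->bytes) {
		memset(f->inline_ext, 0, INLINE_BYTES);
		printf("zero inline buffer\n");
	}
	f->extents = NULL;
	f->real_bytes = 0;
	f->bytes = 0;
}

int
main(void)
{
	struct fork	f = { 0 };

	f.extents = malloc(64);
	f.real_bytes = 64;
	f.bytes = 48;
	fork_destroy(&f);	/* prints: free direct list */
	return 0;
}
#endif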
/*
 * Return a pointer to the extent record for file system block bno.
 */
xfs_bmbt_rec_host_t *			/* pointer to found extent record */
xfs_iext_bno_to_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	xfs_extnum_t	*idxp)		/* index of target extent */
{
	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	int		high;		/* upper boundary in search */
	xfs_extnum_t	idx = 0;	/* index of target extent */
	int		low;		/* lower boundary in search */
	xfs_extnum_t	nextents;	/* number of file extents */
	xfs_fileoff_t	startoff = 0;	/* start offset of extent */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*idxp = 0;
		return NULL;
	}
	low = 0;
	if (ifp->if_flags & XFS_IFEXTIREC) {
		/* Find target extent list */
		int	erp_idx = 0;
		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
		base = erp->er_extbuf;
		high = erp->er_extcount - 1;
	} else {
		base = ifp->if_u1.if_extents;
		high = nextents - 1;
	}
	/* Binary search extent records */
	while (low <= high) {
		idx = (low + high) >> 1;
		ep = base + idx;
		startoff = xfs_bmbt_get_startoff(ep);
		blockcount = xfs_bmbt_get_blockcount(ep);
		if (bno < startoff) {
			high = idx - 1;
		} else if (bno >= startoff + blockcount) {
			low = idx + 1;
		} else {
			/* Convert back to file-based extent index */
			if (ifp->if_flags & XFS_IFEXTIREC) {
				idx += erp->er_extoff;
			}
			*idxp = idx;
			return ep;
		}
	}
	/* Convert back to file-based extent index */
	if (ifp->if_flags & XFS_IFEXTIREC) {
		idx += erp->er_extoff;
	}
	if (bno >= startoff + blockcount) {
		if (++idx == nextents) {
			ep = NULL;
		} else {
			ep = xfs_iext_get_ext(ifp, idx);
		}
	}
	*idxp = idx;
	return ep;
}
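/*
 * Illustrative sketch (not part of this file): the binary search over
 * half-open extents [startoff, startoff + blockcount) performed by
 * xfs_iext_bno_to_ext() above, run against a toy extent table.
 */
#if 0
#include <stdio.h>

struct ext {
	unsigned long	startoff;	/* first block covered */
	unsigned long	blockcount;	/* number of blocks */
};

int
main(void)
{
	struct ext	tab[] = {	/* sorted, non-overlapping */
		{  0, 10 }, { 20, 5 }, { 40, 8 },
	};
	int		nextents = 3;
	unsigned long	bno = 22;	/* block to look up */
	int		low = 0, high = nextents - 1;

	while (low <= high) {
		int	idx = (low + high) >> 1;

		if (bno < tab[idx].startoff)
			high = idx - 1;
		else if (bno >= tab[idx].startoff + tab[idx].blockcount)
			low = idx + 1;
		else {
			/* prints: block 22 is in extent 1 */
			printf("block %lu is in extent %d\n", bno, idx);
			return 0;
		}
	}
	printf("block %lu falls in a hole\n", bno);
	return 0;
}
#endif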
/*
 * Return a pointer to the indirection array entry containing the
 * extent record for filesystem block bno.  Store the index of the
 * target irec in *erp_idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_bno_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	int		*erp_idxp)	/* irec index of target ext list */
{
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of extent irec's (lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
			high = erp_idx - 1;
		} else if (erp_next && bno >=
			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
			low = erp_idx + 1;
		} else {
			break;
		}
	}
	*erp_idxp = erp_idx;
	return erp;
}

/*
 * Return a pointer to the indirection array entry containing the
 * extent record at file extent index *idxp.  Store the index of the
 * target irec in *erp_idxp and store the page index of the target
 * extent record in *idxp.
 */
xfs_ext_irec_t *
xfs_iext_idx_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
	int		*erp_idxp,	/* pointer to target irec */
	int		realloc)	/* new bytes were just added */
{
	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */
	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	ASSERT(page_idx >= 0);
	ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
	ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);

	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;

	/* Binary search extent irec's */
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		prev = erp_idx > 0 ? erp - 1 : NULL;
		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
		    realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
			high = erp_idx - 1;
		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
			   (page_idx == erp->er_extoff + erp->er_extcount &&
			    !realloc)) {
			low = erp_idx + 1;
		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
			   erp->er_extcount == XFS_LINEAR_EXTS) {
			ASSERT(realloc);
			page_idx = 0;
			erp_idx++;
			erp = erp_idx < nlists ? erp + 1 : NULL;
			break;
		} else {
			page_idx -= erp->er_extoff;
			break;
		}
	}
	*idxp = page_idx;
	*erp_idxp = erp_idx;
	return erp;
}
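/*
 * Illustrative sketch (not part of this file): translating a
 * file-wide extent index into an (irec, page index) pair via the
 * per-page er_extoff/er_extcount fields, the mapping performed by
 * xfs_iext_idx_to_irec() above.  The page layout is invented for
 * the demo, and a linear scan replaces the kernel's binary search
 * for brevity.
 */
#if 0
#include <stdio.h>

struct irec {
	int	extoff;		/* file index of first record in page */
	int	extcount;	/* records currently in this page */
};

int
main(void)
{
	struct irec	irecs[] = {
		{ 0, 100 }, { 100, 40 }, { 140, 100 },
	};
	int		nlists = 3;
	int		idx = 117;	/* file-wide extent index */
	int		erp_idx;

	for (erp_idx = 0; erp_idx < nlists; erp_idx++) {
		if (idx < irecs[erp_idx].extoff + irecs[erp_idx].extcount) {
			/* prints: index 117 -> irec 1, page index 17 */
			printf("index %d -> irec %d, page index %d\n",
				idx, erp_idx, idx - irecs[erp_idx].extoff);
			return 0;
		}
	}
	printf("index %d is out of range\n", idx);
	return 0;
}
#endif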
/*
 * Allocate and initialize an indirection array once the space needed
 * for incore extents increases above XFS_IEXT_BUFSZ.
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);

	if (nextents == 0) {
		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}
	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;
}

/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return &erp[erp_idx];
}
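/*
 * Illustrative sketch (not part of this file): opening slot erp_idx
 * in an array by shifting records down, then deriving the new
 * record's offset from its predecessor, as xfs_iext_irec_new() does
 * above.  The array contents are invented for the demo.
 */
#if 0
#include <stdio.h>
#include <string.h>

struct irec {
	int	extoff;
	int	extcount;
};

int
main(void)
{
	struct irec	erp[3] = { { 0, 100 }, { 100, 50 } };
	int		nlists = 3;	/* array already grown by one slot */
	int		erp_idx = 1;	/* insert the new page here */
	int		i;

	/* Shift existing records down to free slot erp_idx. */
	for (i = nlists - 1; i > erp_idx; i--)
		memmove(&erp[i], &erp[i-1], sizeof(erp[0]));

	/* The new page starts empty, right after its predecessor. */
	erp[erp_idx].extcount = 0;
	erp[erp_idx].extoff = erp_idx > 0 ?
		erp[erp_idx-1].extoff + erp[erp_idx-1].extcount : 0;

	for (i = 0; i < nlists; i++)
		printf("irec %d: extoff=%d extcount=%d\n",
			i, erp[i].extoff, erp[i].extcount);
	/* irec 0: 0/100, irec 1: 100/0, irec 2: 100/50 */
	return 0;
}
#endif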
/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array.  A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec);
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}

/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array.  Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible.  The
 * compaction policy is as follows:
 *
 * Full Compaction:    Extents fit into a single page (or inline buffer)
 * Partial Compaction: Extents occupy less than 50% of allocated space
 * No Compaction:      Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}

/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memcpy(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}

/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists.  erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}
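/*
 * Illustrative sketch (not part of this file): the compaction policy
 * thresholds applied by xfs_iext_irec_compact() above.  INLINE_EXTS,
 * LINEAR_EXTS and the extent counts are stand-ins for the kernel
 * constants, chosen only to exercise each branch.
 */
#if 0
#include <stdio.h>

#define INLINE_EXTS	2	/* fits in the inode itself */
#define LINEAR_EXTS	256	/* fits in one direct page */

static const char *
policy(int nextents, int nlists)
{
	if (nextents == 0)
		return "destroy";
	if (nextents <= INLINE_EXTS)
		return "full compaction to inline buffer";
	if (nextents <= LINEAR_EXTS)
		return "full compaction to direct list";
	if (nextents < (nlists * LINEAR_EXTS) >> 1)
		return "partial compaction (merge pages)";
	return "no compaction";
}

int
main(void)
{
	/* 300 extents over 4 pages: under 50% utilization. */
	printf("%s\n", policy(300, 4));	/* partial compaction */
	/* 900 extents over 4 pages: at least 50% used, leave alone. */
	printf("%s\n", policy(900, 4));	/* no compaction */
	return 0;
}
#endif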