/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_imap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_dir2_trace.h"
#include "xfs_quota.h"
#include "xfs_acl.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"

kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_fs_cmn_err(CE_ALERT, mp,
				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

/*
 * Find the buffer associated with the given inode map.
 * We do basic validation checks on the buffer once it has been
 * retrieved from disk.
 */
STATIC int
xfs_imap_to_bp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_imap_t	*imap,
	xfs_buf_t	**bpp,
	uint		buf_flags,
	uint		imap_flags)
{
	int		error;
	int		i;
	int		ni;
	xfs_buf_t	*bp;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp);
	if (error) {
		if (error != EAGAIN) {
			cmn_err(CE_WARN,
				"xfs_imap_to_bp: xfs_trans_read_buf() returned "
				"an error %d on %s.  Returning error.",
				error, mp->m_fsname);
		} else {
			ASSERT(buf_flags & XFS_BUF_TRYLOCK);
		}
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 */
#ifdef DEBUG
	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
			    XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (imap_flags & XFS_IMAP_BULKSTAT) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
			XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
						XFS_ERRLEVEL_HIGH, mp, dip);
#ifdef DEBUG
			cmn_err(CE_PANIC,
					"Device %s - bad inode magic/vsn "
					"daddr %lld #%d (magic=%x)",
				XFS_BUFTARG_NAME(mp->m_ddev_targp),
				(unsigned long long)imap->im_blkno, i,
				be16_to_cpu(dip->di_core.di_magic));
#endif
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);

	*bpp = bp;
	return 0;
}

/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dip parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
STATIC int
xfs_inotobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	int		*offset)
{
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;

	imap.im_blkno = 0;
	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
	if (error)
		return error;

	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, 0);
	if (error)
		return error;

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	*offset = imap.im_boffset;
	return 0;
}

/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dip parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * If the inode is new and has not yet been initialized, use xfs_imap()
 * to determine the size and location of the buffer to read from disk.
 * If the inode has already been mapped to its buffer and read in once,
 * then use the mapping information stored in the inode rather than
 * calling xfs_imap().  This allows us to avoid the overhead of looking
 * at the inode btree for small block file systems (see xfs_dilocate()).
 * We can tell whether the inode has been mapped in before by comparing
 * its disk block address to 0.  Only uninitialized inodes will have
 * 0 for the disk block address.
 */
int
xfs_itobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	xfs_daddr_t	bno,
	uint		imap_flags,
	uint		buf_flags)
{
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;

	if (ip->i_blkno == (xfs_daddr_t)0) {
		imap.im_blkno = bno;
		error = xfs_imap(mp, tp, ip->i_ino, &imap,
					XFS_IMAP_LOOKUP | imap_flags);
		if (error)
			return error;

		/*
		 * Fill in the fields in the inode that will be used to
		 * map the inode to its buffer from now on.
		 */
		ip->i_blkno = imap.im_blkno;
		ip->i_len = imap.im_len;
		ip->i_boffset = imap.im_boffset;
	} else {
		/*
		 * We've already mapped the inode once, so just use the
		 * mapping that we saved the first time.
		 */
		imap.im_blkno = ip->i_blkno;
		imap.im_len = ip->i_len;
		imap.im_boffset = ip->i_boffset;
	}
	ASSERT(bno == 0 || bno == imap.im_blkno);

	error = xfs_imap_to_bp(mp, tp, &imap, &bp, buf_flags, imap_flags);
	if (error)
		return error;

	if (!bp) {
		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
		ASSERT(tp == NULL);
		*bpp = NULL;
		return EAGAIN;
	}

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	return 0;
}

/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means setting if_rdev to the proper value.  For files,
 * directories, and symlinks this means bringing in the in-line data
 * or extent pointers.  For a file in B-tree format, only the root is
 * immediately brought in-core.  The rest will be in-lined in if_extents
 * when it is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error;
	xfs_fsize_t		di_size;
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	error = 0;

	if (unlikely(be32_to_cpu(dip->di_core.di_nextents) +
		     be16_to_cpu(dip->di_core.di_anextents) >
		     be64_to_cpu(dip->di_core.di_nblocks))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_core.di_nextents) +
			      be16_to_cpu(dip->di_core.di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_core.di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(dip->di_core.di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_core.di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_core.di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					      ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_size = 0;
		ip->i_df.if_u2.if_rdev = be32_to_cpu(dip->di_u.di_dev);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_core.di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely((be16_to_cpu(dip->di_core.di_mode) & S_IFMT) == S_IFREG)) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = be64_to_cpu(dip->di_core.di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	switch (dip->di_core.di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);
		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}

/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in if_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu "
			"(bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}

/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = be64_to_cpu(get_unaligned(&dp->l0));
			ep->l1 = be64_to_cpu(get_unaligned(&dp->l1));
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);

	/*
	 * blow out if -- fork has fewer extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
	    || XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
		ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}

void
xfs_dinode_from_disk(
	xfs_icdinode_t		*to,
	xfs_dinode_core_t	*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid = be16_to_cpu(from->di_projid);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);
}

void
xfs_dinode_to_disk(
	xfs_dinode_core_t	*to,
	xfs_icdinode_t		*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid = cpu_to_be16(from->di_projid);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
}

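/*
 * Note that xfs_dinode_from_disk() and xfs_dinode_to_disk() are exact
 * inverses of each other.  On-disk inode fields are stored big-endian,
 * so, for example, the inode magic number 0x494e ("IN") sits on disk as
 * the byte sequence 0x49 0x4e; be16_to_cpu() swaps it into host order on
 * a little-endian CPU and is a no-op on a big-endian one, and the
 * matching cpu_to_be16() undoes that when the inode is written back.
 */
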
STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	xfs_dinode_core_t	*dic = &dip->di_core;

	return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}

/*
 * Given a mount structure and an inode number, return a pointer
 * to a newly allocated in-core inode corresponding to the given
 * inode number.
 *
 * Initialize the inode's attributes and extent pointers if it
 * already has them (it will not if the inode has no links).
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno,
	uint		imap_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	xfs_inode_t	*ip;
	int		error;

	ASSERT(xfs_inode_zone != NULL);

	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
	ip->i_ino = ino;
	ip->i_mount = mp;
	atomic_set(&ip->i_iocount, 0);
	spin_lock_init(&ip->i_flags_lock);

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 * If the inode number refers to a block outside the file system
	 * then xfs_itobp() will return NULL.  In this case we should
	 * return NULL as well.  Set i_blkno to 0 so that xfs_itobp() will
	 * know that this is a new incore inode.
	 */
	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags, XFS_BUF_LOCK);
	if (error) {
		kmem_zone_free(xfs_inode_zone, ip);
		return error;
	}

	/*
	 * Initialize inode's trace buffers.
	 * Do this before xfs_iformat in case it adds entries.
	 */
#ifdef	XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMBT_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
#endif

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC) {
		kmem_zone_free(xfs_inode_zone, ip);
		xfs_trans_brelse(tp, bp);
#ifdef DEBUG
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
				"dip->di_core.di_magic (0x%x) != "
				"XFS_DINODE_MAGIC (0x%x)",
				be16_to_cpu(dip->di_core.di_magic),
				XFS_DINODE_MAGIC);
#endif /* DEBUG */
		return XFS_ERROR(EINVAL);
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_core.di_mode) {
		xfs_dinode_from_disk(&ip->i_d, &dip->di_core);
		error = xfs_iformat(ip, dip);
		if (error)  {
			kmem_zone_free(xfs_inode_zone, ip);
			xfs_trans_brelse(tp, bp);
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
					"xfs_iformat() returned error %d",
					error);
#endif /* DEBUG */
			return error;
		}
	} else {
		ip->i_d.di_magic = be16_to_cpu(dip->di_core.di_magic);
		ip->i_d.di_version = dip->di_core.di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_core.di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_core.di_flushiter);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
		/*
		 * Initialize the per-fork minima and maxima for a new
		 * inode here.  xfs_iformat will do it for old inodes.
		 */
		ip->i_df.if_ext_max =
			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	}

	INIT_LIST_HEAD(&ip->i_reclaim);

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format. We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		ip->i_d.di_projid = 0;
	}

	ip->i_delayed_blks = 0;
	ip->i_size = ip->i_d.di_size;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	XFS_BUF_SET_REF(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
	xfs_trans_brelse(tp, bp);
	*ipp = ip;
	return 0;
}

/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;
	size_t		size;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	size = nextents * sizeof(xfs_bmbt_rec_t);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_lastex = NULLEXTNUM;
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
	return 0;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget()
 * to obtain the in-core version of the allocated inode.  Finally,
 * fill in the inode and log its initial contents.  In this case,
 * ialloc_context would be set to NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode,
 * it will replenish its supply by doing an allocation. Since we can
 * only do one allocation within a transaction without deadlocks, we
 * must commit the current transaction before returning the inode itself.
 * In this case, therefore, we will set call_again to true and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	mode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	cred_t		*cr,
	xfs_prid_t	prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	boolean_t	*call_again,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp;
	uint		flags;
	int		error;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, call_again, &ino);
	if (error != 0) {
		return error;
	}
	if (*call_again || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_trans_iget(tp->t_mountp, tp, ino,
				XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error != 0) {
		return error;
	}
	ASSERT(ip != NULL);

	vp = XFS_ITOV(ip);
	ip->i_d.di_mode = (__uint16_t)mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid(cr);
	ip->i_d.di_gid = current_fsgid(cr);
	ip->i_d.di_projid = prid;
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_version = XFS_DINODE_VERSION_2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);
	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		if (pip && xfs_inode_is_filestream(pip)) {
			error = xfs_filestream_associate(pip, ip);
			if (error < 0)
				return -error;
			if (!error)
				xfs_iflags_set(ip, XFS_IFILESTREAM);
		}
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if ((mode & S_IFMT) == S_IFDIR) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if ((mode & S_IFMT) == S_IFREG) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_initialize_vnode(tp->t_mountp, vp, ip);

	*ipp = ip;
	return 0;
}

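/*
 * A rough sketch of the caller-side retry protocol described above;
 * xfs_dir_ialloc() is the canonical user.  Reservation sizes and most
 * of the error handling are omitted, so treat this as illustration only:
 *
 *	code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, okalloc,
 *			  &ialloc_context, &call_again, &ip);
 *	if (!code && call_again) {
 *		// keep the AGI freelist buffer locked across the commit
 *		xfs_trans_bhold(tp, ialloc_context);
 *		ntp = xfs_trans_dup(tp);
 *		code = xfs_trans_commit(tp, 0);
 *		tp = ntp;
 *		code = xfs_trans_reserve(tp, 0, log_res, 0,
 *					 XFS_TRANS_PERM_LOG_RES, log_count);
 *		// hand the held buffer over to the new transaction
 *		xfs_trans_bjoin(tp, ialloc_context);
 *		code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid,
 *				  okalloc, &ialloc_context, &call_again, &ip);
 *	}
 */
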
/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
#ifdef DEBUG
void
xfs_isize_check(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	map_first;
	int		nimaps;
	xfs_bmbt_irec_t	imaps[2];

	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
		return;

	if (XFS_IS_REALTIME_INODE(ip))
		return;

	if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
		return;

	nimaps = 2;
	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 * an error.
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			 (XFS_B_TO_FSB(mp,
				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
			  map_first),
			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
			 NULL, NULL))
		return;
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#endif	/* DEBUG */

/*
 * Calculate the last possible buffered byte in a file.  This must
 * include data that was buffered beyond the EOF by the write code.
 * This also needs to deal with overflowing the xfs_fsize_t type
 * which can happen for sizes near the limit.
 *
 * We also need to take into account any blocks beyond the EOF.  It
 * may be the case that they were buffered by a write which failed.
 * In that case the pages will still be in memory, but the inode size
 * will never have been updated.
 */
xfs_fsize_t
xfs_file_last_byte(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_fsize_t	last_byte;
	xfs_fileoff_t	last_block;
	xfs_fileoff_t	size_last_block;
	int		error;

	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));

	mp = ip->i_mount;
	/*
	 * Only check for blocks beyond the EOF if the extents have
	 * been read in.  This eliminates the need for the inode lock,
	 * and it also saves us from looking when it really isn't
	 * necessary.
	 */
	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
		error = xfs_bmap_last_offset(NULL, ip, &last_block,
			XFS_DATA_FORK);
		if (error) {
			last_block = 0;
		}
	} else {
		last_block = 0;
	}
	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);

	last_byte = XFS_FSB_TO_B(mp, last_block);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	last_byte += (1 << mp->m_writeio_log);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	return last_byte;
}

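/*
 * Worked example with made-up numbers: on a filesystem with 4k blocks
 * and a 64k write-behind window (mp->m_writeio_log == 16), a 10000 byte
 * file with no extents beyond EOF gives
 *
 *	size_last_block = XFS_B_TO_FSB(mp, 10000)	  = 3
 *	last_byte	= XFS_FSB_TO_B(mp, 3) + (1 << 16)
 *			= 12288 + 65536			  = 77824
 *
 * so pages buffered anywhere in that window are still treated as part
 * of the file when the truncate path decides what to toss.
 */
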
#if defined(XFS_RW_TRACE)
STATIC void
xfs_itrunc_trace(
	int		tag,
	xfs_inode_t	*ip,
	int		flag,
	xfs_fsize_t	new_size,
	xfs_off_t	toss_start,
	xfs_off_t	toss_finish)
{
	if (ip->i_rwtrace == NULL) {
		return;
	}

	ktrace_enter(ip->i_rwtrace,
		     (void*)((long)tag),
		     (void*)ip,
		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
		     (void*)((long)flag),
		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(new_size & 0xffffffff),
		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_start & 0xffffffff),
		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_finish & 0xffffffff),
		     (void*)(unsigned long)current_cpu(),
		     (void*)(unsigned long)current_pid(),
		     (void*)NULL,
		     (void*)NULL,
		     (void*)NULL);
}
#else
#define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
#endif

/*
 * Start the truncation of the file to new_size.  The new size
 * must be smaller than the current size.  This routine will
 * clear the buffer and page caches of file data in the removed
 * range, and xfs_itruncate_finish() will remove the underlying
 * disk blocks.
 *
 * The inode must have its I/O lock locked EXCLUSIVELY, and it
 * must NOT have the inode lock held at all.  This is because we're
 * calling into the buffer/page cache code and we can't hold the
 * inode lock when we do so.
 *
 * We need to wait for any direct I/Os in flight to complete before we
 * proceed with the truncate. This is needed to prevent the extents
 * being read or written by the direct I/Os from being removed while the
 * I/O is in flight as there is no other method of synchronising
 * direct I/O with the truncate operation.  Also, because we hold
 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
 * started until the truncate completes and drops the lock. Essentially,
 * the vn_iowait() call forms an I/O barrier that provides strict ordering
 * between direct I/Os and the truncate operation.
 *
 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
 * in the case that the caller is locking things out of order and
 * may not be able to call xfs_itruncate_finish() with the inode lock
 * held without dropping the I/O lock.  If the caller must drop the
 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
 * must be called again with all the same restrictions as the initial
 * call.
 */
int
xfs_itruncate_start(
	xfs_inode_t	*ip,
	uint		flags,
	xfs_fsize_t	new_size)
{
	xfs_fsize_t	last_byte;
	xfs_off_t	toss_start;
	xfs_mount_t	*mp;
	bhv_vnode_t	*vp;
	int		error = 0;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
	       (flags == XFS_ITRUNC_MAYBE));

	mp = ip->i_mount;
	vp = XFS_ITOV(ip);

	/* wait for the completion of any pending DIOs */
	if (new_size < ip->i_size)
		vn_iowait(ip);

	/*
	 * Call toss_pages or flushinval_pages to get rid of pages
	 * overlapping the region being removed.  We have to use
	 * the less efficient flushinval_pages in the case that the
	 * caller may not be able to finish the truncate without
	 * dropping the inode's I/O lock.  Make sure
	 * to catch any pages brought in by buffers overlapping
	 * the EOF by searching out beyond the isize by our
	 * block size. We round new_size up to a block boundary
	 * so that we don't toss things on the same block as
	 * new_size but before it.
	 *
	 * Before calling toss_pages or flushinval_pages, make sure to
	 * call remapf() over the same region if the file is mapped.
	 * This frees up mapped file references to the pages in the
	 * given range and for the flushinval_pages case it ensures
	 * that we get the latest mapped changes flushed out.
	 */
	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	toss_start = XFS_FSB_TO_B(mp, toss_start);
	if (toss_start < 0) {
		/*
		 * The place to start tossing is beyond our maximum
		 * file size, so there is no way that the data extended
		 * out there.
		 */
		return 0;
	}
	last_byte = xfs_file_last_byte(ip);
	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
			 last_byte);
	if (last_byte > toss_start) {
		if (flags & XFS_ITRUNC_DEFINITE) {
			xfs_tosspages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		} else {
			error = xfs_flushinval_pages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		}
	}

#ifdef DEBUG
	if (new_size == 0) {
		ASSERT(VN_CACHED(vp) == 0);
	}
#endif
	return error;
}

/*
 * Shrink the file to the given new_size.  The new size must be smaller than
 * the current size.  This will free up the underlying blocks in the removed
 * range after a call to xfs_itruncate_start() or xfs_atruncate_start().
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it
 * indicates the fork which is to be truncated.  For the attribute fork we only
 * support truncation to size 0.
 *
 * We use the sync parameter to indicate whether or not the first transaction
 * we perform might have to be synchronous.  For the attr fork, it needs to be
 * so if the unlink of the inode is not yet known to be permanent in the log.
 * This keeps us from freeing and reusing the blocks of the attribute fork
 * before the unlink of the inode becomes permanent.
 *
 * For the data fork, we normally have to run synchronously if we're being
 * called out of the inactive path or we're being called out of the create path
 * where we're truncating an existing file.  Either way, the truncate needs to
 * be sync so blocks don't reappear in the file with altered data in case of a
 * crash.  wsync filesystems can run the first case async because anything that
 * shrinks the inode has to run sync so by the time we're called here from
 * inactive, the inode size is permanently set to 0.
 *
 * Calls from the truncate path always need to be sync unless we're in a wsync
 * filesystem and the file has already been unlinked.
 *
 * The caller is responsible for correctly setting the sync parameter.  It gets
 * too hard for us to guess here which path we're being called out of just
 * based on inode state.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_finish(
	xfs_trans_t	**tp,
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	int		fork,
	int		sync)
{
	xfs_fsblock_t	first_block;
	xfs_fileoff_t	first_unmap_block;
	xfs_fileoff_t	last_block;
	xfs_filblks_t	unmap_len=0;
	xfs_mount_t	*mp;
	xfs_trans_t	*ntp;
	int		done;
	int		committed;
	xfs_bmap_free_t	free_list;
	int		error;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT(*tp != NULL);
	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_transp == *tp);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);


	ntp = *tp;
	mp = (ntp)->t_mountp;
	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));

	/*
	 * We only support truncating the entire attribute fork.
	 */
	if (fork == XFS_ATTR_FORK) {
		new_size = 0LL;
	}
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
	/*
	 * The first thing we do is set the size to new_size permanently
	 * on disk.  This way we don't have to worry about anyone ever
	 * being able to look at the data being freed even in the face
	 * of a crash.  What we're getting around here is the case where
	 * we free a block, it is allocated to another file, it is written
	 * to, and then we crash.  If the new data gets written to the
	 * file but the log buffers containing the free and reallocation
	 * don't, then we'd end up with garbage in the blocks being freed.
	 * As long as we make the new_size permanent before actually
	 * freeing any blocks it doesn't matter if they get written to.
	 *
	 * The callers must signal into us whether or not the size
	 * setting here must be synchronous.  There are a few cases
	 * where it doesn't have to be synchronous.  Those cases
	 * occur if the file is unlinked and we know the unlink is
	 * permanent or if the blocks being truncated are guaranteed
	 * to be beyond the inode eof (regardless of the link count)
	 * and the eof value is permanent.  Both of these cases occur
	 * only on wsync-mounted filesystems.  In those cases, we're
	 * guaranteed that no user will ever see the data in the blocks
	 * that are being truncated so the truncate can run async.
	 * In the free beyond eof case, the file may wind up with
	 * more blocks allocated to it than it needs if we crash
	 * and that won't get fixed until the next time the file
	 * is re-opened and closed but that's ok as that shouldn't
	 * be too many blocks.
	 *
	 * However, we can't just make all wsync xactions run async
	 * because there's one call out of the create path that needs
	 * to run sync where it's truncating an existing file to size
	 * 0 whose size is > 0.
	 *
	 * It's probably possible to come up with a test in this
	 * routine that would correctly distinguish all the above
	 * cases from the values of the function parameters and the
	 * inode state but for sanity's sake, I've decided to let the
	 * layers above just tell us.  It's simpler to correctly figure
	 * out in the layer above exactly under what conditions we
	 * can run async and I think it's easier for others to read
	 * and follow the logic in case something has to be changed.
	 * cscope is your friend -- rcc.
	 *
	 * The attribute fork is much simpler.
	 *
	 * For the attribute fork we allow the caller to tell us whether
	 * the unlink of the inode that led to this call is yet permanent
	 * in the on disk log.  If it is not and we will be freeing extents
	 * in this inode then we make the first transaction synchronous
	 * to make sure that the unlink is permanent by the time we free
	 * the blocks.
	 */
	if (fork == XFS_DATA_FORK) {
		if (ip->i_d.di_nextents > 0) {
			/*
			 * If we are not changing the file size then do
			 * not update the on-disk file size - we may be
			 * called from xfs_inactive_free_eofblocks().  If we
			 * update the on-disk file size and then the system
			 * crashes before the contents of the file are
			 * flushed to disk then the files may be full of
			 * holes (ie NULL files bug).
			 */
			if (ip->i_size != new_size) {
				ip->i_d.di_size = new_size;
				ip->i_size = new_size;
				xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
			}
		}
	} else if (sync) {
		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
		if (ip->i_d.di_anextents > 0)
			xfs_trans_set_sync(ntp);
	}
	ASSERT(fork == XFS_DATA_FORK ||
		(fork == XFS_ATTR_FORK &&
			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	ASSERT(first_unmap_block <= last_block);
	done = 0;
	if (last_block == first_unmap_block) {
		done = 1;
	} else {
		unmap_len = last_block - first_unmap_block + 1;
	}
	while (!done) {
		/*
		 * Free up to XFS_ITRUNC_MAX_EXTENTS.  xfs_bunmapi()
		 * will tell us whether it freed the entire range or
		 * not.  If this is a synchronous mount (wsync),
		 * then we can tell bunmapi to keep all the
		 * transactions asynchronous since the unlink
		 * transaction that made this inode inactive has
		 * already hit the disk.  There's no danger of
		 * the freed blocks being reused, there being a
		 * crash, and the reused blocks suddenly reappearing
		 * in this file with garbage in them once recovery
		 * runs.
		 */
		XFS_BMAP_INIT(&free_list, &first_block);
		error = xfs_bunmapi(ntp, ip,
				    first_unmap_block, unmap_len,
				    XFS_BMAPI_AFLAG(fork) |
				      (sync ? 0 : XFS_BMAPI_ASYNC),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    NULL, &done);
		if (error) {
			/*
			 * If the bunmapi call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(tp, &free_list, &committed);
		ntp = *tp;
		if (committed) {
			/* link the inode into the next xact in the chain */
			xfs_trans_ijoin(ntp, ip,
					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
			xfs_trans_ihold(ntp, ip);
		}

		if (error) {
			/*
			 * If the bmap finish call encounters an error, return
			 * to the caller where the transaction can be properly
			 * aborted.  We just need to make sure we're not
			 * holding any resources that we were not when we came
			 * in.
			 *
			 * Aborting from this point might lose some blocks in
			 * the file system, but oh well.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(ntp);
		error = xfs_trans_commit(*tp, 0);
		*tp = ntp;

		/* link the inode into the next transaction in the chain */
		xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
		xfs_trans_ihold(ntp, ip);

		if (!error)
			error = xfs_trans_reserve(ntp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			return error;
	}
	/*
	 * Only update the size in the case of the data fork, but
	 * always re-log the inode so that our permanent transaction
	 * can keep on rolling it forward in the log.
	 */
	if (fork == XFS_DATA_FORK) {
		xfs_isize_check(mp, ip, new_size);
If we 1746 * update the on-disk file size and then the system 1747 * crashes before the contents of the file are 1748 * flushed to disk then the files may be full of 1749 * holes (ie NULL files bug). 1750 */ 1751 if (ip->i_size != new_size) { 1752 ip->i_d.di_size = new_size; 1753 ip->i_size = new_size; 1754 } 1755 } 1756 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); 1757 ASSERT((new_size != 0) || 1758 (fork == XFS_ATTR_FORK) || 1759 (ip->i_delayed_blks == 0)); 1760 ASSERT((new_size != 0) || 1761 (fork == XFS_ATTR_FORK) || 1762 (ip->i_d.di_nextents == 0)); 1763 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0); 1764 return 0; 1765 } 1766 1767 1768 /* 1769 * xfs_igrow_start 1770 * 1771 * Do the first part of growing a file: zero any data in the last 1772 * block that is beyond the old EOF. We need to do this before 1773 * the inode is joined to the transaction to modify the i_size. 1774 * That way we can drop the inode lock and call into the buffer 1775 * cache to get the buffer mapping the EOF. 1776 */ 1777 int 1778 xfs_igrow_start( 1779 xfs_inode_t *ip, 1780 xfs_fsize_t new_size, 1781 cred_t *credp) 1782 { 1783 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); 1784 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); 1785 ASSERT(new_size > ip->i_size); 1786 1787 /* 1788 * Zero any pages that may have been created by 1789 * xfs_write_file() beyond the end of the file 1790 * and any blocks between the old and new file sizes. 1791 */ 1792 return xfs_zero_eof(ip, new_size, ip->i_size); 1793 } 1794 1795 /* 1796 * xfs_igrow_finish 1797 * 1798 * This routine is called to extend the size of a file. 1799 * The inode must have both the iolock and the ilock locked 1800 * for update and it must be a part of the current transaction. 1801 * The xfs_igrow_start() function must have been called previously. 1802 * If the change_flag is not zero, the inode change timestamp will 1803 * be updated. 1804 */ 1805 void 1806 xfs_igrow_finish( 1807 xfs_trans_t *tp, 1808 xfs_inode_t *ip, 1809 xfs_fsize_t new_size, 1810 int change_flag) 1811 { 1812 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); 1813 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); 1814 ASSERT(ip->i_transp == tp); 1815 ASSERT(new_size > ip->i_size); 1816 1817 /* 1818 * Update the file size. Update the inode change timestamp 1819 * if change_flag set. 1820 */ 1821 ip->i_d.di_size = new_size; 1822 ip->i_size = new_size; 1823 if (change_flag) 1824 xfs_ichgtime(ip, XFS_ICHGTIME_CHG); 1825 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1826 1827 } 1828 1829 1830 /* 1831 * This is called when the inode's link count goes to 0. 1832 * We place the on-disk inode on a list in the AGI. It 1833 * will be pulled from this list when the inode is freed. 1834 */ 1835 int 1836 xfs_iunlink( 1837 xfs_trans_t *tp, 1838 xfs_inode_t *ip) 1839 { 1840 xfs_mount_t *mp; 1841 xfs_agi_t *agi; 1842 xfs_dinode_t *dip; 1843 xfs_buf_t *agibp; 1844 xfs_buf_t *ibp; 1845 xfs_agnumber_t agno; 1846 xfs_daddr_t agdaddr; 1847 xfs_agino_t agino; 1848 short bucket_index; 1849 int offset; 1850 int error; 1851 int agi_ok; 1852 1853 ASSERT(ip->i_d.di_nlink == 0); 1854 ASSERT(ip->i_d.di_mode != 0); 1855 ASSERT(ip->i_transp == tp); 1856 1857 mp = tp->t_mountp; 1858 1859 agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 1860 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); 1861 1862 /* 1863 * Get the agi buffer first. It ensures lock ordering 1864 * on the list. 
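 *
 * The AGI holds XFS_AGI_UNLINKED_BUCKETS singly linked, NULLAGINO
 * terminated lists of unlinked inodes, chained through the on-disk
 * di_next_unlinked fields. The insertion below is at the head of
 * the bucket; stripped of the endian handling and buffer logging it
 * amounts to:
 *
 *	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
 *	dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
 *	agi->agi_unlinked[bucket_index] = agino;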
1865 */ 1866 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, 1867 XFS_FSS_TO_BB(mp, 1), 0, &agibp); 1868 if (error) 1869 return error; 1870 1871 /* 1872 * Validate the magic number of the agi block. 1873 */ 1874 agi = XFS_BUF_TO_AGI(agibp); 1875 agi_ok = 1876 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && 1877 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); 1878 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK, 1879 XFS_RANDOM_IUNLINK))) { 1880 XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi); 1881 xfs_trans_brelse(tp, agibp); 1882 return XFS_ERROR(EFSCORRUPTED); 1883 } 1884 /* 1885 * Get the index into the agi hash table for the 1886 * list this inode will go on. 1887 */ 1888 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1889 ASSERT(agino != 0); 1890 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1891 ASSERT(agi->agi_unlinked[bucket_index]); 1892 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); 1893 1894 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { 1895 /* 1896 * There is already another inode in the bucket we need 1897 * to add ourselves to. Add us at the front of the list. 1898 * Here we put the head pointer into our next pointer, 1899 * and then we fall through to point the head at us. 1900 */ 1901 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); 1902 if (error) 1903 return error; 1904 1905 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); 1906 /* both on-disk, don't endian flip twice */ 1907 dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; 1908 offset = ip->i_boffset + 1909 offsetof(xfs_dinode_t, di_next_unlinked); 1910 xfs_trans_inode_buf(tp, ibp); 1911 xfs_trans_log_buf(tp, ibp, offset, 1912 (offset + sizeof(xfs_agino_t) - 1)); 1913 xfs_inobp_check(mp, ibp); 1914 } 1915 1916 /* 1917 * Point the bucket head pointer at the inode being inserted. 1918 */ 1919 ASSERT(agino != 0); 1920 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); 1921 offset = offsetof(xfs_agi_t, agi_unlinked) + 1922 (sizeof(xfs_agino_t) * bucket_index); 1923 xfs_trans_log_buf(tp, agibp, offset, 1924 (offset + sizeof(xfs_agino_t) - 1)); 1925 return 0; 1926 } 1927 1928 /* 1929 * Pull the on-disk inode from the AGI unlinked list. 1930 */ 1931 STATIC int 1932 xfs_iunlink_remove( 1933 xfs_trans_t *tp, 1934 xfs_inode_t *ip) 1935 { 1936 xfs_ino_t next_ino; 1937 xfs_mount_t *mp; 1938 xfs_agi_t *agi; 1939 xfs_dinode_t *dip; 1940 xfs_buf_t *agibp; 1941 xfs_buf_t *ibp; 1942 xfs_agnumber_t agno; 1943 xfs_daddr_t agdaddr; 1944 xfs_agino_t agino; 1945 xfs_agino_t next_agino; 1946 xfs_buf_t *last_ibp; 1947 xfs_dinode_t *last_dip = NULL; 1948 short bucket_index; 1949 int offset, last_offset = 0; 1950 int error; 1951 int agi_ok; 1952 1953 /* 1954 * First pull the on-disk inode from the AGI unlinked list. 1955 */ 1956 mp = tp->t_mountp; 1957 1958 agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 1959 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); 1960 1961 /* 1962 * Get the agi buffer first. It ensures lock ordering 1963 * on the list. 1964 */ 1965 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, 1966 XFS_FSS_TO_BB(mp, 1), 0, &agibp); 1967 if (error) { 1968 cmn_err(CE_WARN, 1969 "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.", 1970 error, mp->m_fsname); 1971 return error; 1972 } 1973 /* 1974 * Validate the magic number of the agi block. 
1975 */ 1976 agi = XFS_BUF_TO_AGI(agibp); 1977 agi_ok = 1978 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && 1979 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); 1980 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE, 1981 XFS_RANDOM_IUNLINK_REMOVE))) { 1982 XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW, 1983 mp, agi); 1984 xfs_trans_brelse(tp, agibp); 1985 cmn_err(CE_WARN, 1986 "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.", 1987 mp->m_fsname); 1988 return XFS_ERROR(EFSCORRUPTED); 1989 } 1990 /* 1991 * Get the index into the agi hash table for the 1992 * list this inode will go on. 1993 */ 1994 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1995 ASSERT(agino != 0); 1996 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1997 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO); 1998 ASSERT(agi->agi_unlinked[bucket_index]); 1999 2000 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { 2001 /* 2002 * We're at the head of the list. Get the inode's 2003 * on-disk buffer to see if there is anyone after us 2004 * on the list. Only modify our next pointer if it 2005 * is not already NULLAGINO. This saves us the overhead 2006 * of dealing with the buffer when there is no need to 2007 * change it. 2008 */ 2009 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); 2010 if (error) { 2011 cmn_err(CE_WARN, 2012 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", 2013 error, mp->m_fsname); 2014 return error; 2015 } 2016 next_agino = be32_to_cpu(dip->di_next_unlinked); 2017 ASSERT(next_agino != 0); 2018 if (next_agino != NULLAGINO) { 2019 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 2020 offset = ip->i_boffset + 2021 offsetof(xfs_dinode_t, di_next_unlinked); 2022 xfs_trans_inode_buf(tp, ibp); 2023 xfs_trans_log_buf(tp, ibp, offset, 2024 (offset + sizeof(xfs_agino_t) - 1)); 2025 xfs_inobp_check(mp, ibp); 2026 } else { 2027 xfs_trans_brelse(tp, ibp); 2028 } 2029 /* 2030 * Point the bucket head pointer at the next inode. 2031 */ 2032 ASSERT(next_agino != 0); 2033 ASSERT(next_agino != agino); 2034 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); 2035 offset = offsetof(xfs_agi_t, agi_unlinked) + 2036 (sizeof(xfs_agino_t) * bucket_index); 2037 xfs_trans_log_buf(tp, agibp, offset, 2038 (offset + sizeof(xfs_agino_t) - 1)); 2039 } else { 2040 /* 2041 * We need to search the list for the inode being freed. 2042 */ 2043 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 2044 last_ibp = NULL; 2045 while (next_agino != agino) { 2046 /* 2047 * If the last inode wasn't the one pointing to 2048 * us, then release its buffer since we're not 2049 * going to do anything with it. 2050 */ 2051 if (last_ibp != NULL) { 2052 xfs_trans_brelse(tp, last_ibp); 2053 } 2054 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); 2055 error = xfs_inotobp(mp, tp, next_ino, &last_dip, 2056 &last_ibp, &last_offset); 2057 if (error) { 2058 cmn_err(CE_WARN, 2059 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.", 2060 error, mp->m_fsname); 2061 return error; 2062 } 2063 next_agino = be32_to_cpu(last_dip->di_next_unlinked); 2064 ASSERT(next_agino != NULLAGINO); 2065 ASSERT(next_agino != 0); 2066 } 2067 /* 2068 * Now last_ibp points to the buffer previous to us on 2069 * the unlinked list. Pull us from the list. 
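 *
 * Stripped of the endian handling and buffer logging, the removal
 * below is the usual singly linked list unlink:
 *
 *	last_dip->di_next_unlinked = dip->di_next_unlinked;
 *	dip->di_next_unlinked = NULLAGINO;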
2070 */ 2071 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); 2072 if (error) { 2073 cmn_err(CE_WARN, 2074 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", 2075 error, mp->m_fsname); 2076 return error; 2077 } 2078 next_agino = be32_to_cpu(dip->di_next_unlinked); 2079 ASSERT(next_agino != 0); 2080 ASSERT(next_agino != agino); 2081 if (next_agino != NULLAGINO) { 2082 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 2083 offset = ip->i_boffset + 2084 offsetof(xfs_dinode_t, di_next_unlinked); 2085 xfs_trans_inode_buf(tp, ibp); 2086 xfs_trans_log_buf(tp, ibp, offset, 2087 (offset + sizeof(xfs_agino_t) - 1)); 2088 xfs_inobp_check(mp, ibp); 2089 } else { 2090 xfs_trans_brelse(tp, ibp); 2091 } 2092 /* 2093 * Point the previous inode on the list to the next inode. 2094 */ 2095 last_dip->di_next_unlinked = cpu_to_be32(next_agino); 2096 ASSERT(next_agino != 0); 2097 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked); 2098 xfs_trans_inode_buf(tp, last_ibp); 2099 xfs_trans_log_buf(tp, last_ibp, offset, 2100 (offset + sizeof(xfs_agino_t) - 1)); 2101 xfs_inobp_check(mp, last_ibp); 2102 } 2103 return 0; 2104 } 2105 2106 STATIC void 2107 xfs_ifree_cluster( 2108 xfs_inode_t *free_ip, 2109 xfs_trans_t *tp, 2110 xfs_ino_t inum) 2111 { 2112 xfs_mount_t *mp = free_ip->i_mount; 2113 int blks_per_cluster; 2114 int nbufs; 2115 int ninodes; 2116 int i, j, found, pre_flushed; 2117 xfs_daddr_t blkno; 2118 xfs_buf_t *bp; 2119 xfs_inode_t *ip, **ip_found; 2120 xfs_inode_log_item_t *iip; 2121 xfs_log_item_t *lip; 2122 xfs_perag_t *pag = xfs_get_perag(mp, inum); 2123 2124 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { 2125 blks_per_cluster = 1; 2126 ninodes = mp->m_sb.sb_inopblock; 2127 nbufs = XFS_IALLOC_BLOCKS(mp); 2128 } else { 2129 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) / 2130 mp->m_sb.sb_blocksize; 2131 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock; 2132 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster; 2133 } 2134 2135 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS); 2136 2137 for (j = 0; j < nbufs; j++, inum += ninodes) { 2138 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 2139 XFS_INO_TO_AGBNO(mp, inum)); 2140 2141 2142 /* 2143 * Look for each inode in memory and attempt to lock it, 2144 * we can be racing with flush and tail pushing here. 2145 * any inode we get the locks on, add to an array of 2146 * inode items to process later. 2147 * 2148 * The get the buffer lock, we could beat a flush 2149 * or tail pushing thread to the lock here, in which 2150 * case they will go looking for the inode buffer 2151 * and fail, we need some other form of interlock 2152 * here. 2153 */ 2154 found = 0; 2155 for (i = 0; i < ninodes; i++) { 2156 read_lock(&pag->pag_ici_lock); 2157 ip = radix_tree_lookup(&pag->pag_ici_root, 2158 XFS_INO_TO_AGINO(mp, (inum + i))); 2159 2160 /* Inode not in memory or we found it already, 2161 * nothing to do 2162 */ 2163 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) { 2164 read_unlock(&pag->pag_ici_lock); 2165 continue; 2166 } 2167 2168 if (xfs_inode_clean(ip)) { 2169 read_unlock(&pag->pag_ici_lock); 2170 continue; 2171 } 2172 2173 /* If we can get the locks then add it to the 2174 * list, otherwise by the time we get the bp lock 2175 * below it will already be attached to the 2176 * inode buffer. 2177 */ 2178 2179 /* This inode will already be locked - by us, lets 2180 * keep it that way. 
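 *
 * Either way the per-inode outcome is the same (a simplified view of
 * the checks below): if the flush lock is obtained, the inode is
 * marked XFS_ISTALE; a clean inode just has its flush lock dropped
 * again, while a dirty one is remembered in ip_found[] so its log
 * item can be attached to the cluster buffer and completed via
 * xfs_istale_done() when the buffer I/O finishes.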
2181 */ 2182 2183 if (ip == free_ip) { 2184 if (xfs_iflock_nowait(ip)) { 2185 xfs_iflags_set(ip, XFS_ISTALE); 2186 if (xfs_inode_clean(ip)) { 2187 xfs_ifunlock(ip); 2188 } else { 2189 ip_found[found++] = ip; 2190 } 2191 } 2192 read_unlock(&pag->pag_ici_lock); 2193 continue; 2194 } 2195 2196 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2197 if (xfs_iflock_nowait(ip)) { 2198 xfs_iflags_set(ip, XFS_ISTALE); 2199 2200 if (xfs_inode_clean(ip)) { 2201 xfs_ifunlock(ip); 2202 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2203 } else { 2204 ip_found[found++] = ip; 2205 } 2206 } else { 2207 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2208 } 2209 } 2210 read_unlock(&pag->pag_ici_lock); 2211 } 2212 2213 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2214 mp->m_bsize * blks_per_cluster, 2215 XFS_BUF_LOCK); 2216 2217 pre_flushed = 0; 2218 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); 2219 while (lip) { 2220 if (lip->li_type == XFS_LI_INODE) { 2221 iip = (xfs_inode_log_item_t *)lip; 2222 ASSERT(iip->ili_logged == 1); 2223 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; 2224 spin_lock(&mp->m_ail_lock); 2225 iip->ili_flush_lsn = iip->ili_item.li_lsn; 2226 spin_unlock(&mp->m_ail_lock); 2227 xfs_iflags_set(iip->ili_inode, XFS_ISTALE); 2228 pre_flushed++; 2229 } 2230 lip = lip->li_bio_list; 2231 } 2232 2233 for (i = 0; i < found; i++) { 2234 ip = ip_found[i]; 2235 iip = ip->i_itemp; 2236 2237 if (!iip) { 2238 ip->i_update_core = 0; 2239 xfs_ifunlock(ip); 2240 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2241 continue; 2242 } 2243 2244 iip->ili_last_fields = iip->ili_format.ilf_fields; 2245 iip->ili_format.ilf_fields = 0; 2246 iip->ili_logged = 1; 2247 spin_lock(&mp->m_ail_lock); 2248 iip->ili_flush_lsn = iip->ili_item.li_lsn; 2249 spin_unlock(&mp->m_ail_lock); 2250 2251 xfs_buf_attach_iodone(bp, 2252 (void(*)(xfs_buf_t*,xfs_log_item_t*)) 2253 xfs_istale_done, (xfs_log_item_t *)iip); 2254 if (ip != free_ip) { 2255 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2256 } 2257 } 2258 2259 if (found || pre_flushed) 2260 xfs_trans_stale_inode_buf(tp, bp); 2261 xfs_trans_binval(tp, bp); 2262 } 2263 2264 kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *)); 2265 xfs_put_perag(mp, pag); 2266 } 2267 2268 /* 2269 * This is called to return an inode to the inode free list. 2270 * The inode should already be truncated to 0 length and have 2271 * no pages associated with it. This routine also assumes that 2272 * the inode is already a part of the transaction. 2273 * 2274 * The on-disk copy of the inode will have been added to the list 2275 * of unlinked inodes in the AGI. We need to remove the inode from 2276 * that list atomically with respect to freeing it here. 2277 */ 2278 int 2279 xfs_ifree( 2280 xfs_trans_t *tp, 2281 xfs_inode_t *ip, 2282 xfs_bmap_free_t *flist) 2283 { 2284 int error; 2285 int delete; 2286 xfs_ino_t first_ino; 2287 xfs_dinode_t *dip; 2288 xfs_buf_t *ibp; 2289 2290 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); 2291 ASSERT(ip->i_transp == tp); 2292 ASSERT(ip->i_d.di_nlink == 0); 2293 ASSERT(ip->i_d.di_nextents == 0); 2294 ASSERT(ip->i_d.di_anextents == 0); 2295 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) || 2296 ((ip->i_d.di_mode & S_IFMT) != S_IFREG)); 2297 ASSERT(ip->i_d.di_nblocks == 0); 2298 2299 /* 2300 * Pull the on-disk inode from the AGI unlinked list. 
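 *
 * Overall order of operations in this function: take the inode off
 * the AGI unlinked list, free it in the inode allocation btree via
 * xfs_difree(), reset the incore core to an empty extents-format
 * inode, bump di_gen, and finally zero the on-disk di_mode. If
 * xfs_difree() reports that the whole cluster is now free, the
 * cluster buffer is invalidated through xfs_ifree_cluster().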
2301 */ 2302 error = xfs_iunlink_remove(tp, ip); 2303 if (error != 0) { 2304 return error; 2305 } 2306 2307 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino); 2308 if (error != 0) { 2309 return error; 2310 } 2311 ip->i_d.di_mode = 0; /* mark incore inode as free */ 2312 ip->i_d.di_flags = 0; 2313 ip->i_d.di_dmevmask = 0; 2314 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ 2315 ip->i_df.if_ext_max = 2316 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 2317 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; 2318 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 2319 /* 2320 * Bump the generation count so no one will be confused 2321 * by reincarnations of this inode. 2322 */ 2323 ip->i_d.di_gen++; 2324 2325 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2326 2327 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); 2328 if (error) 2329 return error; 2330 2331 /* 2332 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat 2333 * from picking up this inode when it is reclaimed (its incore state 2334 * initialzed but not flushed to disk yet). The in-core di_mode is 2335 * already cleared and a corresponding transaction logged. 2336 * The hack here just synchronizes the in-core to on-disk 2337 * di_mode value in advance before the actual inode sync to disk. 2338 * This is OK because the inode is already unlinked and would never 2339 * change its di_mode again for this inode generation. 2340 * This is a temporary hack that would require a proper fix 2341 * in the future. 2342 */ 2343 dip->di_core.di_mode = 0; 2344 2345 if (delete) { 2346 xfs_ifree_cluster(ip, tp, first_ino); 2347 } 2348 2349 return 0; 2350 } 2351 2352 /* 2353 * Reallocate the space for if_broot based on the number of records 2354 * being added or deleted as indicated in rec_diff. Move the records 2355 * and pointers in if_broot to fit the new size. When shrinking this 2356 * will eliminate holes between the records and pointers created by 2357 * the caller. When growing this will create holes to be filled in 2358 * by the caller. 2359 * 2360 * The caller must not request to add more records than would fit in 2361 * the on-disk inode root. If the if_broot is currently NULL, then 2362 * if we adding records one will be allocated. The caller must also 2363 * not request that the number of records go below zero, although 2364 * it can go to zero. 2365 * 2366 * ip -- the inode whose if_broot area is changing 2367 * ext_diff -- the change in the number of records, positive or negative, 2368 * requested for the if_broot array. 2369 */ 2370 void 2371 xfs_iroot_realloc( 2372 xfs_inode_t *ip, 2373 int rec_diff, 2374 int whichfork) 2375 { 2376 int cur_max; 2377 xfs_ifork_t *ifp; 2378 xfs_bmbt_block_t *new_broot; 2379 int new_max; 2380 size_t new_size; 2381 char *np; 2382 char *op; 2383 2384 /* 2385 * Handle the degenerate case quietly. 2386 */ 2387 if (rec_diff == 0) { 2388 return; 2389 } 2390 2391 ifp = XFS_IFORK_PTR(ip, whichfork); 2392 if (rec_diff > 0) { 2393 /* 2394 * If there wasn't any memory allocated before, just 2395 * allocate it now and get out. 2396 */ 2397 if (ifp->if_broot_bytes == 0) { 2398 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); 2399 ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size, 2400 KM_SLEEP); 2401 ifp->if_broot_bytes = (int)new_size; 2402 return; 2403 } 2404 2405 /* 2406 * If there is already an existing if_broot, then we need 2407 * to realloc() it and shift the pointers to their new 2408 * location. 
The records don't change location because 2409 * they are kept butted up against the btree block header. 2410 */ 2411 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); 2412 new_max = cur_max + rec_diff; 2413 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2414 ifp->if_broot = (xfs_bmbt_block_t *) 2415 kmem_realloc(ifp->if_broot, 2416 new_size, 2417 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ 2418 KM_SLEEP); 2419 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, 2420 ifp->if_broot_bytes); 2421 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, 2422 (int)new_size); 2423 ifp->if_broot_bytes = (int)new_size; 2424 ASSERT(ifp->if_broot_bytes <= 2425 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2426 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t)); 2427 return; 2428 } 2429 2430 /* 2431 * rec_diff is less than 0. In this case, we are shrinking the 2432 * if_broot buffer. It must already exist. If we go to zero 2433 * records, just get rid of the root and clear the status bit. 2434 */ 2435 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); 2436 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); 2437 new_max = cur_max + rec_diff; 2438 ASSERT(new_max >= 0); 2439 if (new_max > 0) 2440 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2441 else 2442 new_size = 0; 2443 if (new_size > 0) { 2444 new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP); 2445 /* 2446 * First copy over the btree block header. 2447 */ 2448 memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t)); 2449 } else { 2450 new_broot = NULL; 2451 ifp->if_flags &= ~XFS_IFBROOT; 2452 } 2453 2454 /* 2455 * Only copy the records and pointers if there are any. 2456 */ 2457 if (new_max > 0) { 2458 /* 2459 * First copy the records. 2460 */ 2461 op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1, 2462 ifp->if_broot_bytes); 2463 np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1, 2464 (int)new_size); 2465 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t)); 2466 2467 /* 2468 * Then copy the pointers. 2469 */ 2470 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, 2471 ifp->if_broot_bytes); 2472 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1, 2473 (int)new_size); 2474 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); 2475 } 2476 kmem_free(ifp->if_broot, ifp->if_broot_bytes); 2477 ifp->if_broot = new_broot; 2478 ifp->if_broot_bytes = (int)new_size; 2479 ASSERT(ifp->if_broot_bytes <= 2480 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2481 return; 2482 } 2483 2484 2485 /* 2486 * This is called when the amount of space needed for if_data 2487 * is increased or decreased. The change in size is indicated by 2488 * the number of bytes that need to be added or deleted in the 2489 * byte_diff parameter. 2490 * 2491 * If the amount of space needed has decreased below the size of the 2492 * inline buffer, then switch to using the inline buffer. Otherwise, 2493 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer 2494 * to what is needed. 2495 * 2496 * ip -- the inode whose if_data area is changing 2497 * byte_diff -- the change in the number of bytes, positive or negative, 2498 * requested for the if_data array. 
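 *
 * For example (illustrative figures only, assuming the usual 32 byte
 * if_inline_data area): growing a 30 byte local fork by 5 bytes gives
 * new_size = 35, which no longer fits inline, so the data moves to a
 * heap buffer of real_size = roundup(35, 4) = 36 bytes while if_bytes
 * records the true 35.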
2499 */ 2500 void 2501 xfs_idata_realloc( 2502 xfs_inode_t *ip, 2503 int byte_diff, 2504 int whichfork) 2505 { 2506 xfs_ifork_t *ifp; 2507 int new_size; 2508 int real_size; 2509 2510 if (byte_diff == 0) { 2511 return; 2512 } 2513 2514 ifp = XFS_IFORK_PTR(ip, whichfork); 2515 new_size = (int)ifp->if_bytes + byte_diff; 2516 ASSERT(new_size >= 0); 2517 2518 if (new_size == 0) { 2519 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2520 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); 2521 } 2522 ifp->if_u1.if_data = NULL; 2523 real_size = 0; 2524 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) { 2525 /* 2526 * If the valid extents/data can fit in if_inline_ext/data, 2527 * copy them from the malloc'd vector and free it. 2528 */ 2529 if (ifp->if_u1.if_data == NULL) { 2530 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2531 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2532 ASSERT(ifp->if_real_bytes != 0); 2533 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data, 2534 new_size); 2535 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); 2536 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2537 } 2538 real_size = 0; 2539 } else { 2540 /* 2541 * Stuck with malloc/realloc. 2542 * For inline data, the underlying buffer must be 2543 * a multiple of 4 bytes in size so that it can be 2544 * logged and stay on word boundaries. We enforce 2545 * that here. 2546 */ 2547 real_size = roundup(new_size, 4); 2548 if (ifp->if_u1.if_data == NULL) { 2549 ASSERT(ifp->if_real_bytes == 0); 2550 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 2551 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2552 /* 2553 * Only do the realloc if the underlying size 2554 * is really changing. 2555 */ 2556 if (ifp->if_real_bytes != real_size) { 2557 ifp->if_u1.if_data = 2558 kmem_realloc(ifp->if_u1.if_data, 2559 real_size, 2560 ifp->if_real_bytes, 2561 KM_SLEEP); 2562 } 2563 } else { 2564 ASSERT(ifp->if_real_bytes == 0); 2565 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 2566 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, 2567 ifp->if_bytes); 2568 } 2569 } 2570 ifp->if_real_bytes = real_size; 2571 ifp->if_bytes = new_size; 2572 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2573 } 2574 2575 2576 2577 2578 /* 2579 * Map inode to disk block and offset. 2580 * 2581 * mp -- the mount point structure for the current file system 2582 * tp -- the current transaction 2583 * ino -- the inode number of the inode to be located 2584 * imap -- this structure is filled in with the information necessary 2585 * to retrieve the given inode from disk 2586 * flags -- flags to pass to xfs_dilocate indicating whether or not 2587 * lookups in the inode btree were OK or not 2588 */ 2589 int 2590 xfs_imap( 2591 xfs_mount_t *mp, 2592 xfs_trans_t *tp, 2593 xfs_ino_t ino, 2594 xfs_imap_t *imap, 2595 uint flags) 2596 { 2597 xfs_fsblock_t fsbno; 2598 int len; 2599 int off; 2600 int error; 2601 2602 fsbno = imap->im_blkno ? 
2603 XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK; 2604 error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags); 2605 if (error) 2606 return error; 2607 2608 imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno); 2609 imap->im_len = XFS_FSB_TO_BB(mp, len); 2610 imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno); 2611 imap->im_ioffset = (ushort)off; 2612 imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog); 2613 2614 /* 2615 * If the inode number maps to a block outside the bounds 2616 * of the file system then return an error rather than calling 2617 * read_buf and panicking when we get an error from the 2618 * driver. 2619 */ 2620 if ((imap->im_blkno + imap->im_len) > 2621 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { 2622 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " 2623 "(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > " 2624 " XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)", 2625 (unsigned long long) imap->im_blkno, 2626 (unsigned long long) imap->im_len, 2627 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); 2628 return EINVAL; 2629 } 2630 return 0; 2631 } 2632 2633 void 2634 xfs_idestroy_fork( 2635 xfs_inode_t *ip, 2636 int whichfork) 2637 { 2638 xfs_ifork_t *ifp; 2639 2640 ifp = XFS_IFORK_PTR(ip, whichfork); 2641 if (ifp->if_broot != NULL) { 2642 kmem_free(ifp->if_broot, ifp->if_broot_bytes); 2643 ifp->if_broot = NULL; 2644 } 2645 2646 /* 2647 * If the format is local, then we can't have an extents 2648 * array so just look for an inline data array. If we're 2649 * not local then we may or may not have an extents list, 2650 * so check and free it up if we do. 2651 */ 2652 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 2653 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) && 2654 (ifp->if_u1.if_data != NULL)) { 2655 ASSERT(ifp->if_real_bytes != 0); 2656 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); 2657 ifp->if_u1.if_data = NULL; 2658 ifp->if_real_bytes = 0; 2659 } 2660 } else if ((ifp->if_flags & XFS_IFEXTENTS) && 2661 ((ifp->if_flags & XFS_IFEXTIREC) || 2662 ((ifp->if_u1.if_extents != NULL) && 2663 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) { 2664 ASSERT(ifp->if_real_bytes != 0); 2665 xfs_iext_destroy(ifp); 2666 } 2667 ASSERT(ifp->if_u1.if_extents == NULL || 2668 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); 2669 ASSERT(ifp->if_real_bytes == 0); 2670 if (whichfork == XFS_ATTR_FORK) { 2671 kmem_zone_free(xfs_ifork_zone, ip->i_afp); 2672 ip->i_afp = NULL; 2673 } 2674 } 2675 2676 /* 2677 * This is called to free all the memory associated with an inode. 2678 * It must free the inode itself and any buffers allocated for 2679 * if_extents/if_data and if_broot. It must also free the lock 2680 * associated with the inode.
2681 */ 2682 void 2683 xfs_idestroy( 2684 xfs_inode_t *ip) 2685 { 2686 switch (ip->i_d.di_mode & S_IFMT) { 2687 case S_IFREG: 2688 case S_IFDIR: 2689 case S_IFLNK: 2690 xfs_idestroy_fork(ip, XFS_DATA_FORK); 2691 break; 2692 } 2693 if (ip->i_afp) 2694 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 2695 mrfree(&ip->i_lock); 2696 mrfree(&ip->i_iolock); 2697 freesema(&ip->i_flock); 2698 2699 #ifdef XFS_INODE_TRACE 2700 ktrace_free(ip->i_trace); 2701 #endif 2702 #ifdef XFS_BMAP_TRACE 2703 ktrace_free(ip->i_xtrace); 2704 #endif 2705 #ifdef XFS_BMBT_TRACE 2706 ktrace_free(ip->i_btrace); 2707 #endif 2708 #ifdef XFS_RW_TRACE 2709 ktrace_free(ip->i_rwtrace); 2710 #endif 2711 #ifdef XFS_ILOCK_TRACE 2712 ktrace_free(ip->i_lock_trace); 2713 #endif 2714 #ifdef XFS_DIR2_TRACE 2715 ktrace_free(ip->i_dir_trace); 2716 #endif 2717 if (ip->i_itemp) { 2718 /* 2719 * Only if we are shutting down the fs will we see an 2720 * inode still in the AIL. If it is there, we should remove 2721 * it to prevent a use-after-free from occurring. 2722 */ 2723 xfs_mount_t *mp = ip->i_mount; 2724 xfs_log_item_t *lip = &ip->i_itemp->ili_item; 2725 2726 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || 2727 XFS_FORCED_SHUTDOWN(ip->i_mount)); 2728 if (lip->li_flags & XFS_LI_IN_AIL) { 2729 spin_lock(&mp->m_ail_lock); 2730 if (lip->li_flags & XFS_LI_IN_AIL) 2731 xfs_trans_delete_ail(mp, lip); 2732 else 2733 spin_unlock(&mp->m_ail_lock); 2734 } 2735 xfs_inode_item_destroy(ip); 2736 } 2737 kmem_zone_free(xfs_inode_zone, ip); 2738 } 2739 2740 2741 /* 2742 * Increment the pin count of the given buffer. 2743 * This value is protected by ipinlock spinlock in the mount structure. 2744 */ 2745 void 2746 xfs_ipin( 2747 xfs_inode_t *ip) 2748 { 2749 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); 2750 2751 atomic_inc(&ip->i_pincount); 2752 } 2753 2754 /* 2755 * Decrement the pin count of the given inode, and wake up 2756 * anyone in xfs_iwait_unpin() if the count goes to 0. The 2757 * inode must have been previously pinned with a call to xfs_ipin(). 2758 */ 2759 void 2760 xfs_iunpin( 2761 xfs_inode_t *ip) 2762 { 2763 ASSERT(atomic_read(&ip->i_pincount) > 0); 2764 2765 if (atomic_dec_and_test(&ip->i_pincount)) 2766 wake_up(&ip->i_ipin_wait); 2767 } 2768 2769 /* 2770 * This is called to unpin an inode. It can be directed to wait or to return 2771 * immediately without waiting for the inode to be unpinned. The caller must 2772 * have the inode locked in at least shared mode so that the buffer cannot be 2773 * subsequently pinned once someone is waiting for it to be unpinned. 2774 */ 2775 STATIC void 2776 __xfs_iunpin_wait( 2777 xfs_inode_t *ip, 2778 int wait) 2779 { 2780 xfs_inode_log_item_t *iip = ip->i_itemp; 2781 2782 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS)); 2783 if (atomic_read(&ip->i_pincount) == 0) 2784 return; 2785 2786 /* Give the log a push to start the unpinning I/O */ 2787 xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ? 2788 iip->ili_last_lsn : 0, XFS_LOG_FORCE); 2789 if (wait) 2790 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0)); 2791 } 2792 2793 static inline void 2794 xfs_iunpin_wait( 2795 xfs_inode_t *ip) 2796 { 2797 __xfs_iunpin_wait(ip, 1); 2798 } 2799 2800 static inline void 2801 xfs_iunpin_nowait( 2802 xfs_inode_t *ip) 2803 { 2804 __xfs_iunpin_wait(ip, 0); 2805 } 2806 2807 2808 /* 2809 * xfs_iextents_copy() 2810 * 2811 * This is called to copy the REAL extents (as opposed to the delayed 2812 * allocation extents) from the inode into the given buffer. 
It 2813 * returns the number of bytes copied into the buffer. 2814 * 2815 * If there are no delayed allocation extents, then we can just 2816 * memcpy() the extents into the buffer. Otherwise, we need to 2817 * examine each extent in turn and skip those which are delayed. 2818 */ 2819 int 2820 xfs_iextents_copy( 2821 xfs_inode_t *ip, 2822 xfs_bmbt_rec_t *dp, 2823 int whichfork) 2824 { 2825 int copied; 2826 int i; 2827 xfs_ifork_t *ifp; 2828 int nrecs; 2829 xfs_fsblock_t start_block; 2830 2831 ifp = XFS_IFORK_PTR(ip, whichfork); 2832 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); 2833 ASSERT(ifp->if_bytes > 0); 2834 2835 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2836 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork); 2837 ASSERT(nrecs > 0); 2838 2839 /* 2840 * There are some delayed allocation extents in the 2841 * inode, so copy the extents one at a time and skip 2842 * the delayed ones. There must be at least one 2843 * non-delayed extent. 2844 */ 2845 copied = 0; 2846 for (i = 0; i < nrecs; i++) { 2847 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); 2848 start_block = xfs_bmbt_get_startblock(ep); 2849 if (ISNULLSTARTBLOCK(start_block)) { 2850 /* 2851 * It's a delayed allocation extent, so skip it. 2852 */ 2853 continue; 2854 } 2855 2856 /* Translate to on disk format */ 2857 put_unaligned(cpu_to_be64(ep->l0), &dp->l0); 2858 put_unaligned(cpu_to_be64(ep->l1), &dp->l1); 2859 dp++; 2860 copied++; 2861 } 2862 ASSERT(copied != 0); 2863 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip)); 2864 2865 return (copied * (uint)sizeof(xfs_bmbt_rec_t)); 2866 } 2867 2868 /* 2869 * Each of the following cases stores data into the same region 2870 * of the on-disk inode, so only one of them can be valid at 2871 * any given time. While it is possible to have conflicting formats 2872 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is 2873 * in EXTENTS format, this can only happen when the fork has 2874 * changed formats after being modified but before being flushed. 2875 * In these cases, the format always takes precedence, because the 2876 * format indicates the current state of the fork. 2877 */ 2878 /*ARGSUSED*/ 2879 STATIC void 2880 xfs_iflush_fork( 2881 xfs_inode_t *ip, 2882 xfs_dinode_t *dip, 2883 xfs_inode_log_item_t *iip, 2884 int whichfork, 2885 xfs_buf_t *bp) 2886 { 2887 char *cp; 2888 xfs_ifork_t *ifp; 2889 xfs_mount_t *mp; 2890 #ifdef XFS_TRANS_DEBUG 2891 int first; 2892 #endif 2893 static const short brootflag[2] = 2894 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 2895 static const short dataflag[2] = 2896 { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; 2897 static const short extflag[2] = 2898 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 2899 2900 if (!iip) 2901 return; 2902 ifp = XFS_IFORK_PTR(ip, whichfork); 2903 /* 2904 * This can happen if we gave up in iformat in an error path, 2905 * for the attribute fork. 
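 *
 * Otherwise the switch below simply dispatches on the incore fork
 * format: LOCAL forks memcpy() if_data into the literal area,
 * EXTENTS forks copy only the real (non-delayed) extents with
 * xfs_iextents_copy(), BTREE forks convert the incore root with
 * xfs_bmbt_to_bmdr(), and DEV/UUID inodes copy the device number or
 * uuid into the on-disk union.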
2906 */ 2907 if (!ifp) { 2908 ASSERT(whichfork == XFS_ATTR_FORK); 2909 return; 2910 } 2911 cp = XFS_DFORK_PTR(dip, whichfork); 2912 mp = ip->i_mount; 2913 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 2914 case XFS_DINODE_FMT_LOCAL: 2915 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) && 2916 (ifp->if_bytes > 0)) { 2917 ASSERT(ifp->if_u1.if_data != NULL); 2918 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2919 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); 2920 } 2921 break; 2922 2923 case XFS_DINODE_FMT_EXTENTS: 2924 ASSERT((ifp->if_flags & XFS_IFEXTENTS) || 2925 !(iip->ili_format.ilf_fields & extflag[whichfork])); 2926 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) || 2927 (ifp->if_bytes == 0)); 2928 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) || 2929 (ifp->if_bytes > 0)); 2930 if ((iip->ili_format.ilf_fields & extflag[whichfork]) && 2931 (ifp->if_bytes > 0)) { 2932 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); 2933 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, 2934 whichfork); 2935 } 2936 break; 2937 2938 case XFS_DINODE_FMT_BTREE: 2939 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) && 2940 (ifp->if_broot_bytes > 0)) { 2941 ASSERT(ifp->if_broot != NULL); 2942 ASSERT(ifp->if_broot_bytes <= 2943 (XFS_IFORK_SIZE(ip, whichfork) + 2944 XFS_BROOT_SIZE_ADJ)); 2945 xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes, 2946 (xfs_bmdr_block_t *)cp, 2947 XFS_DFORK_SIZE(dip, mp, whichfork)); 2948 } 2949 break; 2950 2951 case XFS_DINODE_FMT_DEV: 2952 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { 2953 ASSERT(whichfork == XFS_DATA_FORK); 2954 dip->di_u.di_dev = cpu_to_be32(ip->i_df.if_u2.if_rdev); 2955 } 2956 break; 2957 2958 case XFS_DINODE_FMT_UUID: 2959 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { 2960 ASSERT(whichfork == XFS_DATA_FORK); 2961 memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid, 2962 sizeof(uuid_t)); 2963 } 2964 break; 2965 2966 default: 2967 ASSERT(0); 2968 break; 2969 } 2970 } 2971 2972 STATIC int 2973 xfs_iflush_cluster( 2974 xfs_inode_t *ip, 2975 xfs_buf_t *bp) 2976 { 2977 xfs_mount_t *mp = ip->i_mount; 2978 xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); 2979 unsigned long first_index, mask; 2980 int ilist_size; 2981 xfs_inode_t **ilist; 2982 xfs_inode_t *iq; 2983 int nr_found; 2984 int clcount = 0; 2985 int bufwasdelwri; 2986 int i; 2987 2988 ASSERT(pag->pagi_inodeok); 2989 ASSERT(pag->pag_ici_init); 2990 2991 ilist_size = XFS_INODE_CLUSTER_SIZE(mp) * sizeof(xfs_inode_t *); 2992 ilist = kmem_alloc(ilist_size, KM_MAYFAIL); 2993 if (!ilist) 2994 return 0; 2995 2996 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); 2997 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; 2998 read_lock(&pag->pag_ici_lock); 2999 /* really need a gang lookup range call here */ 3000 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, 3001 first_index, 3002 XFS_INODE_CLUSTER_SIZE(mp)); 3003 if (nr_found == 0) 3004 goto out_free; 3005 3006 for (i = 0; i < nr_found; i++) { 3007 iq = ilist[i]; 3008 if (iq == ip) 3009 continue; 3010 /* if the inode lies outside this cluster, we're done. */ 3011 if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) 3012 break; 3013 /* 3014 * Do an un-protected check to see if the inode is dirty and 3015 * is a candidate for flushing. These checks will be repeated 3016 * later after the appropriate locks are acquired. 3017 */ 3018 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0) 3019 continue; 3020 3021 /* 3022 * Try to get locks. 
If any are unavailable or it is pinned, 3023 * then this inode cannot be flushed and is skipped. 3024 */ 3025 3026 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) 3027 continue; 3028 if (!xfs_iflock_nowait(iq)) { 3029 xfs_iunlock(iq, XFS_ILOCK_SHARED); 3030 continue; 3031 } 3032 if (xfs_ipincount(iq)) { 3033 xfs_ifunlock(iq); 3034 xfs_iunlock(iq, XFS_ILOCK_SHARED); 3035 continue; 3036 } 3037 3038 /* 3039 * arriving here means that this inode can be flushed. First 3040 * re-check that it's dirty before flushing. 3041 */ 3042 if (!xfs_inode_clean(iq)) { 3043 int error; 3044 error = xfs_iflush_int(iq, bp); 3045 if (error) { 3046 xfs_iunlock(iq, XFS_ILOCK_SHARED); 3047 goto cluster_corrupt_out; 3048 } 3049 clcount++; 3050 } else { 3051 xfs_ifunlock(iq); 3052 } 3053 xfs_iunlock(iq, XFS_ILOCK_SHARED); 3054 } 3055 3056 if (clcount) { 3057 XFS_STATS_INC(xs_icluster_flushcnt); 3058 XFS_STATS_ADD(xs_icluster_flushinode, clcount); 3059 } 3060 3061 out_free: 3062 read_unlock(&pag->pag_ici_lock); 3063 kmem_free(ilist, ilist_size); 3064 return 0; 3065 3066 3067 cluster_corrupt_out: 3068 /* 3069 * Corruption detected in the clustering loop. Invalidate the 3070 * inode buffer and shut down the filesystem. 3071 */ 3072 read_unlock(&pag->pag_ici_lock); 3073 /* 3074 * Clean up the buffer. If it was B_DELWRI, just release it -- 3075 * brelse can handle it with no problems. If not, shut down the 3076 * filesystem before releasing the buffer. 3077 */ 3078 bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp); 3079 if (bufwasdelwri) 3080 xfs_buf_relse(bp); 3081 3082 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3083 3084 if (!bufwasdelwri) { 3085 /* 3086 * Just like incore_relse: if we have b_iodone functions, 3087 * mark the buffer as an error and call them. Otherwise 3088 * mark it as stale and brelse. 3089 */ 3090 if (XFS_BUF_IODONE_FUNC(bp)) { 3091 XFS_BUF_CLR_BDSTRAT_FUNC(bp); 3092 XFS_BUF_UNDONE(bp); 3093 XFS_BUF_STALE(bp); 3094 XFS_BUF_SHUT(bp); 3095 XFS_BUF_ERROR(bp,EIO); 3096 xfs_biodone(bp); 3097 } else { 3098 XFS_BUF_STALE(bp); 3099 xfs_buf_relse(bp); 3100 } 3101 } 3102 3103 /* 3104 * Unlocks the flush lock 3105 */ 3106 xfs_iflush_abort(iq); 3107 kmem_free(ilist, ilist_size); 3108 return XFS_ERROR(EFSCORRUPTED); 3109 } 3110 3111 /* 3112 * xfs_iflush() will write a modified inode's changes out to the 3113 * inode's on disk home. The caller must have the inode lock held 3114 * in at least shared mode and the inode flush semaphore must be 3115 * held as well. The inode lock will still be held upon return from 3116 * the call and the caller is free to unlock it. 3117 * The inode flush lock will be unlocked when the inode reaches the disk. 3118 * The flags indicate how the inode's buffer should be written out. 3119 */ 3120 int 3121 xfs_iflush( 3122 xfs_inode_t *ip, 3123 uint flags) 3124 { 3125 xfs_inode_log_item_t *iip; 3126 xfs_buf_t *bp; 3127 xfs_dinode_t *dip; 3128 xfs_mount_t *mp; 3129 int error; 3130 int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK); 3131 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) }; 3132 3133 XFS_STATS_INC(xs_iflush_count); 3134 3135 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); 3136 ASSERT(issemalocked(&(ip->i_flock))); 3137 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3138 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3139 3140 iip = ip->i_itemp; 3141 mp = ip->i_mount; 3142 3143 /* 3144 * If the inode isn't dirty, then just release the inode 3145 * flush lock and do nothing. 3146 */ 3147 if (xfs_inode_clean(ip)) { 3148 ASSERT((iip != NULL) ? 
3149 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1); 3150 xfs_ifunlock(ip); 3151 return 0; 3152 } 3153 3154 /* 3155 * We can't flush the inode until it is unpinned, so wait for it if we 3156 * are allowed to block. We know noone new can pin it, because we are 3157 * holding the inode lock shared and you need to hold it exclusively to 3158 * pin the inode. 3159 * 3160 * If we are not allowed to block, force the log out asynchronously so 3161 * that when we come back the inode will be unpinned. If other inodes 3162 * in the same cluster are dirty, they will probably write the inode 3163 * out for us if they occur after the log force completes. 3164 */ 3165 if (noblock && xfs_ipincount(ip)) { 3166 xfs_iunpin_nowait(ip); 3167 xfs_ifunlock(ip); 3168 return EAGAIN; 3169 } 3170 xfs_iunpin_wait(ip); 3171 3172 /* 3173 * This may have been unpinned because the filesystem is shutting 3174 * down forcibly. If that's the case we must not write this inode 3175 * to disk, because the log record didn't make it to disk! 3176 */ 3177 if (XFS_FORCED_SHUTDOWN(mp)) { 3178 ip->i_update_core = 0; 3179 if (iip) 3180 iip->ili_format.ilf_fields = 0; 3181 xfs_ifunlock(ip); 3182 return XFS_ERROR(EIO); 3183 } 3184 3185 /* 3186 * Decide how buffer will be flushed out. This is done before 3187 * the call to xfs_iflush_int because this field is zeroed by it. 3188 */ 3189 if (iip != NULL && iip->ili_format.ilf_fields != 0) { 3190 /* 3191 * Flush out the inode buffer according to the directions 3192 * of the caller. In the cases where the caller has given 3193 * us a choice choose the non-delwri case. This is because 3194 * the inode is in the AIL and we need to get it out soon. 3195 */ 3196 switch (flags) { 3197 case XFS_IFLUSH_SYNC: 3198 case XFS_IFLUSH_DELWRI_ELSE_SYNC: 3199 flags = 0; 3200 break; 3201 case XFS_IFLUSH_ASYNC_NOBLOCK: 3202 case XFS_IFLUSH_ASYNC: 3203 case XFS_IFLUSH_DELWRI_ELSE_ASYNC: 3204 flags = INT_ASYNC; 3205 break; 3206 case XFS_IFLUSH_DELWRI: 3207 flags = INT_DELWRI; 3208 break; 3209 default: 3210 ASSERT(0); 3211 flags = 0; 3212 break; 3213 } 3214 } else { 3215 switch (flags) { 3216 case XFS_IFLUSH_DELWRI_ELSE_SYNC: 3217 case XFS_IFLUSH_DELWRI_ELSE_ASYNC: 3218 case XFS_IFLUSH_DELWRI: 3219 flags = INT_DELWRI; 3220 break; 3221 case XFS_IFLUSH_ASYNC_NOBLOCK: 3222 case XFS_IFLUSH_ASYNC: 3223 flags = INT_ASYNC; 3224 break; 3225 case XFS_IFLUSH_SYNC: 3226 flags = 0; 3227 break; 3228 default: 3229 ASSERT(0); 3230 flags = 0; 3231 break; 3232 } 3233 } 3234 3235 /* 3236 * Get the buffer containing the on-disk inode. 3237 */ 3238 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0, 3239 noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK); 3240 if (error || !bp) { 3241 xfs_ifunlock(ip); 3242 return error; 3243 } 3244 3245 /* 3246 * First flush out the inode that xfs_iflush was called with. 3247 */ 3248 error = xfs_iflush_int(ip, bp); 3249 if (error) 3250 goto corrupt_out; 3251 3252 /* 3253 * If the buffer is pinned then push on the log now so we won't 3254 * get stuck waiting in the write for too long. 
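 *
 * (A pinned buffer cannot be written back until the log records that
 * pinned it have reached the disk, so starting the log force here
 * lets the unpin proceed while we go on to cluster other inodes into
 * this write.)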
3255 */ 3256 if (XFS_BUF_ISPINNED(bp)) 3257 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); 3258 3259 /* 3260 * inode clustering: 3261 * see if other inodes can be gathered into this write 3262 */ 3263 error = xfs_iflush_cluster(ip, bp); 3264 if (error) 3265 goto cluster_corrupt_out; 3266 3267 if (flags & INT_DELWRI) { 3268 xfs_bdwrite(mp, bp); 3269 } else if (flags & INT_ASYNC) { 3270 error = xfs_bawrite(mp, bp); 3271 } else { 3272 error = xfs_bwrite(mp, bp); 3273 } 3274 return error; 3275 3276 corrupt_out: 3277 xfs_buf_relse(bp); 3278 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3279 cluster_corrupt_out: 3280 /* 3281 * Unlocks the flush lock 3282 */ 3283 xfs_iflush_abort(ip); 3284 return XFS_ERROR(EFSCORRUPTED); 3285 } 3286 3287 3288 STATIC int 3289 xfs_iflush_int( 3290 xfs_inode_t *ip, 3291 xfs_buf_t *bp) 3292 { 3293 xfs_inode_log_item_t *iip; 3294 xfs_dinode_t *dip; 3295 xfs_mount_t *mp; 3296 #ifdef XFS_TRANS_DEBUG 3297 int first; 3298 #endif 3299 3300 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); 3301 ASSERT(issemalocked(&(ip->i_flock))); 3302 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3303 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3304 3305 iip = ip->i_itemp; 3306 mp = ip->i_mount; 3307 3308 3309 /* 3310 * If the inode isn't dirty, then just release the inode 3311 * flush lock and do nothing. 3312 */ 3313 if (xfs_inode_clean(ip)) { 3314 xfs_ifunlock(ip); 3315 return 0; 3316 } 3317 3318 /* set *dip = inode's place in the buffer */ 3319 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset); 3320 3321 /* 3322 * Clear i_update_core before copying out the data. 3323 * This is for coordination with our timestamp updates 3324 * that don't hold the inode lock. They will always 3325 * update the timestamps BEFORE setting i_update_core, 3326 * so if we clear i_update_core after they set it we 3327 * are guaranteed to see their updates to the timestamps. 3328 * I believe that this depends on strongly ordered memory 3329 * semantics, but we have that. We use the SYNCHRONIZE 3330 * macro to make sure that the compiler does not reorder 3331 * the i_update_core access below the data copy below. 3332 */ 3333 ip->i_update_core = 0; 3334 SYNCHRONIZE(); 3335 3336 /* 3337 * Make sure to get the latest atime from the Linux inode. 
3338 */ 3339 xfs_synchronize_atime(ip); 3340 3341 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC, 3342 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 3343 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3344 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p", 3345 ip->i_ino, be16_to_cpu(dip->di_core.di_magic), dip); 3346 goto corrupt_out; 3347 } 3348 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, 3349 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { 3350 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3351 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x", 3352 ip->i_ino, ip, ip->i_d.di_magic); 3353 goto corrupt_out; 3354 } 3355 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 3356 if (XFS_TEST_ERROR( 3357 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 3358 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 3359 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 3360 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3361 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p", 3362 ip->i_ino, ip); 3363 goto corrupt_out; 3364 } 3365 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 3366 if (XFS_TEST_ERROR( 3367 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 3368 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 3369 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 3370 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 3371 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3372 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p", 3373 ip->i_ino, ip); 3374 goto corrupt_out; 3375 } 3376 } 3377 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 3378 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 3379 XFS_RANDOM_IFLUSH_5)) { 3380 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3381 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p", 3382 ip->i_ino, 3383 ip->i_d.di_nextents + ip->i_d.di_anextents, 3384 ip->i_d.di_nblocks, 3385 ip); 3386 goto corrupt_out; 3387 } 3388 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 3389 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 3390 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3391 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 3392 ip->i_ino, ip->i_d.di_forkoff, ip); 3393 goto corrupt_out; 3394 } 3395 /* 3396 * bump the flush iteration count, used to detect flushes which 3397 * postdate a log record during recovery. 3398 */ 3399 3400 ip->i_d.di_flushiter++; 3401 3402 /* 3403 * Copy the dirty parts of the inode into the on-disk 3404 * inode. We always copy out the core of the inode, 3405 * because if the inode is dirty at all the core must 3406 * be. 3407 */ 3408 xfs_dinode_to_disk(&dip->di_core, &ip->i_d); 3409 3410 /* Wrap, we never let the log put out DI_MAX_FLUSH */ 3411 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 3412 ip->i_d.di_flushiter = 0; 3413 3414 /* 3415 * If this is really an old format inode and the superblock version 3416 * has not been updated to support only new format inodes, then 3417 * convert back to the old inode format. If the superblock version 3418 * has been updated, then make the conversion permanent. 3419 */ 3420 ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 || 3421 xfs_sb_version_hasnlink(&mp->m_sb)); 3422 if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { 3423 if (!xfs_sb_version_hasnlink(&mp->m_sb)) { 3424 /* 3425 * Convert it back. 
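 *
 * Version 1 dinodes keep the link count in the 16-bit di_onlink
 * field (hence the XFS_MAXLINK_1 assert below); version 2 dinodes
 * use the 32-bit di_nlink and leave di_onlink and di_pad zeroed, as
 * done in the else branch.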
3426 */ 3427 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); 3428 dip->di_core.di_onlink = cpu_to_be16(ip->i_d.di_nlink); 3429 } else { 3430 /* 3431 * The superblock version has already been bumped, 3432 * so just make the conversion to the new inode 3433 * format permanent. 3434 */ 3435 ip->i_d.di_version = XFS_DINODE_VERSION_2; 3436 dip->di_core.di_version = XFS_DINODE_VERSION_2; 3437 ip->i_d.di_onlink = 0; 3438 dip->di_core.di_onlink = 0; 3439 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 3440 memset(&(dip->di_core.di_pad[0]), 0, 3441 sizeof(dip->di_core.di_pad)); 3442 ASSERT(ip->i_d.di_projid == 0); 3443 } 3444 } 3445 3446 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp); 3447 if (XFS_IFORK_Q(ip)) 3448 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); 3449 xfs_inobp_check(mp, bp); 3450 3451 /* 3452 * We've recorded everything logged in the inode, so we'd 3453 * like to clear the ilf_fields bits so we don't log and 3454 * flush things unnecessarily. However, we can't stop 3455 * logging all this information until the data we've copied 3456 * into the disk buffer is written to disk. If we did we might 3457 * overwrite the copy of the inode in the log with all the 3458 * data after re-logging only part of it, and in the face of 3459 * a crash we wouldn't have all the data we need to recover. 3460 * 3461 * What we do is move the bits to the ili_last_fields field. 3462 * When logging the inode, these bits are moved back to the 3463 * ilf_fields field. In the xfs_iflush_done() routine we 3464 * clear ili_last_fields, since we know that the information 3465 * those bits represent is permanently on disk. As long as 3466 * the flush completes before the inode is logged again, then 3467 * both ilf_fields and ili_last_fields will be cleared. 3468 * 3469 * We can play with the ilf_fields bits here, because the inode 3470 * lock must be held exclusively in order to set bits there 3471 * and the flush lock protects the ili_last_fields bits. 3472 * Set ili_logged so the flush done 3473 * routine can tell whether or not to look in the AIL. 3474 * Also, store the current LSN of the inode so that we can tell 3475 * whether the item has moved in the AIL from xfs_iflush_done(). 3476 * In order to read the lsn we need the AIL lock, because 3477 * it is a 64 bit value that cannot be read atomically. 3478 */ 3479 if (iip != NULL && iip->ili_format.ilf_fields != 0) { 3480 iip->ili_last_fields = iip->ili_format.ilf_fields; 3481 iip->ili_format.ilf_fields = 0; 3482 iip->ili_logged = 1; 3483 3484 ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ 3485 spin_lock(&mp->m_ail_lock); 3486 iip->ili_flush_lsn = iip->ili_item.li_lsn; 3487 spin_unlock(&mp->m_ail_lock); 3488 3489 /* 3490 * Attach the function xfs_iflush_done to the inode's 3491 * buffer. This will remove the inode from the AIL 3492 * and unlock the inode's flush lock when the inode is 3493 * completely written to disk. 3494 */ 3495 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*)) 3496 xfs_iflush_done, (xfs_log_item_t *)iip); 3497 3498 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); 3499 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); 3500 } else { 3501 /* 3502 * We're flushing an inode which is not in the AIL and has 3503 * not been logged but has i_update_core set. For this 3504 * case we can use a B_DELWRI flush and immediately drop 3505 * the inode flush lock because we can avoid the whole 3506 * AIL state thing. 
It's OK to drop the flush lock now, 3507 * because we've already locked the buffer and to do anything 3508 * you really need both. 3509 */ 3510 if (iip != NULL) { 3511 ASSERT(iip->ili_logged == 0); 3512 ASSERT(iip->ili_last_fields == 0); 3513 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0); 3514 } 3515 xfs_ifunlock(ip); 3516 } 3517 3518 return 0; 3519 3520 corrupt_out: 3521 return XFS_ERROR(EFSCORRUPTED); 3522 } 3523 3524 3525 /* 3526 * Flush all inactive inodes in mp. 3527 */ 3528 void 3529 xfs_iflush_all( 3530 xfs_mount_t *mp) 3531 { 3532 xfs_inode_t *ip; 3533 bhv_vnode_t *vp; 3534 3535 again: 3536 XFS_MOUNT_ILOCK(mp); 3537 ip = mp->m_inodes; 3538 if (ip == NULL) 3539 goto out; 3540 3541 do { 3542 /* Make sure we skip markers inserted by sync */ 3543 if (ip->i_mount == NULL) { 3544 ip = ip->i_mnext; 3545 continue; 3546 } 3547 3548 vp = XFS_ITOV_NULL(ip); 3549 if (!vp) { 3550 XFS_MOUNT_IUNLOCK(mp); 3551 xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC); 3552 goto again; 3553 } 3554 3555 ASSERT(vn_count(vp) == 0); 3556 3557 ip = ip->i_mnext; 3558 } while (ip != mp->m_inodes); 3559 out: 3560 XFS_MOUNT_IUNLOCK(mp); 3561 } 3562 3563 #ifdef XFS_ILOCK_TRACE 3564 ktrace_t *xfs_ilock_trace_buf; 3565 3566 void 3567 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) 3568 { 3569 ktrace_enter(ip->i_lock_trace, 3570 (void *)ip, 3571 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */ 3572 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */ 3573 (void *)ra, /* caller of ilock */ 3574 (void *)(unsigned long)current_cpu(), 3575 (void *)(unsigned long)current_pid(), 3576 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); 3577 } 3578 #endif 3579 3580 /* 3581 * Return a pointer to the extent record at file index idx. 3582 */ 3583 xfs_bmbt_rec_host_t * 3584 xfs_iext_get_ext( 3585 xfs_ifork_t *ifp, /* inode fork pointer */ 3586 xfs_extnum_t idx) /* index of target extent */ 3587 { 3588 ASSERT(idx >= 0); 3589 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { 3590 return ifp->if_u1.if_ext_irec->er_extbuf; 3591 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3592 xfs_ext_irec_t *erp; /* irec pointer */ 3593 int erp_idx = 0; /* irec index */ 3594 xfs_extnum_t page_idx = idx; /* ext index in target list */ 3595 3596 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3597 return &erp->er_extbuf[page_idx]; 3598 } else if (ifp->if_bytes) { 3599 return &ifp->if_u1.if_extents[idx]; 3600 } else { 3601 return NULL; 3602 } 3603 } 3604 3605 /* 3606 * Insert new item(s) into the extent records for incore inode 3607 * fork 'ifp'. 'count' new items are inserted at index 'idx'. 3608 */ 3609 void 3610 xfs_iext_insert( 3611 xfs_ifork_t *ifp, /* inode fork pointer */ 3612 xfs_extnum_t idx, /* starting index of new items */ 3613 xfs_extnum_t count, /* number of inserted items */ 3614 xfs_bmbt_irec_t *new) /* items to insert */ 3615 { 3616 xfs_extnum_t i; /* extent record index */ 3617 3618 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3619 xfs_iext_add(ifp, idx, count); 3620 for (i = idx; i < idx + count; i++, new++) 3621 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new); 3622 } 3623 3624 /* 3625 * This is called when the amount of space required for incore file 3626 * extents needs to be increased. The ext_diff parameter stores the 3627 * number of new extents being added and the idx parameter contains 3628 * the extent index where the new extents will be added. If the new 3629 * extents are being appended, then we just need to (re)allocate and 3630 * initialize the space. 
Otherwise, if the new extents are being 3631 * inserted into the middle of the existing entries, a bit more work 3632 * is required to make room for the new extents to be inserted. The 3633 * caller is responsible for filling in the new extent entries upon 3634 * return. 3635 */ 3636 void 3637 xfs_iext_add( 3638 xfs_ifork_t *ifp, /* inode fork pointer */ 3639 xfs_extnum_t idx, /* index to begin adding exts */ 3640 int ext_diff) /* number of extents to add */ 3641 { 3642 int byte_diff; /* new bytes being added */ 3643 int new_size; /* size of extents after adding */ 3644 xfs_extnum_t nextents; /* number of extents in file */ 3645 3646 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3647 ASSERT((idx >= 0) && (idx <= nextents)); 3648 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t); 3649 new_size = ifp->if_bytes + byte_diff; 3650 /* 3651 * If the new number of extents (nextents + ext_diff) 3652 * fits inside the inode, then continue to use the inline 3653 * extent buffer. 3654 */ 3655 if (nextents + ext_diff <= XFS_INLINE_EXTS) { 3656 if (idx < nextents) { 3657 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff], 3658 &ifp->if_u2.if_inline_ext[idx], 3659 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 3660 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff); 3661 } 3662 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 3663 ifp->if_real_bytes = 0; 3664 ifp->if_lastex = nextents + ext_diff; 3665 } 3666 /* 3667 * Otherwise use a linear (direct) extent list. 3668 * If the extents are currently inside the inode, 3669 * xfs_iext_realloc_direct will switch us from 3670 * inline to direct extent allocation mode. 3671 */ 3672 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) { 3673 xfs_iext_realloc_direct(ifp, new_size); 3674 if (idx < nextents) { 3675 memmove(&ifp->if_u1.if_extents[idx + ext_diff], 3676 &ifp->if_u1.if_extents[idx], 3677 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 3678 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff); 3679 } 3680 } 3681 /* Indirection array */ 3682 else { 3683 xfs_ext_irec_t *erp; 3684 int erp_idx = 0; 3685 int page_idx = idx; 3686 3687 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS); 3688 if (ifp->if_flags & XFS_IFEXTIREC) { 3689 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1); 3690 } else { 3691 xfs_iext_irec_init(ifp); 3692 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3693 erp = ifp->if_u1.if_ext_irec; 3694 } 3695 /* Extents fit in target extent page */ 3696 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) { 3697 if (page_idx < erp->er_extcount) { 3698 memmove(&erp->er_extbuf[page_idx + ext_diff], 3699 &erp->er_extbuf[page_idx], 3700 (erp->er_extcount - page_idx) * 3701 sizeof(xfs_bmbt_rec_t)); 3702 memset(&erp->er_extbuf[page_idx], 0, byte_diff); 3703 } 3704 erp->er_extcount += ext_diff; 3705 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3706 } 3707 /* Insert a new extent page */ 3708 else if (erp) { 3709 xfs_iext_add_indirect_multi(ifp, 3710 erp_idx, page_idx, ext_diff); 3711 } 3712 /* 3713 * If extent(s) are being appended to the last page in 3714 * the indirection array and the new extent(s) don't fit 3715 * in the page, then erp is NULL and erp_idx is set to 3716 * the next index needed in the indirection array. 
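	 *
	 * (For scale, assuming XFS_IEXT_BUFSZ / sizeof(xfs_bmbt_rec_t)
	 *  works out to 256 records per page: appending 600 new records
	 *  past a full last page makes the loop below allocate one fresh
	 *  page per 256-record chunk, i.e. three pages here, starting at
	 *  erp_idx.  The value 256 is only an example.)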
3717 */ 3718 else { 3719 int count = ext_diff; 3720 3721 while (count) { 3722 erp = xfs_iext_irec_new(ifp, erp_idx); 3723 erp->er_extcount = count; 3724 count -= MIN(count, (int)XFS_LINEAR_EXTS); 3725 if (count) { 3726 erp_idx++; 3727 } 3728 } 3729 } 3730 } 3731 ifp->if_bytes = new_size; 3732 } 3733 3734 /* 3735 * This is called when incore extents are being added to the indirection 3736 * array and the new extents do not fit in the target extent list. The 3737 * erp_idx parameter contains the irec index for the target extent list 3738 * in the indirection array, and the idx parameter contains the extent 3739 * index within the list. The number of extents being added is stored 3740 * in the count parameter. 3741 * 3742 * |-------| |-------| 3743 * | | | | idx - number of extents before idx 3744 * | idx | | count | 3745 * | | | | count - number of extents being inserted at idx 3746 * |-------| |-------| 3747 * | count | | nex2 | nex2 - number of extents after idx + count 3748 * |-------| |-------| 3749 */ 3750 void 3751 xfs_iext_add_indirect_multi( 3752 xfs_ifork_t *ifp, /* inode fork pointer */ 3753 int erp_idx, /* target extent irec index */ 3754 xfs_extnum_t idx, /* index within target list */ 3755 int count) /* new extents being added */ 3756 { 3757 int byte_diff; /* new bytes being added */ 3758 xfs_ext_irec_t *erp; /* pointer to irec entry */ 3759 xfs_extnum_t ext_diff; /* number of extents to add */ 3760 xfs_extnum_t ext_cnt; /* new extents still needed */ 3761 xfs_extnum_t nex2; /* extents after idx + count */ 3762 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */ 3763 int nlists; /* number of irec's (lists) */ 3764 3765 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3766 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3767 nex2 = erp->er_extcount - idx; 3768 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3769 3770 /* 3771 * Save second part of target extent list 3772 * (all extents past */ 3773 if (nex2) { 3774 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3775 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP); 3776 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); 3777 erp->er_extcount -= nex2; 3778 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); 3779 memset(&erp->er_extbuf[idx], 0, byte_diff); 3780 } 3781 3782 /* 3783 * Add the new extents to the end of the target 3784 * list, then allocate new irec record(s) and 3785 * extent buffer(s) as needed to store the rest 3786 * of the new extents. 3787 */ 3788 ext_cnt = count; 3789 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount); 3790 if (ext_diff) { 3791 erp->er_extcount += ext_diff; 3792 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3793 ext_cnt -= ext_diff; 3794 } 3795 while (ext_cnt) { 3796 erp_idx++; 3797 erp = xfs_iext_irec_new(ifp, erp_idx); 3798 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS); 3799 erp->er_extcount = ext_diff; 3800 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3801 ext_cnt -= ext_diff; 3802 } 3803 3804 /* Add nex2 extents back to indirection array */ 3805 if (nex2) { 3806 xfs_extnum_t ext_avail; 3807 int i; 3808 3809 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3810 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; 3811 i = 0; 3812 /* 3813 * If nex2 extents fit in the current page, append 3814 * nex2_ep after the new extents. 3815 */ 3816 if (nex2 <= ext_avail) { 3817 i = erp->er_extcount; 3818 } 3819 /* 3820 * Otherwise, check if space is available in the 3821 * next page. 
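	 *
	 * (Available here means the nex2 leftover records fit in the
	 *  free slots of the irec at erp_idx + 1, i.e.
	 *  nex2 <= XFS_LINEAR_EXTS - er_extcount for that page; the
	 *  records already in that page are shifted up by nex2 to open
	 *  a hole at its front.)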
3822 */ 3823 else if ((erp_idx < nlists - 1) && 3824 (nex2 <= (ext_avail = XFS_LINEAR_EXTS - 3825 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) { 3826 erp_idx++; 3827 erp++; 3828 /* Create a hole for nex2 extents */ 3829 memmove(&erp->er_extbuf[nex2], erp->er_extbuf, 3830 erp->er_extcount * sizeof(xfs_bmbt_rec_t)); 3831 } 3832 /* 3833 * Final choice, create a new extent page for 3834 * nex2 extents. 3835 */ 3836 else { 3837 erp_idx++; 3838 erp = xfs_iext_irec_new(ifp, erp_idx); 3839 } 3840 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); 3841 kmem_free(nex2_ep, byte_diff); 3842 erp->er_extcount += nex2; 3843 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); 3844 } 3845 } 3846 3847 /* 3848 * This is called when the amount of space required for incore file 3849 * extents needs to be decreased. The ext_diff parameter stores the 3850 * number of extents to be removed and the idx parameter contains 3851 * the extent index where the extents will be removed from. 3852 * 3853 * If the amount of space needed has decreased below the linear 3854 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous 3855 * extent array. Otherwise, use kmem_realloc() to adjust the 3856 * size to what is needed. 3857 */ 3858 void 3859 xfs_iext_remove( 3860 xfs_ifork_t *ifp, /* inode fork pointer */ 3861 xfs_extnum_t idx, /* index to begin removing exts */ 3862 int ext_diff) /* number of extents to remove */ 3863 { 3864 xfs_extnum_t nextents; /* number of extents in file */ 3865 int new_size; /* size of extents after removal */ 3866 3867 ASSERT(ext_diff > 0); 3868 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3869 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); 3870 3871 if (new_size == 0) { 3872 xfs_iext_destroy(ifp); 3873 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3874 xfs_iext_remove_indirect(ifp, idx, ext_diff); 3875 } else if (ifp->if_real_bytes) { 3876 xfs_iext_remove_direct(ifp, idx, ext_diff); 3877 } else { 3878 xfs_iext_remove_inline(ifp, idx, ext_diff); 3879 } 3880 ifp->if_bytes = new_size; 3881 } 3882 3883 /* 3884 * This removes ext_diff extents from the inline buffer, beginning 3885 * at extent index idx. 3886 */ 3887 void 3888 xfs_iext_remove_inline( 3889 xfs_ifork_t *ifp, /* inode fork pointer */ 3890 xfs_extnum_t idx, /* index to begin removing exts */ 3891 int ext_diff) /* number of extents to remove */ 3892 { 3893 int nextents; /* number of extents in file */ 3894 3895 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 3896 ASSERT(idx < XFS_INLINE_EXTS); 3897 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3898 ASSERT(((nextents - ext_diff) > 0) && 3899 (nextents - ext_diff) < XFS_INLINE_EXTS); 3900 3901 if (idx + ext_diff < nextents) { 3902 memmove(&ifp->if_u2.if_inline_ext[idx], 3903 &ifp->if_u2.if_inline_ext[idx + ext_diff], 3904 (nextents - (idx + ext_diff)) * 3905 sizeof(xfs_bmbt_rec_t)); 3906 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff], 3907 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 3908 } else { 3909 memset(&ifp->if_u2.if_inline_ext[idx], 0, 3910 ext_diff * sizeof(xfs_bmbt_rec_t)); 3911 } 3912 } 3913 3914 /* 3915 * This removes ext_diff extents from a linear (direct) extent list, 3916 * beginning at extent index idx. If the extents are being removed 3917 * from the end of the list (ie. truncate) then we just need to re- 3918 * allocate the list to remove the extra space. 
Otherwise, if the 3919 * extents are being removed from the middle of the existing extent 3920 * entries, then we first need to move the extent records beginning 3921 * at idx + ext_diff up in the list to overwrite the records being 3922 * removed, then remove the extra space via kmem_realloc. 3923 */ 3924 void 3925 xfs_iext_remove_direct( 3926 xfs_ifork_t *ifp, /* inode fork pointer */ 3927 xfs_extnum_t idx, /* index to begin removing exts */ 3928 int ext_diff) /* number of extents to remove */ 3929 { 3930 xfs_extnum_t nextents; /* number of extents in file */ 3931 int new_size; /* size of extents after removal */ 3932 3933 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 3934 new_size = ifp->if_bytes - 3935 (ext_diff * sizeof(xfs_bmbt_rec_t)); 3936 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3937 3938 if (new_size == 0) { 3939 xfs_iext_destroy(ifp); 3940 return; 3941 } 3942 /* Move extents up in the list (if needed) */ 3943 if (idx + ext_diff < nextents) { 3944 memmove(&ifp->if_u1.if_extents[idx], 3945 &ifp->if_u1.if_extents[idx + ext_diff], 3946 (nextents - (idx + ext_diff)) * 3947 sizeof(xfs_bmbt_rec_t)); 3948 } 3949 memset(&ifp->if_u1.if_extents[nextents - ext_diff], 3950 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 3951 /* 3952 * Reallocate the direct extent list. If the extents 3953 * will fit inside the inode then xfs_iext_realloc_direct 3954 * will switch from direct to inline extent allocation 3955 * mode for us. 3956 */ 3957 xfs_iext_realloc_direct(ifp, new_size); 3958 ifp->if_bytes = new_size; 3959 } 3960 3961 /* 3962 * This is called when incore extents are being removed from the 3963 * indirection array and the extents being removed span multiple extent 3964 * buffers. The idx parameter contains the file extent index where we 3965 * want to begin removing extents, and the count parameter contains 3966 * how many extents need to be removed. 3967 * 3968 * |-------| |-------| 3969 * | nex1 | | | nex1 - number of extents before idx 3970 * |-------| | count | 3971 * | | | | count - number of extents being removed at idx 3972 * | count | |-------| 3973 * | | | nex2 | nex2 - number of extents after idx + count 3974 * |-------| |-------| 3975 */ 3976 void 3977 xfs_iext_remove_indirect( 3978 xfs_ifork_t *ifp, /* inode fork pointer */ 3979 xfs_extnum_t idx, /* index to begin removing extents */ 3980 int count) /* number of extents to remove */ 3981 { 3982 xfs_ext_irec_t *erp; /* indirection array pointer */ 3983 int erp_idx = 0; /* indirection array index */ 3984 xfs_extnum_t ext_cnt; /* extents left to remove */ 3985 xfs_extnum_t ext_diff; /* extents to remove in current list */ 3986 xfs_extnum_t nex1; /* number of extents before idx */ 3987 xfs_extnum_t nex2; /* extents after idx + count */ 3988 int nlists; /* entries in indirection array */ 3989 int page_idx = idx; /* index in target extent list */ 3990 3991 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3992 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3993 ASSERT(erp != NULL); 3994 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3995 nex1 = page_idx; 3996 ext_cnt = count; 3997 while (ext_cnt) { 3998 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0); 3999 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1)); 4000 /* 4001 * Check for deletion of entire list; 4002 * xfs_iext_irec_remove() updates extent offsets. 
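		 *
		 * (For orientation: within this loop nex1 is the number
		 *  of records kept at the front of the current page,
		 *  ext_cnt is how many records still have to be removed,
		 *  and nex2 is the number kept at the tail.  After the
		 *  first page nex1 is always zero, since later pages are
		 *  consumed from their start.)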
4003 */ 4004 if (ext_diff == erp->er_extcount) { 4005 xfs_iext_irec_remove(ifp, erp_idx); 4006 ext_cnt -= ext_diff; 4007 nex1 = 0; 4008 if (ext_cnt) { 4009 ASSERT(erp_idx < ifp->if_real_bytes / 4010 XFS_IEXT_BUFSZ); 4011 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4012 nex1 = 0; 4013 continue; 4014 } else { 4015 break; 4016 } 4017 } 4018 /* Move extents up (if needed) */ 4019 if (nex2) { 4020 memmove(&erp->er_extbuf[nex1], 4021 &erp->er_extbuf[nex1 + ext_diff], 4022 nex2 * sizeof(xfs_bmbt_rec_t)); 4023 } 4024 /* Zero out rest of page */ 4025 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ - 4026 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t)))); 4027 /* Update remaining counters */ 4028 erp->er_extcount -= ext_diff; 4029 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff); 4030 ext_cnt -= ext_diff; 4031 nex1 = 0; 4032 erp_idx++; 4033 erp++; 4034 } 4035 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t); 4036 xfs_iext_irec_compact(ifp); 4037 } 4038 4039 /* 4040 * Create, destroy, or resize a linear (direct) block of extents. 4041 */ 4042 void 4043 xfs_iext_realloc_direct( 4044 xfs_ifork_t *ifp, /* inode fork pointer */ 4045 int new_size) /* new size of extents */ 4046 { 4047 int rnew_size; /* real new size of extents */ 4048 4049 rnew_size = new_size; 4050 4051 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) || 4052 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) && 4053 (new_size != ifp->if_real_bytes))); 4054 4055 /* Free extent records */ 4056 if (new_size == 0) { 4057 xfs_iext_destroy(ifp); 4058 } 4059 /* Resize direct extent list and zero any new bytes */ 4060 else if (ifp->if_real_bytes) { 4061 /* Check if extents will fit inside the inode */ 4062 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) { 4063 xfs_iext_direct_to_inline(ifp, new_size / 4064 (uint)sizeof(xfs_bmbt_rec_t)); 4065 ifp->if_bytes = new_size; 4066 return; 4067 } 4068 if (!is_power_of_2(new_size)){ 4069 rnew_size = roundup_pow_of_two(new_size); 4070 } 4071 if (rnew_size != ifp->if_real_bytes) { 4072 ifp->if_u1.if_extents = 4073 kmem_realloc(ifp->if_u1.if_extents, 4074 rnew_size, 4075 ifp->if_real_bytes, 4076 KM_SLEEP); 4077 } 4078 if (rnew_size > ifp->if_real_bytes) { 4079 memset(&ifp->if_u1.if_extents[ifp->if_bytes / 4080 (uint)sizeof(xfs_bmbt_rec_t)], 0, 4081 rnew_size - ifp->if_real_bytes); 4082 } 4083 } 4084 /* 4085 * Switch from the inline extent buffer to a direct 4086 * extent list. Be sure to include the inline extent 4087 * bytes in new_size. 4088 */ 4089 else { 4090 new_size += ifp->if_bytes; 4091 if (!is_power_of_2(new_size)) { 4092 rnew_size = roundup_pow_of_two(new_size); 4093 } 4094 xfs_iext_inline_to_direct(ifp, rnew_size); 4095 } 4096 ifp->if_real_bytes = rnew_size; 4097 ifp->if_bytes = new_size; 4098 } 4099 4100 /* 4101 * Switch from linear (direct) extent records to inline buffer. 4102 */ 4103 void 4104 xfs_iext_direct_to_inline( 4105 xfs_ifork_t *ifp, /* inode fork pointer */ 4106 xfs_extnum_t nextents) /* number of extents in file */ 4107 { 4108 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 4109 ASSERT(nextents <= XFS_INLINE_EXTS); 4110 /* 4111 * The inline buffer was zeroed when we switched 4112 * from inline to direct extent allocation mode, 4113 * so we don't need to clear it here. 
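	 *
	 * (After the copy below the fork is back in inline mode:
	 *  if_u1.if_extents aliases if_u2.if_inline_ext and
	 *  if_real_bytes is zero, which is the test the rest of this
	 *  file uses to recognise the inline format.)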
4114 */ 4115 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents, 4116 nextents * sizeof(xfs_bmbt_rec_t)); 4117 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); 4118 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 4119 ifp->if_real_bytes = 0; 4120 } 4121 4122 /* 4123 * Switch from inline buffer to linear (direct) extent records. 4124 * new_size should already be rounded up to the next power of 2 4125 * by the caller (when appropriate), so use new_size as it is. 4126 * However, since new_size may be rounded up, we can't update 4127 * if_bytes here. It is the caller's responsibility to update 4128 * if_bytes upon return. 4129 */ 4130 void 4131 xfs_iext_inline_to_direct( 4132 xfs_ifork_t *ifp, /* inode fork pointer */ 4133 int new_size) /* number of extents in file */ 4134 { 4135 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_SLEEP); 4136 memset(ifp->if_u1.if_extents, 0, new_size); 4137 if (ifp->if_bytes) { 4138 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, 4139 ifp->if_bytes); 4140 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 4141 sizeof(xfs_bmbt_rec_t)); 4142 } 4143 ifp->if_real_bytes = new_size; 4144 } 4145 4146 /* 4147 * Resize an extent indirection array to new_size bytes. 4148 */ 4149 void 4150 xfs_iext_realloc_indirect( 4151 xfs_ifork_t *ifp, /* inode fork pointer */ 4152 int new_size) /* new indirection array size */ 4153 { 4154 int nlists; /* number of irec's (ex lists) */ 4155 int size; /* current indirection array size */ 4156 4157 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4158 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4159 size = nlists * sizeof(xfs_ext_irec_t); 4160 ASSERT(ifp->if_real_bytes); 4161 ASSERT((new_size >= 0) && (new_size != size)); 4162 if (new_size == 0) { 4163 xfs_iext_destroy(ifp); 4164 } else { 4165 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) 4166 kmem_realloc(ifp->if_u1.if_ext_irec, 4167 new_size, size, KM_SLEEP); 4168 } 4169 } 4170 4171 /* 4172 * Switch from indirection array to linear (direct) extent allocations. 4173 */ 4174 void 4175 xfs_iext_indirect_to_direct( 4176 xfs_ifork_t *ifp) /* inode fork pointer */ 4177 { 4178 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ 4179 xfs_extnum_t nextents; /* number of extents in file */ 4180 int size; /* size of file extents */ 4181 4182 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4183 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4184 ASSERT(nextents <= XFS_LINEAR_EXTS); 4185 size = nextents * sizeof(xfs_bmbt_rec_t); 4186 4187 xfs_iext_irec_compact_full(ifp); 4188 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); 4189 4190 ep = ifp->if_u1.if_ext_irec->er_extbuf; 4191 kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t)); 4192 ifp->if_flags &= ~XFS_IFEXTIREC; 4193 ifp->if_u1.if_extents = ep; 4194 ifp->if_bytes = size; 4195 if (nextents < XFS_LINEAR_EXTS) { 4196 xfs_iext_realloc_direct(ifp, size); 4197 } 4198 } 4199 4200 /* 4201 * Free incore file extents. 
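 *
 * (The fork can hold its extent records in one of three ways, and each
 *  branch below tears one of them down:
 *
 *	inline:    records live in if_u2.if_inline_ext inside the fork
 *	           itself; if_real_bytes is zero
 *	direct:    one kmem_alloc'd array at if_u1.if_extents;
 *	           if_real_bytes is the allocation size
 *	indirect:  XFS_IFEXTIREC is set and if_u1.if_ext_irec points to
 *	           an array of xfs_ext_irec_t, each owning an
 *	           XFS_IEXT_BUFSZ extent buffer
 *
 *  In every format if_bytes is the number of live records times
 *  sizeof(xfs_bmbt_rec_t).)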
4202 */ 4203 void 4204 xfs_iext_destroy( 4205 xfs_ifork_t *ifp) /* inode fork pointer */ 4206 { 4207 if (ifp->if_flags & XFS_IFEXTIREC) { 4208 int erp_idx; 4209 int nlists; 4210 4211 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4212 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) { 4213 xfs_iext_irec_remove(ifp, erp_idx); 4214 } 4215 ifp->if_flags &= ~XFS_IFEXTIREC; 4216 } else if (ifp->if_real_bytes) { 4217 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); 4218 } else if (ifp->if_bytes) { 4219 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 4220 sizeof(xfs_bmbt_rec_t)); 4221 } 4222 ifp->if_u1.if_extents = NULL; 4223 ifp->if_real_bytes = 0; 4224 ifp->if_bytes = 0; 4225 } 4226 4227 /* 4228 * Return a pointer to the extent record for file system block bno. 4229 */ 4230 xfs_bmbt_rec_host_t * /* pointer to found extent record */ 4231 xfs_iext_bno_to_ext( 4232 xfs_ifork_t *ifp, /* inode fork pointer */ 4233 xfs_fileoff_t bno, /* block number to search for */ 4234 xfs_extnum_t *idxp) /* index of target extent */ 4235 { 4236 xfs_bmbt_rec_host_t *base; /* pointer to first extent */ 4237 xfs_filblks_t blockcount = 0; /* number of blocks in extent */ 4238 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */ 4239 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 4240 int high; /* upper boundary in search */ 4241 xfs_extnum_t idx = 0; /* index of target extent */ 4242 int low; /* lower boundary in search */ 4243 xfs_extnum_t nextents; /* number of file extents */ 4244 xfs_fileoff_t startoff = 0; /* start offset of extent */ 4245 4246 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4247 if (nextents == 0) { 4248 *idxp = 0; 4249 return NULL; 4250 } 4251 low = 0; 4252 if (ifp->if_flags & XFS_IFEXTIREC) { 4253 /* Find target extent list */ 4254 int erp_idx = 0; 4255 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx); 4256 base = erp->er_extbuf; 4257 high = erp->er_extcount - 1; 4258 } else { 4259 base = ifp->if_u1.if_extents; 4260 high = nextents - 1; 4261 } 4262 /* Binary search extent records */ 4263 while (low <= high) { 4264 idx = (low + high) >> 1; 4265 ep = base + idx; 4266 startoff = xfs_bmbt_get_startoff(ep); 4267 blockcount = xfs_bmbt_get_blockcount(ep); 4268 if (bno < startoff) { 4269 high = idx - 1; 4270 } else if (bno >= startoff + blockcount) { 4271 low = idx + 1; 4272 } else { 4273 /* Convert back to file-based extent index */ 4274 if (ifp->if_flags & XFS_IFEXTIREC) { 4275 idx += erp->er_extoff; 4276 } 4277 *idxp = idx; 4278 return ep; 4279 } 4280 } 4281 /* Convert back to file-based extent index */ 4282 if (ifp->if_flags & XFS_IFEXTIREC) { 4283 idx += erp->er_extoff; 4284 } 4285 if (bno >= startoff + blockcount) { 4286 if (++idx == nextents) { 4287 ep = NULL; 4288 } else { 4289 ep = xfs_iext_get_ext(ifp, idx); 4290 } 4291 } 4292 *idxp = idx; 4293 return ep; 4294 } 4295 4296 /* 4297 * Return a pointer to the indirection array entry containing the 4298 * extent record for filesystem block bno. Store the index of the 4299 * target irec in *erp_idxp. 
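 *
 * (Illustrative use, mirroring the lookup in xfs_iext_bno_to_ext()
 *  above; the variable names are only examples:
 *
 *	int		erp_idx = 0;
 *	xfs_ext_irec_t	*erp;
 *
 *	erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
 *	... binary search erp->er_extbuf[0 .. er_extcount - 1] ...
 *
 *  The search key for each page is the startoff of its first record,
 *  so the page returned is the one whose key range covers bno.)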
4300 */ 4301 xfs_ext_irec_t * /* pointer to found extent record */ 4302 xfs_iext_bno_to_irec( 4303 xfs_ifork_t *ifp, /* inode fork pointer */ 4304 xfs_fileoff_t bno, /* block number to search for */ 4305 int *erp_idxp) /* irec index of target ext list */ 4306 { 4307 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 4308 xfs_ext_irec_t *erp_next; /* next indirection array entry */ 4309 int erp_idx; /* indirection array index */ 4310 int nlists; /* number of extent irec's (lists) */ 4311 int high; /* binary search upper limit */ 4312 int low; /* binary search lower limit */ 4313 4314 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4315 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4316 erp_idx = 0; 4317 low = 0; 4318 high = nlists - 1; 4319 while (low <= high) { 4320 erp_idx = (low + high) >> 1; 4321 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4322 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL; 4323 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) { 4324 high = erp_idx - 1; 4325 } else if (erp_next && bno >= 4326 xfs_bmbt_get_startoff(erp_next->er_extbuf)) { 4327 low = erp_idx + 1; 4328 } else { 4329 break; 4330 } 4331 } 4332 *erp_idxp = erp_idx; 4333 return erp; 4334 } 4335 4336 /* 4337 * Return a pointer to the indirection array entry containing the 4338 * extent record at file extent index *idxp. Store the index of the 4339 * target irec in *erp_idxp and store the page index of the target 4340 * extent record in *idxp. 4341 */ 4342 xfs_ext_irec_t * 4343 xfs_iext_idx_to_irec( 4344 xfs_ifork_t *ifp, /* inode fork pointer */ 4345 xfs_extnum_t *idxp, /* extent index (file -> page) */ 4346 int *erp_idxp, /* pointer to target irec */ 4347 int realloc) /* new bytes were just added */ 4348 { 4349 xfs_ext_irec_t *prev; /* pointer to previous irec */ 4350 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */ 4351 int erp_idx; /* indirection array index */ 4352 int nlists; /* number of irec's (ex lists) */ 4353 int high; /* binary search upper limit */ 4354 int low; /* binary search lower limit */ 4355 xfs_extnum_t page_idx = *idxp; /* extent index in target list */ 4356 4357 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4358 ASSERT(page_idx >= 0 && page_idx <= 4359 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); 4360 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4361 erp_idx = 0; 4362 low = 0; 4363 high = nlists - 1; 4364 4365 /* Binary search extent irec's */ 4366 while (low <= high) { 4367 erp_idx = (low + high) >> 1; 4368 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4369 prev = erp_idx > 0 ? erp - 1 : NULL; 4370 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff && 4371 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) { 4372 high = erp_idx - 1; 4373 } else if (page_idx > erp->er_extoff + erp->er_extcount || 4374 (page_idx == erp->er_extoff + erp->er_extcount && 4375 !realloc)) { 4376 low = erp_idx + 1; 4377 } else if (page_idx == erp->er_extoff + erp->er_extcount && 4378 erp->er_extcount == XFS_LINEAR_EXTS) { 4379 ASSERT(realloc); 4380 page_idx = 0; 4381 erp_idx++; 4382 erp = erp_idx < nlists ? erp + 1 : NULL; 4383 break; 4384 } else { 4385 page_idx -= erp->er_extoff; 4386 break; 4387 } 4388 } 4389 *idxp = page_idx; 4390 *erp_idxp = erp_idx; 4391 return(erp); 4392 } 4393 4394 /* 4395 * Allocate and initialize an indirection array once the space needed 4396 * for incore extents increases above XFS_IEXT_BUFSZ. 
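 *
 * (The existing records are not copied: the direct extent buffer is
 *  grown to exactly XFS_IEXT_BUFSZ bytes if necessary and then becomes
 *  er_extbuf of the single irec entry, so on return:
 *
 *	if_u1.if_ext_irec[0].er_extbuf   - the old direct buffer
 *	if_u1.if_ext_irec[0].er_extoff   - 0
 *	if_u1.if_ext_irec[0].er_extcount - nextents
 *
 *  and if_real_bytes is XFS_IEXT_BUFSZ.)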
4397 */ 4398 void 4399 xfs_iext_irec_init( 4400 xfs_ifork_t *ifp) /* inode fork pointer */ 4401 { 4402 xfs_ext_irec_t *erp; /* indirection array pointer */ 4403 xfs_extnum_t nextents; /* number of extents in file */ 4404 4405 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 4406 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4407 ASSERT(nextents <= XFS_LINEAR_EXTS); 4408 4409 erp = (xfs_ext_irec_t *) 4410 kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP); 4411 4412 if (nextents == 0) { 4413 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); 4414 } else if (!ifp->if_real_bytes) { 4415 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); 4416 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { 4417 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ); 4418 } 4419 erp->er_extbuf = ifp->if_u1.if_extents; 4420 erp->er_extcount = nextents; 4421 erp->er_extoff = 0; 4422 4423 ifp->if_flags |= XFS_IFEXTIREC; 4424 ifp->if_real_bytes = XFS_IEXT_BUFSZ; 4425 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t); 4426 ifp->if_u1.if_ext_irec = erp; 4427 4428 return; 4429 } 4430 4431 /* 4432 * Allocate and initialize a new entry in the indirection array. 4433 */ 4434 xfs_ext_irec_t * 4435 xfs_iext_irec_new( 4436 xfs_ifork_t *ifp, /* inode fork pointer */ 4437 int erp_idx) /* index for new irec */ 4438 { 4439 xfs_ext_irec_t *erp; /* indirection array pointer */ 4440 int i; /* loop counter */ 4441 int nlists; /* number of irec's (ex lists) */ 4442 4443 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4444 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4445 4446 /* Resize indirection array */ 4447 xfs_iext_realloc_indirect(ifp, ++nlists * 4448 sizeof(xfs_ext_irec_t)); 4449 /* 4450 * Move records down in the array so the 4451 * new page can use erp_idx. 4452 */ 4453 erp = ifp->if_u1.if_ext_irec; 4454 for (i = nlists - 1; i > erp_idx; i--) { 4455 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t)); 4456 } 4457 ASSERT(i == erp_idx); 4458 4459 /* Initialize new extent record */ 4460 erp = ifp->if_u1.if_ext_irec; 4461 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); 4462 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 4463 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); 4464 erp[erp_idx].er_extcount = 0; 4465 erp[erp_idx].er_extoff = erp_idx > 0 ? 4466 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0; 4467 return (&erp[erp_idx]); 4468 } 4469 4470 /* 4471 * Remove a record from the indirection array. 4472 */ 4473 void 4474 xfs_iext_irec_remove( 4475 xfs_ifork_t *ifp, /* inode fork pointer */ 4476 int erp_idx) /* irec index to remove */ 4477 { 4478 xfs_ext_irec_t *erp; /* indirection array pointer */ 4479 int i; /* loop counter */ 4480 int nlists; /* number of irec's (ex lists) */ 4481 4482 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4483 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4484 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4485 if (erp->er_extbuf) { 4486 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, 4487 -erp->er_extcount); 4488 kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ); 4489 } 4490 /* Compact extent records */ 4491 erp = ifp->if_u1.if_ext_irec; 4492 for (i = erp_idx; i < nlists - 1; i++) { 4493 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t)); 4494 } 4495 /* 4496 * Manually free the last extent record from the indirection 4497 * array. A call to xfs_iext_realloc_indirect() with a size 4498 * of zero would result in a call to xfs_iext_destroy() which 4499 * would in turn call this function again, creating a nasty 4500 * infinite loop. 
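	 *
	 * (So while at least one irec remains the array is shrunk in
	 *  place with xfs_iext_realloc_indirect(); only when the last
	 *  irec goes away is the one-entry array handed straight to
	 *  kmem_free(), leaving if_real_bytes at zero.)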
4501 */ 4502 if (--nlists) { 4503 xfs_iext_realloc_indirect(ifp, 4504 nlists * sizeof(xfs_ext_irec_t)); 4505 } else { 4506 kmem_free(ifp->if_u1.if_ext_irec, 4507 sizeof(xfs_ext_irec_t)); 4508 } 4509 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 4510 } 4511 4512 /* 4513 * This is called to clean up large amounts of unused memory allocated 4514 * by the indirection array. Before compacting anything though, verify 4515 * that the indirection array is still needed and switch back to the 4516 * linear extent list (or even the inline buffer) if possible. The 4517 * compaction policy is as follows: 4518 * 4519 * Full Compaction: Extents fit into a single page (or inline buffer) 4520 * Full Compaction: Extents occupy less than 10% of allocated space 4521 * Partial Compaction: Extents occupy > 10% and < 50% of allocated space 4522 * No Compaction: Extents occupy at least 50% of allocated space 4523 */ 4524 void 4525 xfs_iext_irec_compact( 4526 xfs_ifork_t *ifp) /* inode fork pointer */ 4527 { 4528 xfs_extnum_t nextents; /* number of extents in file */ 4529 int nlists; /* number of irec's (ex lists) */ 4530 4531 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4532 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4533 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4534 4535 if (nextents == 0) { 4536 xfs_iext_destroy(ifp); 4537 } else if (nextents <= XFS_INLINE_EXTS) { 4538 xfs_iext_indirect_to_direct(ifp); 4539 xfs_iext_direct_to_inline(ifp, nextents); 4540 } else if (nextents <= XFS_LINEAR_EXTS) { 4541 xfs_iext_indirect_to_direct(ifp); 4542 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) { 4543 xfs_iext_irec_compact_full(ifp); 4544 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) { 4545 xfs_iext_irec_compact_pages(ifp); 4546 } 4547 } 4548 4549 /* 4550 * Combine extents from neighboring extent pages. 4551 */ 4552 void 4553 xfs_iext_irec_compact_pages( 4554 xfs_ifork_t *ifp) /* inode fork pointer */ 4555 { 4556 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */ 4557 int erp_idx = 0; /* indirection array index */ 4558 int nlists; /* number of irec's (ex lists) */ 4559 4560 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4561 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4562 while (erp_idx < nlists - 1) { 4563 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4564 erp_next = erp + 1; 4565 if (erp_next->er_extcount <= 4566 (XFS_LINEAR_EXTS - erp->er_extcount)) { 4567 memmove(&erp->er_extbuf[erp->er_extcount], 4568 erp_next->er_extbuf, erp_next->er_extcount * 4569 sizeof(xfs_bmbt_rec_t)); 4570 erp->er_extcount += erp_next->er_extcount; 4571 /* 4572 * Free page before removing extent record 4573 * so er_extoffs don't get modified in 4574 * xfs_iext_irec_remove. 4575 */ 4576 kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ); 4577 erp_next->er_extbuf = NULL; 4578 xfs_iext_irec_remove(ifp, erp_idx + 1); 4579 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4580 } else { 4581 erp_idx++; 4582 } 4583 } 4584 } 4585 4586 /* 4587 * Fully compact the extent records managed by the indirection array. 
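 *
 * (Worked example of the policy implemented in xfs_iext_irec_compact()
 *  above, assuming 256 records per page - the real count is
 *  XFS_IEXT_BUFSZ / sizeof(xfs_bmbt_rec_t) - and 16 pages currently
 *  allocated: up to 256 live records drops back to a direct list (or,
 *  for a handful of records, the inline buffer), 257 to 511 records
 *  triggers this full compaction, 512 to 2047 only merges neighbouring
 *  pages, and 2048 or more leaves the array untouched.)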
4588 */ 4589 void 4590 xfs_iext_irec_compact_full( 4591 xfs_ifork_t *ifp) /* inode fork pointer */ 4592 { 4593 xfs_bmbt_rec_host_t *ep, *ep_next; /* extent record pointers */ 4594 xfs_ext_irec_t *erp, *erp_next; /* extent irec pointers */ 4595 int erp_idx = 0; /* extent irec index */ 4596 int ext_avail; /* empty entries in ex list */ 4597 int ext_diff; /* number of exts to add */ 4598 int nlists; /* number of irec's (ex lists) */ 4599 4600 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4601 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4602 erp = ifp->if_u1.if_ext_irec; 4603 ep = &erp->er_extbuf[erp->er_extcount]; 4604 erp_next = erp + 1; 4605 ep_next = erp_next->er_extbuf; 4606 while (erp_idx < nlists - 1) { 4607 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; 4608 ext_diff = MIN(ext_avail, erp_next->er_extcount); 4609 memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t)); 4610 erp->er_extcount += ext_diff; 4611 erp_next->er_extcount -= ext_diff; 4612 /* Remove next page */ 4613 if (erp_next->er_extcount == 0) { 4614 /* 4615 * Free page before removing extent record 4616 * so er_extoffs don't get modified in 4617 * xfs_iext_irec_remove. 4618 */ 4619 kmem_free(erp_next->er_extbuf, 4620 erp_next->er_extcount * sizeof(xfs_bmbt_rec_t)); 4621 erp_next->er_extbuf = NULL; 4622 xfs_iext_irec_remove(ifp, erp_idx + 1); 4623 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4624 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4625 /* Update next page */ 4626 } else { 4627 /* Move rest of page up to become next new page */ 4628 memmove(erp_next->er_extbuf, ep_next, 4629 erp_next->er_extcount * sizeof(xfs_bmbt_rec_t)); 4630 ep_next = erp_next->er_extbuf; 4631 memset(&ep_next[erp_next->er_extcount], 0, 4632 (XFS_LINEAR_EXTS - erp_next->er_extcount) * 4633 sizeof(xfs_bmbt_rec_t)); 4634 } 4635 if (erp->er_extcount == XFS_LINEAR_EXTS) { 4636 erp_idx++; 4637 if (erp_idx < nlists) 4638 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4639 else 4640 break; 4641 } 4642 ep = &erp->er_extbuf[erp->er_extcount]; 4643 erp_next = erp + 1; 4644 ep_next = erp_next->er_extbuf; 4645 } 4646 } 4647 4648 /* 4649 * This is called to update the er_extoff field in the indirection 4650 * array when extents have been added or removed from one of the 4651 * extent lists. erp_idx contains the irec index to begin updating 4652 * at and ext_diff contains the number of extents that were added 4653 * or removed. 4654 */ 4655 void 4656 xfs_iext_irec_update_extoffs( 4657 xfs_ifork_t *ifp, /* inode fork pointer */ 4658 int erp_idx, /* irec index to update */ 4659 int ext_diff) /* number of new extents */ 4660 { 4661 int i; /* loop counter */ 4662 int nlists; /* number of irec's (ex lists */ 4663 4664 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4665 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4666 for (i = erp_idx; i < nlists; i++) { 4667 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff; 4668 } 4669 } 4670
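
/*
 * Illustrative sketch only: nothing in this file calls it and the name
 * is made up for the example.  It spells out the bookkeeping that
 * xfs_iext_irec_new() and xfs_iext_irec_update_extoffs() are meant to
 * keep consistent: each irec's er_extoff is the sum of the
 * er_extcounts of all earlier irecs, and if_bytes reflects the total
 * number of incore extent records.
 */
#ifdef DEBUG
STATIC void
xfs_iext_irec_check(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	total = 0;	/* records counted so far */
	int		nlists;		/* number of irec's (ex lists) */
	int		i;		/* loop counter */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = 0; i < nlists; i++) {
		xfs_ext_irec_t	*erp = &ifp->if_u1.if_ext_irec[i];

		/* offset of this page == records in all earlier pages */
		ASSERT(erp->er_extoff == total);
		total += erp->er_extcount;
	}
	ASSERT(ifp->if_bytes == total * (uint)sizeof(xfs_bmbt_rec_t));
}
#endif	/* DEBUG */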