/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_imap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_dir2_trace.h"
#include "xfs_quota.h"
#include "xfs_acl.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"

kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;
kmem_zone_t *xfs_icluster_zone;

/*
 * Used in xfs_itruncate().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_fs_cmn_err(CE_ALERT, mp,
				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dip parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
STATIC int
xfs_inotobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	int		*offset)
{
	int		di_ok;
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;
	xfs_dinode_t	*dip;

	/*
	 * Call the space management code to find the location of the
	 * inode on disk.
	 */
	imap.im_blkno = 0;
	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
	if (error != 0) {
		cmn_err(CE_WARN,
	"xfs_inotobp: xfs_imap() returned an "
	"error %d on %s.  Returning error.", error, mp->m_fsname);
		return error;
	}

	/*
	 * If the inode number maps to a block outside the bounds of the
	 * file system then return NULL rather than calling read_buf
	 * and panicking when we get an error from the driver.
	 */
	if ((imap.im_blkno + imap.im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		cmn_err(CE_WARN,
	"xfs_inotobp: inode number (%llu + %d) maps to a block outside the bounds "
	"of the file system %s.  Returning EINVAL.",
			(unsigned long long)imap.im_blkno,
			imap.im_len, mp->m_fsname);
		return XFS_ERROR(EINVAL);
	}

	/*
	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
	 * default to just a read_buf() call.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
				   (int)imap.im_len, XFS_BUF_LOCK, &bp);

	if (error) {
		cmn_err(CE_WARN,
	"xfs_inotobp: xfs_trans_read_buf() returned an "
	"error %d on %s.  Returning error.", error, mp->m_fsname);
		return error;
	}
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0);
	di_ok =
		be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
		XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
	if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
			XFS_RANDOM_ITOBP_INOTOBP))) {
		XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip);
		xfs_trans_brelse(tp, bp);
		cmn_err(CE_WARN,
	"xfs_inotobp: XFS_TEST_ERROR() returned an "
	"error on %s.  Returning EFSCORRUPTED.", mp->m_fsname);
		return XFS_ERROR(EFSCORRUPTED);
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Set *dipp to point to the on-disk inode in the buffer.
	 */
	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	*offset = imap.im_boffset;
	return 0;
}
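
/*
 * Illustrative sketch of how xfs_inotobp() is meant to be driven (this is
 * not one of the real callers; "tp", "ino" and the surrounding error
 * handling are assumed):
 *
 *	xfs_dinode_t	*dip;
 *	xfs_buf_t	*bp;
 *	int		offset;
 *	int		error;
 *
 *	error = xfs_inotobp(mp, tp, ino, &dip, &bp, &offset);
 *	if (error)
 *		return error;
 *	// inspect dip->di_core here; "offset" is the inode's byte offset
 *	// within the buffer
 *	xfs_trans_brelse(tp, bp);	// release the buffer when done
 */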

/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dip parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * If the inode is new and has not yet been initialized, use xfs_imap()
 * to determine the size and location of the buffer to read from disk.
 * If the inode has already been mapped to its buffer and read in once,
 * then use the mapping information stored in the inode rather than
 * calling xfs_imap().  This allows us to avoid the overhead of looking
 * at the inode btree for small block file systems (see xfs_dilocate()).
 * We can tell whether the inode has been mapped in before by comparing
 * its disk block address to 0.  Only uninitialized inodes will have
 * 0 for the disk block address.
 */
int
xfs_itobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	xfs_daddr_t	bno,
	uint		imap_flags)
{
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;
	int		i;
	int		ni;

	if (ip->i_blkno == (xfs_daddr_t)0) {
		/*
		 * Call the space management code to find the location of the
		 * inode on disk.
		 */
		imap.im_blkno = bno;
		if ((error = xfs_imap(mp, tp, ip->i_ino, &imap,
					XFS_IMAP_LOOKUP | imap_flags)))
			return error;

		/*
		 * If the inode number maps to a block outside the bounds
		 * of the file system then return NULL rather than calling
		 * read_buf and panicking when we get an error from the
		 * driver.
		 */
		if ((imap.im_blkno + imap.im_len) >
		    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
					"(imap.im_blkno (0x%llx) "
					"+ imap.im_len (0x%llx)) > "
					" XFS_FSB_TO_BB(mp, "
					"mp->m_sb.sb_dblocks) (0x%llx)",
					(unsigned long long) imap.im_blkno,
					(unsigned long long) imap.im_len,
					XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
#endif /* DEBUG */
			return XFS_ERROR(EINVAL);
		}

		/*
		 * Fill in the fields in the inode that will be used to
		 * map the inode to its buffer from now on.
		 */
		ip->i_blkno = imap.im_blkno;
		ip->i_len = imap.im_len;
		ip->i_boffset = imap.im_boffset;
	} else {
		/*
		 * We've already mapped the inode once, so just use the
		 * mapping that we saved the first time.
		 */
		imap.im_blkno = ip->i_blkno;
		imap.im_len = ip->i_len;
		imap.im_boffset = ip->i_boffset;
	}
	ASSERT(bno == 0 || bno == imap.im_blkno);

	/*
	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
	 * default to just a read_buf() call.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
	if (error) {
#ifdef DEBUG
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
				"xfs_trans_read_buf() returned error %d, "
				"imap.im_blkno 0x%llx, imap.im_len 0x%llx",
				error, (unsigned long long) imap.im_blkno,
				(unsigned long long) imap.im_len);
#endif /* DEBUG */
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 * No validation is done here in userspace (xfs_repair).
	 */
#if !defined(__KERNEL__)
	ni = 0;
#elif defined(DEBUG)
	ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
			    XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (imap_flags & XFS_IMAP_BULKSTAT) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
#ifdef DEBUG
			cmn_err(CE_ALERT,
					"Device %s - bad inode magic/vsn "
					"daddr %lld #%d (magic=%x)",
				XFS_BUFTARG_NAME(mp->m_ddev_targp),
				(unsigned long long)imap.im_blkno, i,
				be16_to_cpu(dip->di_core.di_magic));
#endif
			XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_HIGH,
					     mp, dip);
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);

	/*
	 * Set *dipp to point to the on-disk inode in the buffer.
	 */
	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	return 0;
}

/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error;
	xfs_fsize_t		di_size;
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	error = 0;

	if (unlikely(be32_to_cpu(dip->di_core.di_nextents) +
		     be16_to_cpu(dip->di_core.di_anextents) >
		     be64_to_cpu(dip->di_core.di_nblocks))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_core.di_nextents) +
			      be16_to_cpu(dip->di_core.di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_core.di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(dip->di_core.di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_core.di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_core.di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					      ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_size = 0;
		ip->i_df.if_u2.if_rdev = be32_to_cpu(dip->di_u.di_dev);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_core.di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely((be16_to_cpu(dip->di_core.di_mode) & S_IFMT) == S_IFREG)) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = be64_to_cpu(dip->di_core.di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	switch (dip->di_core.di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);
		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}

/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu "
			"(bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}

/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = be64_to_cpu(get_unaligned(&dp->l0));
			ep->l1 = be64_to_cpu(get_unaligned(&dp->l1));
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);

	/*
	 * blow out if -- fork has less extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
	    || XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
		ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}

void
xfs_dinode_from_disk(
	xfs_icdinode_t		*to,
	xfs_dinode_core_t	*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid = be16_to_cpu(from->di_projid);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);
}

void
xfs_dinode_to_disk(
	xfs_dinode_core_t	*to,
	xfs_icdinode_t		*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid = cpu_to_be16(from->di_projid);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
}

STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_core_t	*dic)
{
	return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) |
				(XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0);
}

/*
 * Given a mount structure and an inode number, return a pointer
 * to a newly allocated in-core inode corresponding to the given
 * inode number.
 *
 * Initialize the inode's attributes and extent pointers if it
 * already has them (it will not if the inode has no links).
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno,
	uint		imap_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	xfs_inode_t	*ip;
	int		error;

	ASSERT(xfs_inode_zone != NULL);

	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
	ip->i_ino = ino;
	ip->i_mount = mp;
	atomic_set(&ip->i_iocount, 0);
	spin_lock_init(&ip->i_flags_lock);

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 * If the inode number refers to a block outside the file system
	 * then xfs_itobp() will return NULL.  In this case we should
	 * return NULL as well.  Set i_blkno to 0 so that xfs_itobp() will
	 * know that this is a new incore inode.
	 */
	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
	if (error) {
		kmem_zone_free(xfs_inode_zone, ip);
		return error;
	}

	/*
	 * Initialize inode's trace buffers.
	 * Do this before xfs_iformat in case it adds entries.
	 */
#ifdef XFS_VNODE_TRACE
	ip->i_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMBT_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
#endif

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC) {
		kmem_zone_free(xfs_inode_zone, ip);
		xfs_trans_brelse(tp, bp);
#ifdef DEBUG
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
				"dip->di_core.di_magic (0x%x) != "
				"XFS_DINODE_MAGIC (0x%x)",
				be16_to_cpu(dip->di_core.di_magic),
				XFS_DINODE_MAGIC);
#endif /* DEBUG */
		return XFS_ERROR(EINVAL);
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_core.di_mode) {
		xfs_dinode_from_disk(&ip->i_d, &dip->di_core);
		error = xfs_iformat(ip, dip);
		if (error)  {
			kmem_zone_free(xfs_inode_zone, ip);
			xfs_trans_brelse(tp, bp);
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
					"xfs_iformat() returned error %d",
					error);
#endif /* DEBUG */
			return error;
		}
	} else {
		ip->i_d.di_magic = be16_to_cpu(dip->di_core.di_magic);
		ip->i_d.di_version = dip->di_core.di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_core.di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_core.di_flushiter);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
		/*
		 * Initialize the per-fork minima and maxima for a new
		 * inode here.  xfs_iformat will do it for old inodes.
		 */
		ip->i_df.if_ext_max =
			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	}

	INIT_LIST_HEAD(&ip->i_reclaim);

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format.  We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		ip->i_d.di_projid = 0;
	}

	ip->i_delayed_blks = 0;
	ip->i_size = ip->i_d.di_size;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	XFS_BUF_SET_REF(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.
	 * Thus we don't have to worry about the inode being changed
	 * just because we released the buffer.
	 */
	xfs_trans_brelse(tp, bp);
	*ipp = ip;
	return 0;
}

/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;
	size_t		size;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	size = nextents * sizeof(xfs_bmbt_rec_t);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_lastex = NULLEXTNUM;
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
	return 0;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget()
 * to obtain the in-core version of the allocated inode.  Finally,
 * fill in the inode and log its initial contents.  In this case,
 * ialloc_context would be set to NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode,
 * it will replenish its supply by doing an allocation.  Since we can
 * only do one allocation within a transaction without deadlocks, we
 * must commit the current transaction before returning the inode itself.
 * In this case, therefore, we will set call_again to true and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	mode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	cred_t		*cr,
	xfs_prid_t	prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	boolean_t	*call_again,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp;
	uint		flags;
	int		error;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, call_again, &ino);
	if (error != 0) {
		return error;
	}
	if (*call_again || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_trans_iget(tp->t_mountp, tp, ino,
				XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error != 0) {
		return error;
	}
	ASSERT(ip != NULL);

	vp = XFS_ITOV(ip);
	ip->i_d.di_mode = (__uint16_t)mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid(cr);
	ip->i_d.di_gid = current_fsgid(cr);
	ip->i_d.di_projid = prid;
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (XFS_SB_VERSION_HASNLINK(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_version = XFS_DINODE_VERSION_2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);
	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		if (pip && xfs_inode_is_filestream(pip)) {
			error = xfs_filestream_associate(pip, ip);
			if (error < 0)
				return -error;
			if (!error)
				xfs_iflags_set(ip, XFS_IFILESTREAM);
		}
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if ((mode & S_IFMT) == S_IFDIR) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if ((mode & S_IFMT) == S_IFREG) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
					di_flags |= XFS_DIFLAG_REALTIME;
					ip->i_iocore.io_flags |= XFS_IOCORE_RT;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_initialize_vnode(tp->t_mountp, vp, ip);

	*ipp = ip;
	return 0;
}
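
/*
 * Illustrative sketch of the two-pass protocol described in the comment
 * above xfs_ialloc() (not one of the real callers; in this tree the real
 * retry loop lives in xfs_dir_ialloc()).  Transaction setup, error
 * handling and the buffer hold/join details are abbreviated and partly
 * assumed:
 *
 *	xfs_buf_t	*ialloc_context = NULL;
 *	boolean_t	call_again = B_FALSE;
 *
 *	error = xfs_ialloc(tp, dp, mode, 1, rdev, credp, prid, okalloc,
 *			   &ialloc_context, &call_again, &ip);
 *	if (!error && call_again) {
 *		// xfs_dialloc() had to allocate a new inode chunk: hold the
 *		// returned AGI/freelist buffer across a commit of *tp,
 *		// reserve a fresh transaction, then call xfs_ialloc() again
 *		// to pick up the inode that was just made available.
 *		error = xfs_ialloc(tp, dp, mode, 1, rdev, credp, prid,
 *				   okalloc, &ialloc_context, &call_again,
 *				   &ip);
 *	}
 */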

/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
#ifdef DEBUG
void
xfs_isize_check(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	map_first;
	int		nimaps;
	xfs_bmbt_irec_t	imaps[2];

	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
		return;

	if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
		return;

	nimaps = 2;
	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 * an error.
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			 (XFS_B_TO_FSB(mp,
				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
			  map_first),
			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
			 NULL, NULL))
		return;
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#endif	/* DEBUG */

/*
 * Calculate the last possible buffered byte in a file.  This must
 * include data that was buffered beyond the EOF by the write code.
 * This also needs to deal with overflowing the xfs_fsize_t type
 * which can happen for sizes near the limit.
 *
 * We also need to take into account any blocks beyond the EOF.  It
 * may be the case that they were buffered by a write which failed.
 * In that case the pages will still be in memory, but the inode size
 * will never have been updated.
 */
xfs_fsize_t
xfs_file_last_byte(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_fsize_t	last_byte;
	xfs_fileoff_t	last_block;
	xfs_fileoff_t	size_last_block;
	int		error;

	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));

	mp = ip->i_mount;
	/*
	 * Only check for blocks beyond the EOF if the extents have
	 * been read in.  This eliminates the need for the inode lock,
	 * and it also saves us from looking when it really isn't
	 * necessary.
	 */
	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
		error = xfs_bmap_last_offset(NULL, ip, &last_block,
			XFS_DATA_FORK);
		if (error) {
			last_block = 0;
		}
	} else {
		last_block = 0;
	}
	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);

	last_byte = XFS_FSB_TO_B(mp, last_block);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	last_byte += (1 << mp->m_writeio_log);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	return last_byte;
}

#if defined(XFS_RW_TRACE)
STATIC void
xfs_itrunc_trace(
	int		tag,
	xfs_inode_t	*ip,
	int		flag,
	xfs_fsize_t	new_size,
	xfs_off_t	toss_start,
	xfs_off_t	toss_finish)
{
	if (ip->i_rwtrace == NULL) {
		return;
	}

	ktrace_enter(ip->i_rwtrace,
		     (void*)((long)tag),
		     (void*)ip,
		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
		     (void*)((long)flag),
		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(new_size & 0xffffffff),
		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_start & 0xffffffff),
		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_finish & 0xffffffff),
		     (void*)(unsigned long)current_cpu(),
		     (void*)(unsigned long)current_pid(),
		     (void*)NULL,
		     (void*)NULL,
		     (void*)NULL);
}
#else
#define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
#endif

/*
 * Start the truncation of the file to new_size.  The new size
 * must be smaller than the current size.  This routine will
 * clear the buffer and page caches of file data in the removed
 * range, and xfs_itruncate_finish() will remove the underlying
 * disk blocks.
 *
 * The inode must have its I/O lock locked EXCLUSIVELY, and it
 * must NOT have the inode lock held at all.  This is because we're
 * calling into the buffer/page cache code and we can't hold the
 * inode lock when we do so.
 *
 * We need to wait for any direct I/Os in flight to complete before we
 * proceed with the truncate.  This is needed to prevent the extents
 * being read or written by the direct I/Os from being removed while the
 * I/O is in flight as there is no other method of synchronising
 * direct I/O with the truncate operation.  Also, because we hold
 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
 * started until the truncate completes and drops the lock.  Essentially,
 * the vn_iowait() call forms an I/O barrier that provides strict ordering
 * between direct I/Os and the truncate operation.
 *
 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
 * in the case that the caller is locking things out of order and
 * may not be able to call xfs_itruncate_finish() with the inode lock
 * held without dropping the I/O lock.  If the caller must drop the
 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
 * must be called again with all the same restrictions as the initial
 * call.
 */
int
xfs_itruncate_start(
	xfs_inode_t	*ip,
	uint		flags,
	xfs_fsize_t	new_size)
{
	xfs_fsize_t	last_byte;
	xfs_off_t	toss_start;
	xfs_mount_t	*mp;
	bhv_vnode_t	*vp;
	int		error = 0;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
	       (flags == XFS_ITRUNC_MAYBE));

	mp = ip->i_mount;
	vp = XFS_ITOV(ip);

	vn_iowait(ip);	/* wait for the completion of any pending DIOs */

	/*
	 * Call toss_pages or flushinval_pages to get rid of pages
	 * overlapping the region being removed.  We have to use
	 * the less efficient flushinval_pages in the case that the
	 * caller may not be able to finish the truncate without
	 * dropping the inode's I/O lock.  Make sure
	 * to catch any pages brought in by buffers overlapping
	 * the EOF by searching out beyond the isize by our
	 * block size.  We round new_size up to a block boundary
	 * so that we don't toss things on the same block as
	 * new_size but before it.
	 *
	 * Before calling toss_page or flushinval_pages, make sure to
	 * call remapf() over the same region if the file is mapped.
	 * This frees up mapped file references to the pages in the
	 * given range and for the flushinval_pages case it ensures
	 * that we get the latest mapped changes flushed out.
	 */
	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	toss_start = XFS_FSB_TO_B(mp, toss_start);
	if (toss_start < 0) {
		/*
		 * The place to start tossing is beyond our maximum
		 * file size, so there is no way that the data extended
		 * out there.
		 */
		return 0;
	}
	last_byte = xfs_file_last_byte(ip);
	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
			 last_byte);
	if (last_byte > toss_start) {
		if (flags & XFS_ITRUNC_DEFINITE) {
			xfs_tosspages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		} else {
			error = xfs_flushinval_pages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		}
	}

#ifdef DEBUG
	if (new_size == 0) {
		ASSERT(VN_CACHED(vp) == 0);
	}
#endif
	return error;
}
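
/*
 * Illustrative calling sequence for the truncate pair (a sketch only; the
 * real callers are in xfs_vnodeops.c, and transaction/error handling here
 * is abbreviated and partly assumed):
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, new_size);
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
 *	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
 *				  XFS_TRANS_PERM_LOG_RES,
 *				  XFS_ITRUNCATE_LOG_COUNT);
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 *	xfs_trans_ihold(tp, ip);
 *	error = xfs_itruncate_finish(&tp, ip, new_size, XFS_DATA_FORK, sync);
 *
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */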

/*
 * Shrink the file to the given new_size.  The new
 * size must be smaller than the current size.
 * This will free up the underlying blocks
 * in the removed range after a call to xfs_itruncate_start()
 * or xfs_atruncate_start().
 *
 * The transaction passed to this routine must have made
 * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
 * This routine may commit the given transaction and
 * start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.
 * Some transaction will be returned to the caller to be
 * committed.  The incoming transaction must already include
 * the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On
 * return the inode will be "held" within the returned transaction.
 * This routine does NOT require any disk space to be reserved
 * for it within the transaction.
 *
 * The fork parameter must be either xfs_attr_fork or xfs_data_fork,
 * and it indicates the fork which is to be truncated.  For the
 * attribute fork we only support truncation to size 0.
 *
 * We use the sync parameter to indicate whether or not the first
 * transaction we perform might have to be synchronous.  For the attr fork,
 * it needs to be so if the unlink of the inode is not yet known to be
 * permanent in the log.  This keeps us from freeing and reusing the
 * blocks of the attribute fork before the unlink of the inode becomes
 * permanent.
 *
 * For the data fork, we normally have to run synchronously if we're
 * being called out of the inactive path or we're being called
 * out of the create path where we're truncating an existing file.
 * Either way, the truncate needs to be sync so blocks don't reappear
 * in the file with altered data in case of a crash.  wsync filesystems
 * can run the first case async because anything that shrinks the inode
 * has to run sync so by the time we're called here from inactive, the
 * inode size is permanently set to 0.
 *
 * Calls from the truncate path always need to be sync unless we're
 * in a wsync filesystem and the file has already been unlinked.
 *
 * The caller is responsible for correctly setting the sync parameter.
 * It gets too hard for us to guess here which path we're being called
 * out of just based on inode state.
 */
int
xfs_itruncate_finish(
	xfs_trans_t	**tp,
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	int		fork,
	int		sync)
{
	xfs_fsblock_t	first_block;
	xfs_fileoff_t	first_unmap_block;
	xfs_fileoff_t	last_block;
	xfs_filblks_t	unmap_len=0;
	xfs_mount_t	*mp;
	xfs_trans_t	*ntp;
	int		done;
	int		committed;
	xfs_bmap_free_t	free_list;
	int		error;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT(*tp != NULL);
	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_transp == *tp);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);


	ntp = *tp;
	mp = (ntp)->t_mountp;
	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));

	/*
	 * We only support truncating the entire attribute fork.
	 */
	if (fork == XFS_ATTR_FORK) {
		new_size = 0LL;
	}
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
	/*
	 * The first thing we do is set the size to new_size permanently
	 * on disk.  This way we don't have to worry about anyone ever
	 * being able to look at the data being freed even in the face
	 * of a crash.  What we're getting around here is the case where
	 * we free a block, it is allocated to another file, it is written
	 * to, and then we crash.  If the new data gets written to the
	 * file but the log buffers containing the free and reallocation
	 * don't, then we'd end up with garbage in the blocks being freed.
	 * As long as we make the new_size permanent before actually
	 * freeing any blocks it doesn't matter if they get written to.
	 *
	 * The callers must signal into us whether or not the size
	 * setting here must be synchronous.  There are a few cases
	 * where it doesn't have to be synchronous.
	 * Those cases occur if the file is unlinked and we know the unlink
	 * is permanent or if the blocks being truncated are guaranteed
	 * to be beyond the inode eof (regardless of the link count)
	 * and the eof value is permanent.  Both of these cases occur
	 * only on wsync-mounted filesystems.  In those cases, we're
	 * guaranteed that no user will ever see the data in the blocks
	 * that are being truncated so the truncate can run async.
	 * In the free beyond eof case, the file may wind up with
	 * more blocks allocated to it than it needs if we crash
	 * and that won't get fixed until the next time the file
	 * is re-opened and closed but that's ok as that shouldn't
	 * be too many blocks.
	 *
	 * However, we can't just make all wsync xactions run async
	 * because there's one call out of the create path that needs
	 * to run sync where it's truncating an existing file to size
	 * 0 whose size is > 0.
	 *
	 * It's probably possible to come up with a test in this
	 * routine that would correctly distinguish all the above
	 * cases from the values of the function parameters and the
	 * inode state but for sanity's sake, I've decided to let the
	 * layers above just tell us.  It's simpler to correctly figure
	 * out in the layer above exactly under what conditions we
	 * can run async and I think it's easier for others to read and
	 * follow the logic in case something has to be changed.
	 * cscope is your friend -- rcc.
	 *
	 * The attribute fork is much simpler.
	 *
	 * For the attribute fork we allow the caller to tell us whether
	 * the unlink of the inode that led to this call is yet permanent
	 * in the on disk log.  If it is not and we will be freeing extents
	 * in this inode then we make the first transaction synchronous
	 * to make sure that the unlink is permanent by the time we free
	 * the blocks.
	 */
	if (fork == XFS_DATA_FORK) {
		if (ip->i_d.di_nextents > 0) {
			/*
			 * If we are not changing the file size then do
			 * not update the on-disk file size - we may be
			 * called from xfs_inactive_free_eofblocks().  If we
			 * update the on-disk file size and then the system
			 * crashes before the contents of the file are
			 * flushed to disk then the files may be full of
			 * holes (ie NULL files bug).
			 */
			if (ip->i_size != new_size) {
				ip->i_d.di_size = new_size;
				ip->i_size = new_size;
				xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
			}
		}
	} else if (sync) {
		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
		if (ip->i_d.di_anextents > 0)
			xfs_trans_set_sync(ntp);
	}
	ASSERT(fork == XFS_DATA_FORK ||
		(fork == XFS_ATTR_FORK &&
			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	ASSERT(first_unmap_block <= last_block);
	done = 0;
	if (last_block == first_unmap_block) {
		done = 1;
	} else {
		unmap_len = last_block - first_unmap_block + 1;
	}
	while (!done) {
		/*
		 * Free up up to XFS_ITRUNC_MAX_EXTENTS.  xfs_bunmapi()
		 * will tell us whether it freed the entire range or
		 * not.  If this is a synchronous mount (wsync),
		 * then we can tell bunmapi to keep all the
		 * transactions asynchronous since the unlink
		 * transaction that made this inode inactive has
		 * already hit the disk.  There's no danger of
		 * the freed blocks being reused, there being a
		 * crash, and the reused blocks suddenly reappearing
		 * in this file with garbage in them once recovery
		 * runs.
		 */
		XFS_BMAP_INIT(&free_list, &first_block);
		error = XFS_BUNMAPI(mp, ntp, &ip->i_iocore,
				    first_unmap_block, unmap_len,
				    XFS_BMAPI_AFLAG(fork) |
				      (sync ? 0 : XFS_BMAPI_ASYNC),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    NULL, &done);
		if (error) {
			/*
			 * If the bunmapi call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(tp, &free_list, &committed);
		ntp = *tp;
		if (error) {
			/*
			 * If the bmap finish call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 *
			 * Aborting from this point might lose some
			 * blocks in the file system, but oh well.
			 */
			xfs_bmap_cancel(&free_list);
			if (committed) {
				/*
				 * If the passed in transaction committed
				 * in xfs_bmap_finish(), then we want to
				 * add the inode to this one before returning.
				 * This keeps things simple for the higher
				 * level code, because it always knows that
				 * the inode is locked and held in the
				 * transaction that returns to it whether
				 * errors occur or not.  We don't mark the
				 * inode dirty so that this transaction can
				 * be easily aborted if possible.
				 */
				xfs_trans_ijoin(ntp, ip,
					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
				xfs_trans_ihold(ntp, ip);
			}
			return error;
		}

		if (committed) {
			/*
			 * The first xact was committed,
			 * so add the inode to the new one.
			 * Mark it dirty so it will be logged
			 * and moved forward in the log as
			 * part of every commit.
			 */
			xfs_trans_ijoin(ntp, ip,
					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
			xfs_trans_ihold(ntp, ip);
			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
		}
		ntp = xfs_trans_dup(ntp);
		(void) xfs_trans_commit(*tp, 0);
		*tp = ntp;
		error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_ITRUNCATE_LOG_COUNT);
		/*
		 * Add the inode being truncated to the next chained
		 * transaction.
1791 */ 1792 xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1793 xfs_trans_ihold(ntp, ip); 1794 if (error) 1795 return (error); 1796 } 1797 /* 1798 * Only update the size in the case of the data fork, but 1799 * always re-log the inode so that our permanent transaction 1800 * can keep on rolling it forward in the log. 1801 */ 1802 if (fork == XFS_DATA_FORK) { 1803 xfs_isize_check(mp, ip, new_size); 1804 /* 1805 * If we are not changing the file size then do 1806 * not update the on-disk file size - we may be 1807 * called from xfs_inactive_free_eofblocks(). If we 1808 * update the on-disk file size and then the system 1809 * crashes before the contents of the file are 1810 * flushed to disk then the files may be full of 1811 * holes (ie NULL files bug). 1812 */ 1813 if (ip->i_size != new_size) { 1814 ip->i_d.di_size = new_size; 1815 ip->i_size = new_size; 1816 } 1817 } 1818 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); 1819 ASSERT((new_size != 0) || 1820 (fork == XFS_ATTR_FORK) || 1821 (ip->i_delayed_blks == 0)); 1822 ASSERT((new_size != 0) || 1823 (fork == XFS_ATTR_FORK) || 1824 (ip->i_d.di_nextents == 0)); 1825 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0); 1826 return 0; 1827 } 1828 1829 1830 /* 1831 * xfs_igrow_start 1832 * 1833 * Do the first part of growing a file: zero any data in the last 1834 * block that is beyond the old EOF. We need to do this before 1835 * the inode is joined to the transaction to modify the i_size. 1836 * That way we can drop the inode lock and call into the buffer 1837 * cache to get the buffer mapping the EOF. 1838 */ 1839 int 1840 xfs_igrow_start( 1841 xfs_inode_t *ip, 1842 xfs_fsize_t new_size, 1843 cred_t *credp) 1844 { 1845 int error; 1846 1847 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); 1848 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); 1849 ASSERT(new_size > ip->i_size); 1850 1851 /* 1852 * Zero any pages that may have been created by 1853 * xfs_write_file() beyond the end of the file 1854 * and any blocks between the old and new file sizes. 1855 */ 1856 error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, 1857 ip->i_size); 1858 return error; 1859 } 1860 1861 /* 1862 * xfs_igrow_finish 1863 * 1864 * This routine is called to extend the size of a file. 1865 * The inode must have both the iolock and the ilock locked 1866 * for update and it must be a part of the current transaction. 1867 * The xfs_igrow_start() function must have been called previously. 1868 * If the change_flag is not zero, the inode change timestamp will 1869 * be updated. 1870 */ 1871 void 1872 xfs_igrow_finish( 1873 xfs_trans_t *tp, 1874 xfs_inode_t *ip, 1875 xfs_fsize_t new_size, 1876 int change_flag) 1877 { 1878 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); 1879 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); 1880 ASSERT(ip->i_transp == tp); 1881 ASSERT(new_size > ip->i_size); 1882 1883 /* 1884 * Update the file size. Update the inode change timestamp 1885 * if change_flag set. 1886 */ 1887 ip->i_d.di_size = new_size; 1888 ip->i_size = new_size; 1889 if (change_flag) 1890 xfs_ichgtime(ip, XFS_ICHGTIME_CHG); 1891 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1892 1893 } 1894 1895 1896 /* 1897 * This is called when the inode's link count goes to 0. 1898 * We place the on-disk inode on a list in the AGI. It 1899 * will be pulled from this list when the inode is freed. 
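 *
 * The AGI keeps XFS_AGI_UNLINKED_BUCKETS short lists, each headed by an
 * agi_unlinked[] slot and chained through the inodes' di_next_unlinked
 * fields.  Insertion is at the head of the bucket; condensed from the
 * code below:
 *
 *	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
 *	dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
 *	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
 *
 * xfs_iunlink_remove() reverses this when the inode is finally freed.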
1900 */ 1901 int 1902 xfs_iunlink( 1903 xfs_trans_t *tp, 1904 xfs_inode_t *ip) 1905 { 1906 xfs_mount_t *mp; 1907 xfs_agi_t *agi; 1908 xfs_dinode_t *dip; 1909 xfs_buf_t *agibp; 1910 xfs_buf_t *ibp; 1911 xfs_agnumber_t agno; 1912 xfs_daddr_t agdaddr; 1913 xfs_agino_t agino; 1914 short bucket_index; 1915 int offset; 1916 int error; 1917 int agi_ok; 1918 1919 ASSERT(ip->i_d.di_nlink == 0); 1920 ASSERT(ip->i_d.di_mode != 0); 1921 ASSERT(ip->i_transp == tp); 1922 1923 mp = tp->t_mountp; 1924 1925 agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 1926 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); 1927 1928 /* 1929 * Get the agi buffer first. It ensures lock ordering 1930 * on the list. 1931 */ 1932 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, 1933 XFS_FSS_TO_BB(mp, 1), 0, &agibp); 1934 if (error) 1935 return error; 1936 1937 /* 1938 * Validate the magic number of the agi block. 1939 */ 1940 agi = XFS_BUF_TO_AGI(agibp); 1941 agi_ok = 1942 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && 1943 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); 1944 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK, 1945 XFS_RANDOM_IUNLINK))) { 1946 XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi); 1947 xfs_trans_brelse(tp, agibp); 1948 return XFS_ERROR(EFSCORRUPTED); 1949 } 1950 /* 1951 * Get the index into the agi hash table for the 1952 * list this inode will go on. 1953 */ 1954 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1955 ASSERT(agino != 0); 1956 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1957 ASSERT(agi->agi_unlinked[bucket_index]); 1958 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); 1959 1960 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); 1961 if (error) 1962 return error; 1963 1964 /* 1965 * Clear the on-disk di_nlink. This is to prevent xfs_bulkstat 1966 * from picking up this inode when it is reclaimed (its incore state 1967 * initialzed but not flushed to disk yet). The in-core di_nlink is 1968 * already cleared in xfs_droplink() and a corresponding transaction 1969 * logged. The hack here just synchronizes the in-core to on-disk 1970 * di_nlink value in advance before the actual inode sync to disk. 1971 * This is OK because the inode is already unlinked and would never 1972 * change its di_nlink again for this inode generation. 1973 * This is a temporary hack that would require a proper fix 1974 * in the future. 1975 */ 1976 dip->di_core.di_nlink = 0; 1977 1978 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { 1979 /* 1980 * There is already another inode in the bucket we need 1981 * to add ourselves to. Add us at the front of the list. 1982 * Here we put the head pointer into our next pointer, 1983 * and then we fall through to point the head at us. 1984 */ 1985 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); 1986 /* both on-disk, don't endian flip twice */ 1987 dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; 1988 offset = ip->i_boffset + 1989 offsetof(xfs_dinode_t, di_next_unlinked); 1990 xfs_trans_inode_buf(tp, ibp); 1991 xfs_trans_log_buf(tp, ibp, offset, 1992 (offset + sizeof(xfs_agino_t) - 1)); 1993 xfs_inobp_check(mp, ibp); 1994 } 1995 1996 /* 1997 * Point the bucket head pointer at the inode being inserted. 
1998 */ 1999 ASSERT(agino != 0); 2000 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); 2001 offset = offsetof(xfs_agi_t, agi_unlinked) + 2002 (sizeof(xfs_agino_t) * bucket_index); 2003 xfs_trans_log_buf(tp, agibp, offset, 2004 (offset + sizeof(xfs_agino_t) - 1)); 2005 return 0; 2006 } 2007 2008 /* 2009 * Pull the on-disk inode from the AGI unlinked list. 2010 */ 2011 STATIC int 2012 xfs_iunlink_remove( 2013 xfs_trans_t *tp, 2014 xfs_inode_t *ip) 2015 { 2016 xfs_ino_t next_ino; 2017 xfs_mount_t *mp; 2018 xfs_agi_t *agi; 2019 xfs_dinode_t *dip; 2020 xfs_buf_t *agibp; 2021 xfs_buf_t *ibp; 2022 xfs_agnumber_t agno; 2023 xfs_daddr_t agdaddr; 2024 xfs_agino_t agino; 2025 xfs_agino_t next_agino; 2026 xfs_buf_t *last_ibp; 2027 xfs_dinode_t *last_dip = NULL; 2028 short bucket_index; 2029 int offset, last_offset = 0; 2030 int error; 2031 int agi_ok; 2032 2033 /* 2034 * First pull the on-disk inode from the AGI unlinked list. 2035 */ 2036 mp = tp->t_mountp; 2037 2038 agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 2039 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); 2040 2041 /* 2042 * Get the agi buffer first. It ensures lock ordering 2043 * on the list. 2044 */ 2045 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, 2046 XFS_FSS_TO_BB(mp, 1), 0, &agibp); 2047 if (error) { 2048 cmn_err(CE_WARN, 2049 "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.", 2050 error, mp->m_fsname); 2051 return error; 2052 } 2053 /* 2054 * Validate the magic number of the agi block. 2055 */ 2056 agi = XFS_BUF_TO_AGI(agibp); 2057 agi_ok = 2058 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && 2059 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); 2060 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE, 2061 XFS_RANDOM_IUNLINK_REMOVE))) { 2062 XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW, 2063 mp, agi); 2064 xfs_trans_brelse(tp, agibp); 2065 cmn_err(CE_WARN, 2066 "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.", 2067 mp->m_fsname); 2068 return XFS_ERROR(EFSCORRUPTED); 2069 } 2070 /* 2071 * Get the index into the agi hash table for the 2072 * list this inode will go on. 2073 */ 2074 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 2075 ASSERT(agino != 0); 2076 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 2077 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO); 2078 ASSERT(agi->agi_unlinked[bucket_index]); 2079 2080 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { 2081 /* 2082 * We're at the head of the list. Get the inode's 2083 * on-disk buffer to see if there is anyone after us 2084 * on the list. Only modify our next pointer if it 2085 * is not already NULLAGINO. This saves us the overhead 2086 * of dealing with the buffer when there is no need to 2087 * change it. 2088 */ 2089 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); 2090 if (error) { 2091 cmn_err(CE_WARN, 2092 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. 
Returning error.", 2093 error, mp->m_fsname); 2094 return error; 2095 } 2096 next_agino = be32_to_cpu(dip->di_next_unlinked); 2097 ASSERT(next_agino != 0); 2098 if (next_agino != NULLAGINO) { 2099 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 2100 offset = ip->i_boffset + 2101 offsetof(xfs_dinode_t, di_next_unlinked); 2102 xfs_trans_inode_buf(tp, ibp); 2103 xfs_trans_log_buf(tp, ibp, offset, 2104 (offset + sizeof(xfs_agino_t) - 1)); 2105 xfs_inobp_check(mp, ibp); 2106 } else { 2107 xfs_trans_brelse(tp, ibp); 2108 } 2109 /* 2110 * Point the bucket head pointer at the next inode. 2111 */ 2112 ASSERT(next_agino != 0); 2113 ASSERT(next_agino != agino); 2114 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); 2115 offset = offsetof(xfs_agi_t, agi_unlinked) + 2116 (sizeof(xfs_agino_t) * bucket_index); 2117 xfs_trans_log_buf(tp, agibp, offset, 2118 (offset + sizeof(xfs_agino_t) - 1)); 2119 } else { 2120 /* 2121 * We need to search the list for the inode being freed. 2122 */ 2123 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 2124 last_ibp = NULL; 2125 while (next_agino != agino) { 2126 /* 2127 * If the last inode wasn't the one pointing to 2128 * us, then release its buffer since we're not 2129 * going to do anything with it. 2130 */ 2131 if (last_ibp != NULL) { 2132 xfs_trans_brelse(tp, last_ibp); 2133 } 2134 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); 2135 error = xfs_inotobp(mp, tp, next_ino, &last_dip, 2136 &last_ibp, &last_offset); 2137 if (error) { 2138 cmn_err(CE_WARN, 2139 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.", 2140 error, mp->m_fsname); 2141 return error; 2142 } 2143 next_agino = be32_to_cpu(last_dip->di_next_unlinked); 2144 ASSERT(next_agino != NULLAGINO); 2145 ASSERT(next_agino != 0); 2146 } 2147 /* 2148 * Now last_ibp points to the buffer previous to us on 2149 * the unlinked list. Pull us from the list. 2150 */ 2151 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); 2152 if (error) { 2153 cmn_err(CE_WARN, 2154 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", 2155 error, mp->m_fsname); 2156 return error; 2157 } 2158 next_agino = be32_to_cpu(dip->di_next_unlinked); 2159 ASSERT(next_agino != 0); 2160 ASSERT(next_agino != agino); 2161 if (next_agino != NULLAGINO) { 2162 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 2163 offset = ip->i_boffset + 2164 offsetof(xfs_dinode_t, di_next_unlinked); 2165 xfs_trans_inode_buf(tp, ibp); 2166 xfs_trans_log_buf(tp, ibp, offset, 2167 (offset + sizeof(xfs_agino_t) - 1)); 2168 xfs_inobp_check(mp, ibp); 2169 } else { 2170 xfs_trans_brelse(tp, ibp); 2171 } 2172 /* 2173 * Point the previous inode on the list to the next inode. 
2174 */ 2175 last_dip->di_next_unlinked = cpu_to_be32(next_agino); 2176 ASSERT(next_agino != 0); 2177 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked); 2178 xfs_trans_inode_buf(tp, last_ibp); 2179 xfs_trans_log_buf(tp, last_ibp, offset, 2180 (offset + sizeof(xfs_agino_t) - 1)); 2181 xfs_inobp_check(mp, last_ibp); 2182 } 2183 return 0; 2184 } 2185 2186 STATIC_INLINE int xfs_inode_clean(xfs_inode_t *ip) 2187 { 2188 return (((ip->i_itemp == NULL) || 2189 !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) && 2190 (ip->i_update_core == 0)); 2191 } 2192 2193 STATIC void 2194 xfs_ifree_cluster( 2195 xfs_inode_t *free_ip, 2196 xfs_trans_t *tp, 2197 xfs_ino_t inum) 2198 { 2199 xfs_mount_t *mp = free_ip->i_mount; 2200 int blks_per_cluster; 2201 int nbufs; 2202 int ninodes; 2203 int i, j, found, pre_flushed; 2204 xfs_daddr_t blkno; 2205 xfs_buf_t *bp; 2206 xfs_inode_t *ip, **ip_found; 2207 xfs_inode_log_item_t *iip; 2208 xfs_log_item_t *lip; 2209 xfs_perag_t *pag = xfs_get_perag(mp, inum); 2210 SPLDECL(s); 2211 2212 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { 2213 blks_per_cluster = 1; 2214 ninodes = mp->m_sb.sb_inopblock; 2215 nbufs = XFS_IALLOC_BLOCKS(mp); 2216 } else { 2217 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) / 2218 mp->m_sb.sb_blocksize; 2219 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock; 2220 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster; 2221 } 2222 2223 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS); 2224 2225 for (j = 0; j < nbufs; j++, inum += ninodes) { 2226 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 2227 XFS_INO_TO_AGBNO(mp, inum)); 2228 2229 2230 /* 2231 * Look for each inode in memory and attempt to lock it, 2232 * we can be racing with flush and tail pushing here. 2233 * any inode we get the locks on, add to an array of 2234 * inode items to process later. 2235 * 2236 * The get the buffer lock, we could beat a flush 2237 * or tail pushing thread to the lock here, in which 2238 * case they will go looking for the inode buffer 2239 * and fail, we need some other form of interlock 2240 * here. 2241 */ 2242 found = 0; 2243 for (i = 0; i < ninodes; i++) { 2244 read_lock(&pag->pag_ici_lock); 2245 ip = radix_tree_lookup(&pag->pag_ici_root, 2246 XFS_INO_TO_AGINO(mp, (inum + i))); 2247 2248 /* Inode not in memory or we found it already, 2249 * nothing to do 2250 */ 2251 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) { 2252 read_unlock(&pag->pag_ici_lock); 2253 continue; 2254 } 2255 2256 if (xfs_inode_clean(ip)) { 2257 read_unlock(&pag->pag_ici_lock); 2258 continue; 2259 } 2260 2261 /* If we can get the locks then add it to the 2262 * list, otherwise by the time we get the bp lock 2263 * below it will already be attached to the 2264 * inode buffer. 2265 */ 2266 2267 /* This inode will already be locked - by us, lets 2268 * keep it that way. 
2269 */ 2270 2271 if (ip == free_ip) { 2272 if (xfs_iflock_nowait(ip)) { 2273 xfs_iflags_set(ip, XFS_ISTALE); 2274 if (xfs_inode_clean(ip)) { 2275 xfs_ifunlock(ip); 2276 } else { 2277 ip_found[found++] = ip; 2278 } 2279 } 2280 read_unlock(&pag->pag_ici_lock); 2281 continue; 2282 } 2283 2284 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2285 if (xfs_iflock_nowait(ip)) { 2286 xfs_iflags_set(ip, XFS_ISTALE); 2287 2288 if (xfs_inode_clean(ip)) { 2289 xfs_ifunlock(ip); 2290 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2291 } else { 2292 ip_found[found++] = ip; 2293 } 2294 } else { 2295 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2296 } 2297 } 2298 read_unlock(&pag->pag_ici_lock); 2299 } 2300 2301 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2302 mp->m_bsize * blks_per_cluster, 2303 XFS_BUF_LOCK); 2304 2305 pre_flushed = 0; 2306 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); 2307 while (lip) { 2308 if (lip->li_type == XFS_LI_INODE) { 2309 iip = (xfs_inode_log_item_t *)lip; 2310 ASSERT(iip->ili_logged == 1); 2311 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; 2312 AIL_LOCK(mp,s); 2313 iip->ili_flush_lsn = iip->ili_item.li_lsn; 2314 AIL_UNLOCK(mp, s); 2315 xfs_iflags_set(iip->ili_inode, XFS_ISTALE); 2316 pre_flushed++; 2317 } 2318 lip = lip->li_bio_list; 2319 } 2320 2321 for (i = 0; i < found; i++) { 2322 ip = ip_found[i]; 2323 iip = ip->i_itemp; 2324 2325 if (!iip) { 2326 ip->i_update_core = 0; 2327 xfs_ifunlock(ip); 2328 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2329 continue; 2330 } 2331 2332 iip->ili_last_fields = iip->ili_format.ilf_fields; 2333 iip->ili_format.ilf_fields = 0; 2334 iip->ili_logged = 1; 2335 AIL_LOCK(mp,s); 2336 iip->ili_flush_lsn = iip->ili_item.li_lsn; 2337 AIL_UNLOCK(mp, s); 2338 2339 xfs_buf_attach_iodone(bp, 2340 (void(*)(xfs_buf_t*,xfs_log_item_t*)) 2341 xfs_istale_done, (xfs_log_item_t *)iip); 2342 if (ip != free_ip) { 2343 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2344 } 2345 } 2346 2347 if (found || pre_flushed) 2348 xfs_trans_stale_inode_buf(tp, bp); 2349 xfs_trans_binval(tp, bp); 2350 } 2351 2352 kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *)); 2353 xfs_put_perag(mp, pag); 2354 } 2355 2356 /* 2357 * This is called to return an inode to the inode free list. 2358 * The inode should already be truncated to 0 length and have 2359 * no pages associated with it. This routine also assumes that 2360 * the inode is already a part of the transaction. 2361 * 2362 * The on-disk copy of the inode will have been added to the list 2363 * of unlinked inodes in the AGI. We need to remove the inode from 2364 * that list atomically with respect to freeing it here. 2365 */ 2366 int 2367 xfs_ifree( 2368 xfs_trans_t *tp, 2369 xfs_inode_t *ip, 2370 xfs_bmap_free_t *flist) 2371 { 2372 int error; 2373 int delete; 2374 xfs_ino_t first_ino; 2375 2376 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); 2377 ASSERT(ip->i_transp == tp); 2378 ASSERT(ip->i_d.di_nlink == 0); 2379 ASSERT(ip->i_d.di_nextents == 0); 2380 ASSERT(ip->i_d.di_anextents == 0); 2381 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) || 2382 ((ip->i_d.di_mode & S_IFMT) != S_IFREG)); 2383 ASSERT(ip->i_d.di_nblocks == 0); 2384 2385 /* 2386 * Pull the on-disk inode from the AGI unlinked list. 
2387 */ 2388 error = xfs_iunlink_remove(tp, ip); 2389 if (error != 0) { 2390 return error; 2391 } 2392 2393 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino); 2394 if (error != 0) { 2395 return error; 2396 } 2397 ip->i_d.di_mode = 0; /* mark incore inode as free */ 2398 ip->i_d.di_flags = 0; 2399 ip->i_d.di_dmevmask = 0; 2400 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ 2401 ip->i_df.if_ext_max = 2402 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 2403 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; 2404 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 2405 /* 2406 * Bump the generation count so no one will be confused 2407 * by reincarnations of this inode. 2408 */ 2409 ip->i_d.di_gen++; 2410 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2411 2412 if (delete) { 2413 xfs_ifree_cluster(ip, tp, first_ino); 2414 } 2415 2416 return 0; 2417 } 2418 2419 /* 2420 * Reallocate the space for if_broot based on the number of records 2421 * being added or deleted as indicated in rec_diff. Move the records 2422 * and pointers in if_broot to fit the new size. When shrinking this 2423 * will eliminate holes between the records and pointers created by 2424 * the caller. When growing this will create holes to be filled in 2425 * by the caller. 2426 * 2427 * The caller must not request to add more records than would fit in 2428 * the on-disk inode root. If the if_broot is currently NULL, then 2429 * if we adding records one will be allocated. The caller must also 2430 * not request that the number of records go below zero, although 2431 * it can go to zero. 2432 * 2433 * ip -- the inode whose if_broot area is changing 2434 * ext_diff -- the change in the number of records, positive or negative, 2435 * requested for the if_broot array. 2436 */ 2437 void 2438 xfs_iroot_realloc( 2439 xfs_inode_t *ip, 2440 int rec_diff, 2441 int whichfork) 2442 { 2443 int cur_max; 2444 xfs_ifork_t *ifp; 2445 xfs_bmbt_block_t *new_broot; 2446 int new_max; 2447 size_t new_size; 2448 char *np; 2449 char *op; 2450 2451 /* 2452 * Handle the degenerate case quietly. 2453 */ 2454 if (rec_diff == 0) { 2455 return; 2456 } 2457 2458 ifp = XFS_IFORK_PTR(ip, whichfork); 2459 if (rec_diff > 0) { 2460 /* 2461 * If there wasn't any memory allocated before, just 2462 * allocate it now and get out. 2463 */ 2464 if (ifp->if_broot_bytes == 0) { 2465 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); 2466 ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size, 2467 KM_SLEEP); 2468 ifp->if_broot_bytes = (int)new_size; 2469 return; 2470 } 2471 2472 /* 2473 * If there is already an existing if_broot, then we need 2474 * to realloc() it and shift the pointers to their new 2475 * location. The records don't change location because 2476 * they are kept butted up against the btree block header. 
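 *
 * In other words the incore broot buffer is laid out as
 *
 *	[ btree block header | records ... | pointers ... ]
 *
 * with the pointer array placed according to the total buffer size, so
 * resizing the buffer only requires moving the pointers out to their
 * new offset; the header and the records stay where they are.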
2477 */ 2478 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); 2479 new_max = cur_max + rec_diff; 2480 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2481 ifp->if_broot = (xfs_bmbt_block_t *) 2482 kmem_realloc(ifp->if_broot, 2483 new_size, 2484 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ 2485 KM_SLEEP); 2486 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, 2487 ifp->if_broot_bytes); 2488 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, 2489 (int)new_size); 2490 ifp->if_broot_bytes = (int)new_size; 2491 ASSERT(ifp->if_broot_bytes <= 2492 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2493 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t)); 2494 return; 2495 } 2496 2497 /* 2498 * rec_diff is less than 0. In this case, we are shrinking the 2499 * if_broot buffer. It must already exist. If we go to zero 2500 * records, just get rid of the root and clear the status bit. 2501 */ 2502 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); 2503 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); 2504 new_max = cur_max + rec_diff; 2505 ASSERT(new_max >= 0); 2506 if (new_max > 0) 2507 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2508 else 2509 new_size = 0; 2510 if (new_size > 0) { 2511 new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP); 2512 /* 2513 * First copy over the btree block header. 2514 */ 2515 memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t)); 2516 } else { 2517 new_broot = NULL; 2518 ifp->if_flags &= ~XFS_IFBROOT; 2519 } 2520 2521 /* 2522 * Only copy the records and pointers if there are any. 2523 */ 2524 if (new_max > 0) { 2525 /* 2526 * First copy the records. 2527 */ 2528 op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1, 2529 ifp->if_broot_bytes); 2530 np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1, 2531 (int)new_size); 2532 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t)); 2533 2534 /* 2535 * Then copy the pointers. 2536 */ 2537 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, 2538 ifp->if_broot_bytes); 2539 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1, 2540 (int)new_size); 2541 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); 2542 } 2543 kmem_free(ifp->if_broot, ifp->if_broot_bytes); 2544 ifp->if_broot = new_broot; 2545 ifp->if_broot_bytes = (int)new_size; 2546 ASSERT(ifp->if_broot_bytes <= 2547 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2548 return; 2549 } 2550 2551 2552 /* 2553 * This is called when the amount of space needed for if_data 2554 * is increased or decreased. The change in size is indicated by 2555 * the number of bytes that need to be added or deleted in the 2556 * byte_diff parameter. 2557 * 2558 * If the amount of space needed has decreased below the size of the 2559 * inline buffer, then switch to using the inline buffer. Otherwise, 2560 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer 2561 * to what is needed. 2562 * 2563 * ip -- the inode whose if_data area is changing 2564 * byte_diff -- the change in the number of bytes, positive or negative, 2565 * requested for the if_data array. 
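 *
 * A caller typically resizes the buffer and then fills it in itself.
 * A rough sketch (the 60 byte target and the "newdata" source buffer
 * are made up for illustration; the caller must hold the inode lock):
 *
 *	xfs_idata_realloc(ip, 60 - ip->i_df.if_bytes, XFS_DATA_FORK);
 *	memcpy(ip->i_df.if_u1.if_data, newdata, 60);
 *
 * i.e. byte_diff is a delta, not an absolute size.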
2566 */ 2567 void 2568 xfs_idata_realloc( 2569 xfs_inode_t *ip, 2570 int byte_diff, 2571 int whichfork) 2572 { 2573 xfs_ifork_t *ifp; 2574 int new_size; 2575 int real_size; 2576 2577 if (byte_diff == 0) { 2578 return; 2579 } 2580 2581 ifp = XFS_IFORK_PTR(ip, whichfork); 2582 new_size = (int)ifp->if_bytes + byte_diff; 2583 ASSERT(new_size >= 0); 2584 2585 if (new_size == 0) { 2586 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2587 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); 2588 } 2589 ifp->if_u1.if_data = NULL; 2590 real_size = 0; 2591 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) { 2592 /* 2593 * If the valid extents/data can fit in if_inline_ext/data, 2594 * copy them from the malloc'd vector and free it. 2595 */ 2596 if (ifp->if_u1.if_data == NULL) { 2597 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2598 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2599 ASSERT(ifp->if_real_bytes != 0); 2600 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data, 2601 new_size); 2602 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); 2603 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2604 } 2605 real_size = 0; 2606 } else { 2607 /* 2608 * Stuck with malloc/realloc. 2609 * For inline data, the underlying buffer must be 2610 * a multiple of 4 bytes in size so that it can be 2611 * logged and stay on word boundaries. We enforce 2612 * that here. 2613 */ 2614 real_size = roundup(new_size, 4); 2615 if (ifp->if_u1.if_data == NULL) { 2616 ASSERT(ifp->if_real_bytes == 0); 2617 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 2618 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2619 /* 2620 * Only do the realloc if the underlying size 2621 * is really changing. 2622 */ 2623 if (ifp->if_real_bytes != real_size) { 2624 ifp->if_u1.if_data = 2625 kmem_realloc(ifp->if_u1.if_data, 2626 real_size, 2627 ifp->if_real_bytes, 2628 KM_SLEEP); 2629 } 2630 } else { 2631 ASSERT(ifp->if_real_bytes == 0); 2632 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 2633 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, 2634 ifp->if_bytes); 2635 } 2636 } 2637 ifp->if_real_bytes = real_size; 2638 ifp->if_bytes = new_size; 2639 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2640 } 2641 2642 2643 2644 2645 /* 2646 * Map inode to disk block and offset. 2647 * 2648 * mp -- the mount point structure for the current file system 2649 * tp -- the current transaction 2650 * ino -- the inode number of the inode to be located 2651 * imap -- this structure is filled in with the information necessary 2652 * to retrieve the given inode from disk 2653 * flags -- flags to pass to xfs_dilocate indicating whether or not 2654 * lookups in the inode btree were OK or not 2655 */ 2656 int 2657 xfs_imap( 2658 xfs_mount_t *mp, 2659 xfs_trans_t *tp, 2660 xfs_ino_t ino, 2661 xfs_imap_t *imap, 2662 uint flags) 2663 { 2664 xfs_fsblock_t fsbno; 2665 int len; 2666 int off; 2667 int error; 2668 2669 fsbno = imap->im_blkno ? 
2670 XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK; 2671 error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags); 2672 if (error != 0) { 2673 return error; 2674 } 2675 imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno); 2676 imap->im_len = XFS_FSB_TO_BB(mp, len); 2677 imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno); 2678 imap->im_ioffset = (ushort)off; 2679 imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog); 2680 return 0; 2681 } 2682 2683 void 2684 xfs_idestroy_fork( 2685 xfs_inode_t *ip, 2686 int whichfork) 2687 { 2688 xfs_ifork_t *ifp; 2689 2690 ifp = XFS_IFORK_PTR(ip, whichfork); 2691 if (ifp->if_broot != NULL) { 2692 kmem_free(ifp->if_broot, ifp->if_broot_bytes); 2693 ifp->if_broot = NULL; 2694 } 2695 2696 /* 2697 * If the format is local, then we can't have an extents 2698 * array so just look for an inline data array. If we're 2699 * not local then we may or may not have an extents list, 2700 * so check and free it up if we do. 2701 */ 2702 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 2703 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) && 2704 (ifp->if_u1.if_data != NULL)) { 2705 ASSERT(ifp->if_real_bytes != 0); 2706 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes); 2707 ifp->if_u1.if_data = NULL; 2708 ifp->if_real_bytes = 0; 2709 } 2710 } else if ((ifp->if_flags & XFS_IFEXTENTS) && 2711 ((ifp->if_flags & XFS_IFEXTIREC) || 2712 ((ifp->if_u1.if_extents != NULL) && 2713 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) { 2714 ASSERT(ifp->if_real_bytes != 0); 2715 xfs_iext_destroy(ifp); 2716 } 2717 ASSERT(ifp->if_u1.if_extents == NULL || 2718 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); 2719 ASSERT(ifp->if_real_bytes == 0); 2720 if (whichfork == XFS_ATTR_FORK) { 2721 kmem_zone_free(xfs_ifork_zone, ip->i_afp); 2722 ip->i_afp = NULL; 2723 } 2724 } 2725 2726 /* 2727 * This is called free all the memory associated with an inode. 2728 * It must free the inode itself and any buffers allocated for 2729 * if_extents/if_data and if_broot. It must also free the lock 2730 * associated with the inode. 2731 */ 2732 void 2733 xfs_idestroy( 2734 xfs_inode_t *ip) 2735 { 2736 2737 switch (ip->i_d.di_mode & S_IFMT) { 2738 case S_IFREG: 2739 case S_IFDIR: 2740 case S_IFLNK: 2741 xfs_idestroy_fork(ip, XFS_DATA_FORK); 2742 break; 2743 } 2744 if (ip->i_afp) 2745 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 2746 mrfree(&ip->i_lock); 2747 mrfree(&ip->i_iolock); 2748 freesema(&ip->i_flock); 2749 2750 #ifdef XFS_VNODE_TRACE 2751 ktrace_free(ip->i_trace); 2752 #endif 2753 #ifdef XFS_BMAP_TRACE 2754 ktrace_free(ip->i_xtrace); 2755 #endif 2756 #ifdef XFS_BMBT_TRACE 2757 ktrace_free(ip->i_btrace); 2758 #endif 2759 #ifdef XFS_RW_TRACE 2760 ktrace_free(ip->i_rwtrace); 2761 #endif 2762 #ifdef XFS_ILOCK_TRACE 2763 ktrace_free(ip->i_lock_trace); 2764 #endif 2765 #ifdef XFS_DIR2_TRACE 2766 ktrace_free(ip->i_dir_trace); 2767 #endif 2768 if (ip->i_itemp) { 2769 /* 2770 * Only if we are shutting down the fs will we see an 2771 * inode still in the AIL. If it is there, we should remove 2772 * it to prevent a use-after-free from occurring. 
2773 */ 2774 xfs_mount_t *mp = ip->i_mount; 2775 xfs_log_item_t *lip = &ip->i_itemp->ili_item; 2776 int s; 2777 2778 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || 2779 XFS_FORCED_SHUTDOWN(ip->i_mount)); 2780 if (lip->li_flags & XFS_LI_IN_AIL) { 2781 AIL_LOCK(mp, s); 2782 if (lip->li_flags & XFS_LI_IN_AIL) 2783 xfs_trans_delete_ail(mp, lip, s); 2784 else 2785 AIL_UNLOCK(mp, s); 2786 } 2787 xfs_inode_item_destroy(ip); 2788 } 2789 kmem_zone_free(xfs_inode_zone, ip); 2790 } 2791 2792 2793 /* 2794 * Increment the pin count of the given buffer. 2795 * This value is protected by ipinlock spinlock in the mount structure. 2796 */ 2797 void 2798 xfs_ipin( 2799 xfs_inode_t *ip) 2800 { 2801 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); 2802 2803 atomic_inc(&ip->i_pincount); 2804 } 2805 2806 /* 2807 * Decrement the pin count of the given inode, and wake up 2808 * anyone in xfs_iwait_unpin() if the count goes to 0. The 2809 * inode must have been previously pinned with a call to xfs_ipin(). 2810 */ 2811 void 2812 xfs_iunpin( 2813 xfs_inode_t *ip) 2814 { 2815 ASSERT(atomic_read(&ip->i_pincount) > 0); 2816 2817 if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) { 2818 2819 /* 2820 * If the inode is currently being reclaimed, the link between 2821 * the bhv_vnode and the xfs_inode will be broken after the 2822 * XFS_IRECLAIM* flag is set. Hence, if these flags are not 2823 * set, then we can move forward and mark the linux inode dirty 2824 * knowing that it is still valid as it won't freed until after 2825 * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The 2826 * i_flags_lock is used to synchronise the setting of the 2827 * XFS_IRECLAIM* flags and the breaking of the link, and so we 2828 * can execute atomically w.r.t to reclaim by holding this lock 2829 * here. 2830 * 2831 * However, we still need to issue the unpin wakeup call as the 2832 * inode reclaim may be blocked waiting for the inode to become 2833 * unpinned. 2834 */ 2835 2836 if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) { 2837 bhv_vnode_t *vp = XFS_ITOV_NULL(ip); 2838 struct inode *inode = NULL; 2839 2840 BUG_ON(vp == NULL); 2841 inode = vn_to_inode(vp); 2842 BUG_ON(inode->i_state & I_CLEAR); 2843 2844 /* make sync come back and flush this inode */ 2845 if (!(inode->i_state & (I_NEW|I_FREEING))) 2846 mark_inode_dirty_sync(inode); 2847 } 2848 spin_unlock(&ip->i_flags_lock); 2849 wake_up(&ip->i_ipin_wait); 2850 } 2851 } 2852 2853 /* 2854 * This is called to wait for the given inode to be unpinned. 2855 * It will sleep until this happens. The caller must have the 2856 * inode locked in at least shared mode so that the buffer cannot 2857 * be subsequently pinned once someone is waiting for it to be 2858 * unpinned. 2859 */ 2860 STATIC void 2861 xfs_iunpin_wait( 2862 xfs_inode_t *ip) 2863 { 2864 xfs_inode_log_item_t *iip; 2865 xfs_lsn_t lsn; 2866 2867 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS)); 2868 2869 if (atomic_read(&ip->i_pincount) == 0) { 2870 return; 2871 } 2872 2873 iip = ip->i_itemp; 2874 if (iip && iip->ili_last_lsn) { 2875 lsn = iip->ili_last_lsn; 2876 } else { 2877 lsn = (xfs_lsn_t)0; 2878 } 2879 2880 /* 2881 * Give the log a push so we don't wait here too long. 2882 */ 2883 xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE); 2884 2885 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0)); 2886 } 2887 2888 2889 /* 2890 * xfs_iextents_copy() 2891 * 2892 * This is called to copy the REAL extents (as opposed to the delayed 2893 * allocation extents) from the inode into the given buffer. 
It 2894 * returns the number of bytes copied into the buffer. 2895 * 2896 * If there are no delayed allocation extents, then we can just 2897 * memcpy() the extents into the buffer. Otherwise, we need to 2898 * examine each extent in turn and skip those which are delayed. 2899 */ 2900 int 2901 xfs_iextents_copy( 2902 xfs_inode_t *ip, 2903 xfs_bmbt_rec_t *dp, 2904 int whichfork) 2905 { 2906 int copied; 2907 int i; 2908 xfs_ifork_t *ifp; 2909 int nrecs; 2910 xfs_fsblock_t start_block; 2911 2912 ifp = XFS_IFORK_PTR(ip, whichfork); 2913 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); 2914 ASSERT(ifp->if_bytes > 0); 2915 2916 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2917 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork); 2918 ASSERT(nrecs > 0); 2919 2920 /* 2921 * There are some delayed allocation extents in the 2922 * inode, so copy the extents one at a time and skip 2923 * the delayed ones. There must be at least one 2924 * non-delayed extent. 2925 */ 2926 copied = 0; 2927 for (i = 0; i < nrecs; i++) { 2928 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); 2929 start_block = xfs_bmbt_get_startblock(ep); 2930 if (ISNULLSTARTBLOCK(start_block)) { 2931 /* 2932 * It's a delayed allocation extent, so skip it. 2933 */ 2934 continue; 2935 } 2936 2937 /* Translate to on disk format */ 2938 put_unaligned(cpu_to_be64(ep->l0), &dp->l0); 2939 put_unaligned(cpu_to_be64(ep->l1), &dp->l1); 2940 dp++; 2941 copied++; 2942 } 2943 ASSERT(copied != 0); 2944 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip)); 2945 2946 return (copied * (uint)sizeof(xfs_bmbt_rec_t)); 2947 } 2948 2949 /* 2950 * Each of the following cases stores data into the same region 2951 * of the on-disk inode, so only one of them can be valid at 2952 * any given time. While it is possible to have conflicting formats 2953 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is 2954 * in EXTENTS format, this can only happen when the fork has 2955 * changed formats after being modified but before being flushed. 2956 * In these cases, the format always takes precedence, because the 2957 * format indicates the current state of the fork. 2958 */ 2959 /*ARGSUSED*/ 2960 STATIC int 2961 xfs_iflush_fork( 2962 xfs_inode_t *ip, 2963 xfs_dinode_t *dip, 2964 xfs_inode_log_item_t *iip, 2965 int whichfork, 2966 xfs_buf_t *bp) 2967 { 2968 char *cp; 2969 xfs_ifork_t *ifp; 2970 xfs_mount_t *mp; 2971 #ifdef XFS_TRANS_DEBUG 2972 int first; 2973 #endif 2974 static const short brootflag[2] = 2975 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 2976 static const short dataflag[2] = 2977 { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; 2978 static const short extflag[2] = 2979 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 2980 2981 if (iip == NULL) 2982 return 0; 2983 ifp = XFS_IFORK_PTR(ip, whichfork); 2984 /* 2985 * This can happen if we gave up in iformat in an error path, 2986 * for the attribute fork. 
2987 */ 2988 if (ifp == NULL) { 2989 ASSERT(whichfork == XFS_ATTR_FORK); 2990 return 0; 2991 } 2992 cp = XFS_DFORK_PTR(dip, whichfork); 2993 mp = ip->i_mount; 2994 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 2995 case XFS_DINODE_FMT_LOCAL: 2996 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) && 2997 (ifp->if_bytes > 0)) { 2998 ASSERT(ifp->if_u1.if_data != NULL); 2999 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 3000 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); 3001 } 3002 break; 3003 3004 case XFS_DINODE_FMT_EXTENTS: 3005 ASSERT((ifp->if_flags & XFS_IFEXTENTS) || 3006 !(iip->ili_format.ilf_fields & extflag[whichfork])); 3007 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) || 3008 (ifp->if_bytes == 0)); 3009 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) || 3010 (ifp->if_bytes > 0)); 3011 if ((iip->ili_format.ilf_fields & extflag[whichfork]) && 3012 (ifp->if_bytes > 0)) { 3013 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); 3014 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, 3015 whichfork); 3016 } 3017 break; 3018 3019 case XFS_DINODE_FMT_BTREE: 3020 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) && 3021 (ifp->if_broot_bytes > 0)) { 3022 ASSERT(ifp->if_broot != NULL); 3023 ASSERT(ifp->if_broot_bytes <= 3024 (XFS_IFORK_SIZE(ip, whichfork) + 3025 XFS_BROOT_SIZE_ADJ)); 3026 xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes, 3027 (xfs_bmdr_block_t *)cp, 3028 XFS_DFORK_SIZE(dip, mp, whichfork)); 3029 } 3030 break; 3031 3032 case XFS_DINODE_FMT_DEV: 3033 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { 3034 ASSERT(whichfork == XFS_DATA_FORK); 3035 dip->di_u.di_dev = cpu_to_be32(ip->i_df.if_u2.if_rdev); 3036 } 3037 break; 3038 3039 case XFS_DINODE_FMT_UUID: 3040 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { 3041 ASSERT(whichfork == XFS_DATA_FORK); 3042 memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid, 3043 sizeof(uuid_t)); 3044 } 3045 break; 3046 3047 default: 3048 ASSERT(0); 3049 break; 3050 } 3051 3052 return 0; 3053 } 3054 3055 /* 3056 * xfs_iflush() will write a modified inode's changes out to the 3057 * inode's on disk home. The caller must have the inode lock held 3058 * in at least shared mode and the inode flush semaphore must be 3059 * held as well. The inode lock will still be held upon return from 3060 * the call and the caller is free to unlock it. 3061 * The inode flush lock will be unlocked when the inode reaches the disk. 3062 * The flags indicate how the inode's buffer should be written out. 3063 */ 3064 int 3065 xfs_iflush( 3066 xfs_inode_t *ip, 3067 uint flags) 3068 { 3069 xfs_inode_log_item_t *iip; 3070 xfs_buf_t *bp; 3071 xfs_dinode_t *dip; 3072 xfs_mount_t *mp; 3073 int error; 3074 /* REFERENCED */ 3075 xfs_inode_t *iq; 3076 int clcount; /* count of inodes clustered */ 3077 int bufwasdelwri; 3078 struct hlist_node *entry; 3079 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) }; 3080 3081 XFS_STATS_INC(xs_iflush_count); 3082 3083 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); 3084 ASSERT(issemalocked(&(ip->i_flock))); 3085 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3086 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3087 3088 iip = ip->i_itemp; 3089 mp = ip->i_mount; 3090 3091 /* 3092 * If the inode isn't dirty, then just release the inode 3093 * flush lock and do nothing. 3094 */ 3095 if ((ip->i_update_core == 0) && 3096 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { 3097 ASSERT((iip != NULL) ? 
3098 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1); 3099 xfs_ifunlock(ip); 3100 return 0; 3101 } 3102 3103 /* 3104 * We can't flush the inode until it is unpinned, so 3105 * wait for it. We know noone new can pin it, because 3106 * we are holding the inode lock shared and you need 3107 * to hold it exclusively to pin the inode. 3108 */ 3109 xfs_iunpin_wait(ip); 3110 3111 /* 3112 * This may have been unpinned because the filesystem is shutting 3113 * down forcibly. If that's the case we must not write this inode 3114 * to disk, because the log record didn't make it to disk! 3115 */ 3116 if (XFS_FORCED_SHUTDOWN(mp)) { 3117 ip->i_update_core = 0; 3118 if (iip) 3119 iip->ili_format.ilf_fields = 0; 3120 xfs_ifunlock(ip); 3121 return XFS_ERROR(EIO); 3122 } 3123 3124 /* 3125 * Get the buffer containing the on-disk inode. 3126 */ 3127 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0); 3128 if (error) { 3129 xfs_ifunlock(ip); 3130 return error; 3131 } 3132 3133 /* 3134 * Decide how buffer will be flushed out. This is done before 3135 * the call to xfs_iflush_int because this field is zeroed by it. 3136 */ 3137 if (iip != NULL && iip->ili_format.ilf_fields != 0) { 3138 /* 3139 * Flush out the inode buffer according to the directions 3140 * of the caller. In the cases where the caller has given 3141 * us a choice choose the non-delwri case. This is because 3142 * the inode is in the AIL and we need to get it out soon. 3143 */ 3144 switch (flags) { 3145 case XFS_IFLUSH_SYNC: 3146 case XFS_IFLUSH_DELWRI_ELSE_SYNC: 3147 flags = 0; 3148 break; 3149 case XFS_IFLUSH_ASYNC: 3150 case XFS_IFLUSH_DELWRI_ELSE_ASYNC: 3151 flags = INT_ASYNC; 3152 break; 3153 case XFS_IFLUSH_DELWRI: 3154 flags = INT_DELWRI; 3155 break; 3156 default: 3157 ASSERT(0); 3158 flags = 0; 3159 break; 3160 } 3161 } else { 3162 switch (flags) { 3163 case XFS_IFLUSH_DELWRI_ELSE_SYNC: 3164 case XFS_IFLUSH_DELWRI_ELSE_ASYNC: 3165 case XFS_IFLUSH_DELWRI: 3166 flags = INT_DELWRI; 3167 break; 3168 case XFS_IFLUSH_ASYNC: 3169 flags = INT_ASYNC; 3170 break; 3171 case XFS_IFLUSH_SYNC: 3172 flags = 0; 3173 break; 3174 default: 3175 ASSERT(0); 3176 flags = 0; 3177 break; 3178 } 3179 } 3180 3181 /* 3182 * First flush out the inode that xfs_iflush was called with. 3183 */ 3184 error = xfs_iflush_int(ip, bp); 3185 if (error) { 3186 goto corrupt_out; 3187 } 3188 3189 /* 3190 * inode clustering: 3191 * see if other inodes can be gathered into this write 3192 */ 3193 spin_lock(&ip->i_cluster->icl_lock); 3194 ip->i_cluster->icl_buf = bp; 3195 3196 clcount = 0; 3197 hlist_for_each_entry(iq, entry, &ip->i_cluster->icl_inodes, i_cnode) { 3198 if (iq == ip) 3199 continue; 3200 3201 /* 3202 * Do an un-protected check to see if the inode is dirty and 3203 * is a candidate for flushing. These checks will be repeated 3204 * later after the appropriate locks are acquired. 3205 */ 3206 iip = iq->i_itemp; 3207 if ((iq->i_update_core == 0) && 3208 ((iip == NULL) || 3209 !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) && 3210 xfs_ipincount(iq) == 0) { 3211 continue; 3212 } 3213 3214 /* 3215 * Try to get locks. If any are unavailable, 3216 * then this inode cannot be flushed and is skipped. 3217 */ 3218 3219 /* get inode locks (just i_lock) */ 3220 if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) { 3221 /* get inode flush lock */ 3222 if (xfs_iflock_nowait(iq)) { 3223 /* check if pinned */ 3224 if (xfs_ipincount(iq) == 0) { 3225 /* arriving here means that 3226 * this inode can be flushed. 
3227 * first re-check that it's 3228 * dirty 3229 */ 3230 iip = iq->i_itemp; 3231 if ((iq->i_update_core != 0)|| 3232 ((iip != NULL) && 3233 (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { 3234 clcount++; 3235 error = xfs_iflush_int(iq, bp); 3236 if (error) { 3237 xfs_iunlock(iq, 3238 XFS_ILOCK_SHARED); 3239 goto cluster_corrupt_out; 3240 } 3241 } else { 3242 xfs_ifunlock(iq); 3243 } 3244 } else { 3245 xfs_ifunlock(iq); 3246 } 3247 } 3248 xfs_iunlock(iq, XFS_ILOCK_SHARED); 3249 } 3250 } 3251 spin_unlock(&ip->i_cluster->icl_lock); 3252 3253 if (clcount) { 3254 XFS_STATS_INC(xs_icluster_flushcnt); 3255 XFS_STATS_ADD(xs_icluster_flushinode, clcount); 3256 } 3257 3258 /* 3259 * If the buffer is pinned then push on the log so we won't 3260 * get stuck waiting in the write for too long. 3261 */ 3262 if (XFS_BUF_ISPINNED(bp)){ 3263 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); 3264 } 3265 3266 if (flags & INT_DELWRI) { 3267 xfs_bdwrite(mp, bp); 3268 } else if (flags & INT_ASYNC) { 3269 xfs_bawrite(mp, bp); 3270 } else { 3271 error = xfs_bwrite(mp, bp); 3272 } 3273 return error; 3274 3275 corrupt_out: 3276 xfs_buf_relse(bp); 3277 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3278 xfs_iflush_abort(ip); 3279 /* 3280 * Unlocks the flush lock 3281 */ 3282 return XFS_ERROR(EFSCORRUPTED); 3283 3284 cluster_corrupt_out: 3285 /* Corruption detected in the clustering loop. Invalidate the 3286 * inode buffer and shut down the filesystem. 3287 */ 3288 spin_unlock(&ip->i_cluster->icl_lock); 3289 3290 /* 3291 * Clean up the buffer. If it was B_DELWRI, just release it -- 3292 * brelse can handle it with no problems. If not, shut down the 3293 * filesystem before releasing the buffer. 3294 */ 3295 if ((bufwasdelwri= XFS_BUF_ISDELAYWRITE(bp))) { 3296 xfs_buf_relse(bp); 3297 } 3298 3299 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3300 3301 if(!bufwasdelwri) { 3302 /* 3303 * Just like incore_relse: if we have b_iodone functions, 3304 * mark the buffer as an error and call them. Otherwise 3305 * mark it as stale and brelse. 3306 */ 3307 if (XFS_BUF_IODONE_FUNC(bp)) { 3308 XFS_BUF_CLR_BDSTRAT_FUNC(bp); 3309 XFS_BUF_UNDONE(bp); 3310 XFS_BUF_STALE(bp); 3311 XFS_BUF_SHUT(bp); 3312 XFS_BUF_ERROR(bp,EIO); 3313 xfs_biodone(bp); 3314 } else { 3315 XFS_BUF_STALE(bp); 3316 xfs_buf_relse(bp); 3317 } 3318 } 3319 3320 xfs_iflush_abort(iq); 3321 /* 3322 * Unlocks the flush lock 3323 */ 3324 return XFS_ERROR(EFSCORRUPTED); 3325 } 3326 3327 3328 STATIC int 3329 xfs_iflush_int( 3330 xfs_inode_t *ip, 3331 xfs_buf_t *bp) 3332 { 3333 xfs_inode_log_item_t *iip; 3334 xfs_dinode_t *dip; 3335 xfs_mount_t *mp; 3336 #ifdef XFS_TRANS_DEBUG 3337 int first; 3338 #endif 3339 SPLDECL(s); 3340 3341 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); 3342 ASSERT(issemalocked(&(ip->i_flock))); 3343 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3344 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3345 3346 iip = ip->i_itemp; 3347 mp = ip->i_mount; 3348 3349 3350 /* 3351 * If the inode isn't dirty, then just release the inode 3352 * flush lock and do nothing. 3353 */ 3354 if ((ip->i_update_core == 0) && 3355 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { 3356 xfs_ifunlock(ip); 3357 return 0; 3358 } 3359 3360 /* set *dip = inode's place in the buffer */ 3361 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset); 3362 3363 /* 3364 * Clear i_update_core before copying out the data. 3365 * This is for coordination with our timestamp updates 3366 * that don't hold the inode lock. 
They will always 3367 * update the timestamps BEFORE setting i_update_core, 3368 * so if we clear i_update_core after they set it we 3369 * are guaranteed to see their updates to the timestamps. 3370 * I believe that this depends on strongly ordered memory 3371 * semantics, but we have that. We use the SYNCHRONIZE 3372 * macro to make sure that the compiler does not reorder 3373 * the i_update_core access below the data copy below. 3374 */ 3375 ip->i_update_core = 0; 3376 SYNCHRONIZE(); 3377 3378 /* 3379 * Make sure to get the latest atime from the Linux inode. 3380 */ 3381 xfs_synchronize_atime(ip); 3382 3383 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC, 3384 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 3385 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3386 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p", 3387 ip->i_ino, be16_to_cpu(dip->di_core.di_magic), dip); 3388 goto corrupt_out; 3389 } 3390 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, 3391 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { 3392 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3393 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x", 3394 ip->i_ino, ip, ip->i_d.di_magic); 3395 goto corrupt_out; 3396 } 3397 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 3398 if (XFS_TEST_ERROR( 3399 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 3400 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 3401 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 3402 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3403 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p", 3404 ip->i_ino, ip); 3405 goto corrupt_out; 3406 } 3407 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 3408 if (XFS_TEST_ERROR( 3409 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 3410 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 3411 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 3412 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 3413 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3414 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p", 3415 ip->i_ino, ip); 3416 goto corrupt_out; 3417 } 3418 } 3419 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 3420 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 3421 XFS_RANDOM_IFLUSH_5)) { 3422 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3423 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p", 3424 ip->i_ino, 3425 ip->i_d.di_nextents + ip->i_d.di_anextents, 3426 ip->i_d.di_nblocks, 3427 ip); 3428 goto corrupt_out; 3429 } 3430 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 3431 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 3432 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3433 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 3434 ip->i_ino, ip->i_d.di_forkoff, ip); 3435 goto corrupt_out; 3436 } 3437 /* 3438 * bump the flush iteration count, used to detect flushes which 3439 * postdate a log record during recovery. 3440 */ 3441 3442 ip->i_d.di_flushiter++; 3443 3444 /* 3445 * Copy the dirty parts of the inode into the on-disk 3446 * inode. We always copy out the core of the inode, 3447 * because if the inode is dirty at all the core must 3448 * be. 
3449 */ 3450 xfs_dinode_to_disk(&dip->di_core, &ip->i_d); 3451 3452 /* Wrap, we never let the log put out DI_MAX_FLUSH */ 3453 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 3454 ip->i_d.di_flushiter = 0; 3455 3456 /* 3457 * If this is really an old format inode and the superblock version 3458 * has not been updated to support only new format inodes, then 3459 * convert back to the old inode format. If the superblock version 3460 * has been updated, then make the conversion permanent. 3461 */ 3462 ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 || 3463 XFS_SB_VERSION_HASNLINK(&mp->m_sb)); 3464 if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { 3465 if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { 3466 /* 3467 * Convert it back. 3468 */ 3469 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); 3470 dip->di_core.di_onlink = cpu_to_be16(ip->i_d.di_nlink); 3471 } else { 3472 /* 3473 * The superblock version has already been bumped, 3474 * so just make the conversion to the new inode 3475 * format permanent. 3476 */ 3477 ip->i_d.di_version = XFS_DINODE_VERSION_2; 3478 dip->di_core.di_version = XFS_DINODE_VERSION_2; 3479 ip->i_d.di_onlink = 0; 3480 dip->di_core.di_onlink = 0; 3481 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 3482 memset(&(dip->di_core.di_pad[0]), 0, 3483 sizeof(dip->di_core.di_pad)); 3484 ASSERT(ip->i_d.di_projid == 0); 3485 } 3486 } 3487 3488 if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) { 3489 goto corrupt_out; 3490 } 3491 3492 if (XFS_IFORK_Q(ip)) { 3493 /* 3494 * The only error from xfs_iflush_fork is on the data fork. 3495 */ 3496 (void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); 3497 } 3498 xfs_inobp_check(mp, bp); 3499 3500 /* 3501 * We've recorded everything logged in the inode, so we'd 3502 * like to clear the ilf_fields bits so we don't log and 3503 * flush things unnecessarily. However, we can't stop 3504 * logging all this information until the data we've copied 3505 * into the disk buffer is written to disk. If we did we might 3506 * overwrite the copy of the inode in the log with all the 3507 * data after re-logging only part of it, and in the face of 3508 * a crash we wouldn't have all the data we need to recover. 3509 * 3510 * What we do is move the bits to the ili_last_fields field. 3511 * When logging the inode, these bits are moved back to the 3512 * ilf_fields field. In the xfs_iflush_done() routine we 3513 * clear ili_last_fields, since we know that the information 3514 * those bits represent is permanently on disk. As long as 3515 * the flush completes before the inode is logged again, then 3516 * both ilf_fields and ili_last_fields will be cleared. 3517 * 3518 * We can play with the ilf_fields bits here, because the inode 3519 * lock must be held exclusively in order to set bits there 3520 * and the flush lock protects the ili_last_fields bits. 3521 * Set ili_logged so the flush done 3522 * routine can tell whether or not to look in the AIL. 3523 * Also, store the current LSN of the inode so that we can tell 3524 * whether the item has moved in the AIL from xfs_iflush_done(). 3525 * In order to read the lsn we need the AIL lock, because 3526 * it is a 64 bit value that cannot be read atomically. 
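 *
 * In outline, the handoff looks like this (restating the description
 * above, not new behaviour):
 *
 *	flush:     ili_last_fields = ilf_fields; ilf_fields = 0;
 *	re-log:    the saved bits are folded back into ilf_fields;
 *	I/O done:  xfs_iflush_done() clears ili_last_fields.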
3527 */ 3528 if (iip != NULL && iip->ili_format.ilf_fields != 0) { 3529 iip->ili_last_fields = iip->ili_format.ilf_fields; 3530 iip->ili_format.ilf_fields = 0; 3531 iip->ili_logged = 1; 3532 3533 ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ 3534 AIL_LOCK(mp,s); 3535 iip->ili_flush_lsn = iip->ili_item.li_lsn; 3536 AIL_UNLOCK(mp, s); 3537 3538 /* 3539 * Attach the function xfs_iflush_done to the inode's 3540 * buffer. This will remove the inode from the AIL 3541 * and unlock the inode's flush lock when the inode is 3542 * completely written to disk. 3543 */ 3544 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*)) 3545 xfs_iflush_done, (xfs_log_item_t *)iip); 3546 3547 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); 3548 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); 3549 } else { 3550 /* 3551 * We're flushing an inode which is not in the AIL and has 3552 * not been logged but has i_update_core set. For this 3553 * case we can use a B_DELWRI flush and immediately drop 3554 * the inode flush lock because we can avoid the whole 3555 * AIL state thing. It's OK to drop the flush lock now, 3556 * because we've already locked the buffer and to do anything 3557 * you really need both. 3558 */ 3559 if (iip != NULL) { 3560 ASSERT(iip->ili_logged == 0); 3561 ASSERT(iip->ili_last_fields == 0); 3562 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0); 3563 } 3564 xfs_ifunlock(ip); 3565 } 3566 3567 return 0; 3568 3569 corrupt_out: 3570 return XFS_ERROR(EFSCORRUPTED); 3571 } 3572 3573 3574 /* 3575 * Flush all inactive inodes in mp. 3576 */ 3577 void 3578 xfs_iflush_all( 3579 xfs_mount_t *mp) 3580 { 3581 xfs_inode_t *ip; 3582 bhv_vnode_t *vp; 3583 3584 again: 3585 XFS_MOUNT_ILOCK(mp); 3586 ip = mp->m_inodes; 3587 if (ip == NULL) 3588 goto out; 3589 3590 do { 3591 /* Make sure we skip markers inserted by sync */ 3592 if (ip->i_mount == NULL) { 3593 ip = ip->i_mnext; 3594 continue; 3595 } 3596 3597 vp = XFS_ITOV_NULL(ip); 3598 if (!vp) { 3599 XFS_MOUNT_IUNLOCK(mp); 3600 xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC); 3601 goto again; 3602 } 3603 3604 ASSERT(vn_count(vp) == 0); 3605 3606 ip = ip->i_mnext; 3607 } while (ip != mp->m_inodes); 3608 out: 3609 XFS_MOUNT_IUNLOCK(mp); 3610 } 3611 3612 /* 3613 * xfs_iaccess: check accessibility of inode for mode. 3614 */ 3615 int 3616 xfs_iaccess( 3617 xfs_inode_t *ip, 3618 mode_t mode, 3619 cred_t *cr) 3620 { 3621 int error; 3622 mode_t orgmode = mode; 3623 struct inode *inode = vn_to_inode(XFS_ITOV(ip)); 3624 3625 if (mode & S_IWUSR) { 3626 umode_t imode = inode->i_mode; 3627 3628 if (IS_RDONLY(inode) && 3629 (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode))) 3630 return XFS_ERROR(EROFS); 3631 3632 if (IS_IMMUTABLE(inode)) 3633 return XFS_ERROR(EACCES); 3634 } 3635 3636 /* 3637 * If there's an Access Control List it's used instead of 3638 * the mode bits. 3639 */ 3640 if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1) 3641 return error ? XFS_ERROR(error) : 0; 3642 3643 if (current_fsuid(cr) != ip->i_d.di_uid) { 3644 mode >>= 3; 3645 if (!in_group_p((gid_t)ip->i_d.di_gid)) 3646 mode >>= 3; 3647 } 3648 3649 /* 3650 * If the DACs are ok we don't need any capability check. 3651 */ 3652 if ((ip->i_d.di_mode & mode) == mode) 3653 return 0; 3654 /* 3655 * Read/write DACs are always overridable. 3656 * Executable DACs are overridable if at least one exec bit is set. 
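 *
 * So, for example, a sufficiently privileged caller can always override
 * a read or write check, but an execute check on a regular file is only
 * overridable if the file already has some execute bit set; directories
 * are always searchable with the capability.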
3657 */ 3658 if (!(orgmode & S_IXUSR) || 3659 (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode)) 3660 if (capable_cred(cr, CAP_DAC_OVERRIDE)) 3661 return 0; 3662 3663 if ((orgmode == S_IRUSR) || 3664 (S_ISDIR(inode->i_mode) && (!(orgmode & S_IWUSR)))) { 3665 if (capable_cred(cr, CAP_DAC_READ_SEARCH)) 3666 return 0; 3667 #ifdef NOISE 3668 cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode); 3669 #endif /* NOISE */ 3670 return XFS_ERROR(EACCES); 3671 } 3672 return XFS_ERROR(EACCES); 3673 } 3674 3675 /* 3676 * xfs_iroundup: round up argument to next power of two 3677 */ 3678 uint 3679 xfs_iroundup( 3680 uint v) 3681 { 3682 int i; 3683 uint m; 3684 3685 if ((v & (v - 1)) == 0) 3686 return v; 3687 ASSERT((v & 0x80000000) == 0); 3688 if ((v & (v + 1)) == 0) 3689 return v + 1; 3690 for (i = 0, m = 1; i < 31; i++, m <<= 1) { 3691 if (v & m) 3692 continue; 3693 v |= m; 3694 if ((v & (v + 1)) == 0) 3695 return v + 1; 3696 } 3697 ASSERT(0); 3698 return( 0 ); 3699 } 3700 3701 #ifdef XFS_ILOCK_TRACE 3702 ktrace_t *xfs_ilock_trace_buf; 3703 3704 void 3705 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) 3706 { 3707 ktrace_enter(ip->i_lock_trace, 3708 (void *)ip, 3709 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */ 3710 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */ 3711 (void *)ra, /* caller of ilock */ 3712 (void *)(unsigned long)current_cpu(), 3713 (void *)(unsigned long)current_pid(), 3714 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); 3715 } 3716 #endif 3717 3718 /* 3719 * Return a pointer to the extent record at file index idx. 3720 */ 3721 xfs_bmbt_rec_host_t * 3722 xfs_iext_get_ext( 3723 xfs_ifork_t *ifp, /* inode fork pointer */ 3724 xfs_extnum_t idx) /* index of target extent */ 3725 { 3726 ASSERT(idx >= 0); 3727 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { 3728 return ifp->if_u1.if_ext_irec->er_extbuf; 3729 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3730 xfs_ext_irec_t *erp; /* irec pointer */ 3731 int erp_idx = 0; /* irec index */ 3732 xfs_extnum_t page_idx = idx; /* ext index in target list */ 3733 3734 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3735 return &erp->er_extbuf[page_idx]; 3736 } else if (ifp->if_bytes) { 3737 return &ifp->if_u1.if_extents[idx]; 3738 } else { 3739 return NULL; 3740 } 3741 } 3742 3743 /* 3744 * Insert new item(s) into the extent records for incore inode 3745 * fork 'ifp'. 'count' new items are inserted at index 'idx'. 3746 */ 3747 void 3748 xfs_iext_insert( 3749 xfs_ifork_t *ifp, /* inode fork pointer */ 3750 xfs_extnum_t idx, /* starting index of new items */ 3751 xfs_extnum_t count, /* number of inserted items */ 3752 xfs_bmbt_irec_t *new) /* items to insert */ 3753 { 3754 xfs_extnum_t i; /* extent record index */ 3755 3756 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3757 xfs_iext_add(ifp, idx, count); 3758 for (i = idx; i < idx + count; i++, new++) 3759 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new); 3760 } 3761 3762 /* 3763 * This is called when the amount of space required for incore file 3764 * extents needs to be increased. The ext_diff parameter stores the 3765 * number of new extents being added and the idx parameter contains 3766 * the extent index where the new extents will be added. If the new 3767 * extents are being appended, then we just need to (re)allocate and 3768 * initialize the space. 
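 * (Appending is the common case: when idx == nextents none of the
 * existing records have to be moved, the buffer is simply grown and
 * the caller fills in the new slots on return.)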
Otherwise, if the new extents are being 3769 * inserted into the middle of the existing entries, a bit more work 3770 * is required to make room for the new extents to be inserted. The 3771 * caller is responsible for filling in the new extent entries upon 3772 * return. 3773 */ 3774 void 3775 xfs_iext_add( 3776 xfs_ifork_t *ifp, /* inode fork pointer */ 3777 xfs_extnum_t idx, /* index to begin adding exts */ 3778 int ext_diff) /* number of extents to add */ 3779 { 3780 int byte_diff; /* new bytes being added */ 3781 int new_size; /* size of extents after adding */ 3782 xfs_extnum_t nextents; /* number of extents in file */ 3783 3784 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3785 ASSERT((idx >= 0) && (idx <= nextents)); 3786 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t); 3787 new_size = ifp->if_bytes + byte_diff; 3788 /* 3789 * If the new number of extents (nextents + ext_diff) 3790 * fits inside the inode, then continue to use the inline 3791 * extent buffer. 3792 */ 3793 if (nextents + ext_diff <= XFS_INLINE_EXTS) { 3794 if (idx < nextents) { 3795 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff], 3796 &ifp->if_u2.if_inline_ext[idx], 3797 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 3798 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff); 3799 } 3800 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 3801 ifp->if_real_bytes = 0; 3802 ifp->if_lastex = nextents + ext_diff; 3803 } 3804 /* 3805 * Otherwise use a linear (direct) extent list. 3806 * If the extents are currently inside the inode, 3807 * xfs_iext_realloc_direct will switch us from 3808 * inline to direct extent allocation mode. 3809 */ 3810 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) { 3811 xfs_iext_realloc_direct(ifp, new_size); 3812 if (idx < nextents) { 3813 memmove(&ifp->if_u1.if_extents[idx + ext_diff], 3814 &ifp->if_u1.if_extents[idx], 3815 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 3816 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff); 3817 } 3818 } 3819 /* Indirection array */ 3820 else { 3821 xfs_ext_irec_t *erp; 3822 int erp_idx = 0; 3823 int page_idx = idx; 3824 3825 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS); 3826 if (ifp->if_flags & XFS_IFEXTIREC) { 3827 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1); 3828 } else { 3829 xfs_iext_irec_init(ifp); 3830 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3831 erp = ifp->if_u1.if_ext_irec; 3832 } 3833 /* Extents fit in target extent page */ 3834 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) { 3835 if (page_idx < erp->er_extcount) { 3836 memmove(&erp->er_extbuf[page_idx + ext_diff], 3837 &erp->er_extbuf[page_idx], 3838 (erp->er_extcount - page_idx) * 3839 sizeof(xfs_bmbt_rec_t)); 3840 memset(&erp->er_extbuf[page_idx], 0, byte_diff); 3841 } 3842 erp->er_extcount += ext_diff; 3843 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3844 } 3845 /* Insert a new extent page */ 3846 else if (erp) { 3847 xfs_iext_add_indirect_multi(ifp, 3848 erp_idx, page_idx, ext_diff); 3849 } 3850 /* 3851 * If extent(s) are being appended to the last page in 3852 * the indirection array and the new extent(s) don't fit 3853 * in the page, then erp is NULL and erp_idx is set to 3854 * the next index needed in the indirection array. 
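 * In that case the loop below allocates as many new pages as are
 * needed (via xfs_iext_irec_new()) to hold the appended extents, and
 * the caller fills in the records afterwards.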
3855 */ 3856 else { 3857 int count = ext_diff; 3858 3859 while (count) { 3860 erp = xfs_iext_irec_new(ifp, erp_idx); 3861 erp->er_extcount = count; 3862 count -= MIN(count, (int)XFS_LINEAR_EXTS); 3863 if (count) { 3864 erp_idx++; 3865 } 3866 } 3867 } 3868 } 3869 ifp->if_bytes = new_size; 3870 } 3871 3872 /* 3873 * This is called when incore extents are being added to the indirection 3874 * array and the new extents do not fit in the target extent list. The 3875 * erp_idx parameter contains the irec index for the target extent list 3876 * in the indirection array, and the idx parameter contains the extent 3877 * index within the list. The number of extents being added is stored 3878 * in the count parameter. 3879 * 3880 * |-------| |-------| 3881 * | | | | idx - number of extents before idx 3882 * | idx | | count | 3883 * | | | | count - number of extents being inserted at idx 3884 * |-------| |-------| 3885 * | count | | nex2 | nex2 - number of extents after idx + count 3886 * |-------| |-------| 3887 */ 3888 void 3889 xfs_iext_add_indirect_multi( 3890 xfs_ifork_t *ifp, /* inode fork pointer */ 3891 int erp_idx, /* target extent irec index */ 3892 xfs_extnum_t idx, /* index within target list */ 3893 int count) /* new extents being added */ 3894 { 3895 int byte_diff; /* new bytes being added */ 3896 xfs_ext_irec_t *erp; /* pointer to irec entry */ 3897 xfs_extnum_t ext_diff; /* number of extents to add */ 3898 xfs_extnum_t ext_cnt; /* new extents still needed */ 3899 xfs_extnum_t nex2; /* extents after idx + count */ 3900 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */ 3901 int nlists; /* number of irec's (lists) */ 3902 3903 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3904 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3905 nex2 = erp->er_extcount - idx; 3906 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3907 3908 /* 3909 * Save second part of target extent list 3910 * (all extents past */ 3911 if (nex2) { 3912 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3913 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP); 3914 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); 3915 erp->er_extcount -= nex2; 3916 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); 3917 memset(&erp->er_extbuf[idx], 0, byte_diff); 3918 } 3919 3920 /* 3921 * Add the new extents to the end of the target 3922 * list, then allocate new irec record(s) and 3923 * extent buffer(s) as needed to store the rest 3924 * of the new extents. 3925 */ 3926 ext_cnt = count; 3927 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount); 3928 if (ext_diff) { 3929 erp->er_extcount += ext_diff; 3930 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3931 ext_cnt -= ext_diff; 3932 } 3933 while (ext_cnt) { 3934 erp_idx++; 3935 erp = xfs_iext_irec_new(ifp, erp_idx); 3936 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS); 3937 erp->er_extcount = ext_diff; 3938 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3939 ext_cnt -= ext_diff; 3940 } 3941 3942 /* Add nex2 extents back to indirection array */ 3943 if (nex2) { 3944 xfs_extnum_t ext_avail; 3945 int i; 3946 3947 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3948 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; 3949 i = 0; 3950 /* 3951 * If nex2 extents fit in the current page, append 3952 * nex2_ep after the new extents. 3953 */ 3954 if (nex2 <= ext_avail) { 3955 i = erp->er_extcount; 3956 } 3957 /* 3958 * Otherwise, check if space is available in the 3959 * next page. 
3960 */ 3961 else if ((erp_idx < nlists - 1) && 3962 (nex2 <= (ext_avail = XFS_LINEAR_EXTS - 3963 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) { 3964 erp_idx++; 3965 erp++; 3966 /* Create a hole for nex2 extents */ 3967 memmove(&erp->er_extbuf[nex2], erp->er_extbuf, 3968 erp->er_extcount * sizeof(xfs_bmbt_rec_t)); 3969 } 3970 /* 3971 * Final choice, create a new extent page for 3972 * nex2 extents. 3973 */ 3974 else { 3975 erp_idx++; 3976 erp = xfs_iext_irec_new(ifp, erp_idx); 3977 } 3978 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); 3979 kmem_free(nex2_ep, byte_diff); 3980 erp->er_extcount += nex2; 3981 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); 3982 } 3983 } 3984 3985 /* 3986 * This is called when the amount of space required for incore file 3987 * extents needs to be decreased. The ext_diff parameter stores the 3988 * number of extents to be removed and the idx parameter contains 3989 * the extent index where the extents will be removed from. 3990 * 3991 * If the amount of space needed has decreased below the linear 3992 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous 3993 * extent array. Otherwise, use kmem_realloc() to adjust the 3994 * size to what is needed. 3995 */ 3996 void 3997 xfs_iext_remove( 3998 xfs_ifork_t *ifp, /* inode fork pointer */ 3999 xfs_extnum_t idx, /* index to begin removing exts */ 4000 int ext_diff) /* number of extents to remove */ 4001 { 4002 xfs_extnum_t nextents; /* number of extents in file */ 4003 int new_size; /* size of extents after removal */ 4004 4005 ASSERT(ext_diff > 0); 4006 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4007 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); 4008 4009 if (new_size == 0) { 4010 xfs_iext_destroy(ifp); 4011 } else if (ifp->if_flags & XFS_IFEXTIREC) { 4012 xfs_iext_remove_indirect(ifp, idx, ext_diff); 4013 } else if (ifp->if_real_bytes) { 4014 xfs_iext_remove_direct(ifp, idx, ext_diff); 4015 } else { 4016 xfs_iext_remove_inline(ifp, idx, ext_diff); 4017 } 4018 ifp->if_bytes = new_size; 4019 } 4020 4021 /* 4022 * This removes ext_diff extents from the inline buffer, beginning 4023 * at extent index idx. 4024 */ 4025 void 4026 xfs_iext_remove_inline( 4027 xfs_ifork_t *ifp, /* inode fork pointer */ 4028 xfs_extnum_t idx, /* index to begin removing exts */ 4029 int ext_diff) /* number of extents to remove */ 4030 { 4031 int nextents; /* number of extents in file */ 4032 4033 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 4034 ASSERT(idx < XFS_INLINE_EXTS); 4035 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4036 ASSERT(((nextents - ext_diff) > 0) && 4037 (nextents - ext_diff) < XFS_INLINE_EXTS); 4038 4039 if (idx + ext_diff < nextents) { 4040 memmove(&ifp->if_u2.if_inline_ext[idx], 4041 &ifp->if_u2.if_inline_ext[idx + ext_diff], 4042 (nextents - (idx + ext_diff)) * 4043 sizeof(xfs_bmbt_rec_t)); 4044 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff], 4045 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 4046 } else { 4047 memset(&ifp->if_u2.if_inline_ext[idx], 0, 4048 ext_diff * sizeof(xfs_bmbt_rec_t)); 4049 } 4050 } 4051 4052 /* 4053 * This removes ext_diff extents from a linear (direct) extent list, 4054 * beginning at extent index idx. If the extents are being removed 4055 * from the end of the list (ie. truncate) then we just need to re- 4056 * allocate the list to remove the extra space. 
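 * (That is the idx + ext_diff == nextents case below, in which the
 * memmove() is skipped entirely.)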
Otherwise, if the 4057 * extents are being removed from the middle of the existing extent 4058 * entries, then we first need to move the extent records beginning 4059 * at idx + ext_diff up in the list to overwrite the records being 4060 * removed, then remove the extra space via kmem_realloc. 4061 */ 4062 void 4063 xfs_iext_remove_direct( 4064 xfs_ifork_t *ifp, /* inode fork pointer */ 4065 xfs_extnum_t idx, /* index to begin removing exts */ 4066 int ext_diff) /* number of extents to remove */ 4067 { 4068 xfs_extnum_t nextents; /* number of extents in file */ 4069 int new_size; /* size of extents after removal */ 4070 4071 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 4072 new_size = ifp->if_bytes - 4073 (ext_diff * sizeof(xfs_bmbt_rec_t)); 4074 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4075 4076 if (new_size == 0) { 4077 xfs_iext_destroy(ifp); 4078 return; 4079 } 4080 /* Move extents up in the list (if needed) */ 4081 if (idx + ext_diff < nextents) { 4082 memmove(&ifp->if_u1.if_extents[idx], 4083 &ifp->if_u1.if_extents[idx + ext_diff], 4084 (nextents - (idx + ext_diff)) * 4085 sizeof(xfs_bmbt_rec_t)); 4086 } 4087 memset(&ifp->if_u1.if_extents[nextents - ext_diff], 4088 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 4089 /* 4090 * Reallocate the direct extent list. If the extents 4091 * will fit inside the inode then xfs_iext_realloc_direct 4092 * will switch from direct to inline extent allocation 4093 * mode for us. 4094 */ 4095 xfs_iext_realloc_direct(ifp, new_size); 4096 ifp->if_bytes = new_size; 4097 } 4098 4099 /* 4100 * This is called when incore extents are being removed from the 4101 * indirection array and the extents being removed span multiple extent 4102 * buffers. The idx parameter contains the file extent index where we 4103 * want to begin removing extents, and the count parameter contains 4104 * how many extents need to be removed. 4105 * 4106 * |-------| |-------| 4107 * | nex1 | | | nex1 - number of extents before idx 4108 * |-------| | count | 4109 * | | | | count - number of extents being removed at idx 4110 * | count | |-------| 4111 * | | | nex2 | nex2 - number of extents after idx + count 4112 * |-------| |-------| 4113 */ 4114 void 4115 xfs_iext_remove_indirect( 4116 xfs_ifork_t *ifp, /* inode fork pointer */ 4117 xfs_extnum_t idx, /* index to begin removing extents */ 4118 int count) /* number of extents to remove */ 4119 { 4120 xfs_ext_irec_t *erp; /* indirection array pointer */ 4121 int erp_idx = 0; /* indirection array index */ 4122 xfs_extnum_t ext_cnt; /* extents left to remove */ 4123 xfs_extnum_t ext_diff; /* extents to remove in current list */ 4124 xfs_extnum_t nex1; /* number of extents before idx */ 4125 xfs_extnum_t nex2; /* extents after idx + count */ 4126 int nlists; /* entries in indirection array */ 4127 int page_idx = idx; /* index in target extent list */ 4128 4129 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4130 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 4131 ASSERT(erp != NULL); 4132 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4133 nex1 = page_idx; 4134 ext_cnt = count; 4135 while (ext_cnt) { 4136 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0); 4137 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1)); 4138 /* 4139 * Check for deletion of entire list; 4140 * xfs_iext_irec_remove() updates extent offsets. 
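 * After such a removal the following irec entries slide down one
 * slot, so erp_idx is left alone and the record at the same index is
 * simply re-read on the next pass.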
4141 */ 4142 if (ext_diff == erp->er_extcount) { 4143 xfs_iext_irec_remove(ifp, erp_idx); 4144 ext_cnt -= ext_diff; 4145 nex1 = 0; 4146 if (ext_cnt) { 4147 ASSERT(erp_idx < ifp->if_real_bytes / 4148 XFS_IEXT_BUFSZ); 4149 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4150 nex1 = 0; 4151 continue; 4152 } else { 4153 break; 4154 } 4155 } 4156 /* Move extents up (if needed) */ 4157 if (nex2) { 4158 memmove(&erp->er_extbuf[nex1], 4159 &erp->er_extbuf[nex1 + ext_diff], 4160 nex2 * sizeof(xfs_bmbt_rec_t)); 4161 } 4162 /* Zero out rest of page */ 4163 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ - 4164 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t)))); 4165 /* Update remaining counters */ 4166 erp->er_extcount -= ext_diff; 4167 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff); 4168 ext_cnt -= ext_diff; 4169 nex1 = 0; 4170 erp_idx++; 4171 erp++; 4172 } 4173 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t); 4174 xfs_iext_irec_compact(ifp); 4175 } 4176 4177 /* 4178 * Create, destroy, or resize a linear (direct) block of extents. 4179 */ 4180 void 4181 xfs_iext_realloc_direct( 4182 xfs_ifork_t *ifp, /* inode fork pointer */ 4183 int new_size) /* new size of extents */ 4184 { 4185 int rnew_size; /* real new size of extents */ 4186 4187 rnew_size = new_size; 4188 4189 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) || 4190 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) && 4191 (new_size != ifp->if_real_bytes))); 4192 4193 /* Free extent records */ 4194 if (new_size == 0) { 4195 xfs_iext_destroy(ifp); 4196 } 4197 /* Resize direct extent list and zero any new bytes */ 4198 else if (ifp->if_real_bytes) { 4199 /* Check if extents will fit inside the inode */ 4200 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) { 4201 xfs_iext_direct_to_inline(ifp, new_size / 4202 (uint)sizeof(xfs_bmbt_rec_t)); 4203 ifp->if_bytes = new_size; 4204 return; 4205 } 4206 if (!is_power_of_2(new_size)){ 4207 rnew_size = xfs_iroundup(new_size); 4208 } 4209 if (rnew_size != ifp->if_real_bytes) { 4210 ifp->if_u1.if_extents = 4211 kmem_realloc(ifp->if_u1.if_extents, 4212 rnew_size, 4213 ifp->if_real_bytes, 4214 KM_SLEEP); 4215 } 4216 if (rnew_size > ifp->if_real_bytes) { 4217 memset(&ifp->if_u1.if_extents[ifp->if_bytes / 4218 (uint)sizeof(xfs_bmbt_rec_t)], 0, 4219 rnew_size - ifp->if_real_bytes); 4220 } 4221 } 4222 /* 4223 * Switch from the inline extent buffer to a direct 4224 * extent list. Be sure to include the inline extent 4225 * bytes in new_size. 4226 */ 4227 else { 4228 new_size += ifp->if_bytes; 4229 if (!is_power_of_2(new_size)) { 4230 rnew_size = xfs_iroundup(new_size); 4231 } 4232 xfs_iext_inline_to_direct(ifp, rnew_size); 4233 } 4234 ifp->if_real_bytes = rnew_size; 4235 ifp->if_bytes = new_size; 4236 } 4237 4238 /* 4239 * Switch from linear (direct) extent records to inline buffer. 4240 */ 4241 void 4242 xfs_iext_direct_to_inline( 4243 xfs_ifork_t *ifp, /* inode fork pointer */ 4244 xfs_extnum_t nextents) /* number of extents in file */ 4245 { 4246 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 4247 ASSERT(nextents <= XFS_INLINE_EXTS); 4248 /* 4249 * The inline buffer was zeroed when we switched 4250 * from inline to direct extent allocation mode, 4251 * so we don't need to clear it here. 
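 * (xfs_iext_inline_to_direct() below clears if_u2.if_inline_ext once
 * the records have been copied out, which is what keeps the inline
 * buffer all zeroes.)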
4252 */ 4253 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents, 4254 nextents * sizeof(xfs_bmbt_rec_t)); 4255 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); 4256 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 4257 ifp->if_real_bytes = 0; 4258 } 4259 4260 /* 4261 * Switch from inline buffer to linear (direct) extent records. 4262 * new_size should already be rounded up to the next power of 2 4263 * by the caller (when appropriate), so use new_size as it is. 4264 * However, since new_size may be rounded up, we can't update 4265 * if_bytes here. It is the caller's responsibility to update 4266 * if_bytes upon return. 4267 */ 4268 void 4269 xfs_iext_inline_to_direct( 4270 xfs_ifork_t *ifp, /* inode fork pointer */ 4271 int new_size) /* number of extents in file */ 4272 { 4273 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_SLEEP); 4274 memset(ifp->if_u1.if_extents, 0, new_size); 4275 if (ifp->if_bytes) { 4276 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, 4277 ifp->if_bytes); 4278 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 4279 sizeof(xfs_bmbt_rec_t)); 4280 } 4281 ifp->if_real_bytes = new_size; 4282 } 4283 4284 /* 4285 * Resize an extent indirection array to new_size bytes. 4286 */ 4287 void 4288 xfs_iext_realloc_indirect( 4289 xfs_ifork_t *ifp, /* inode fork pointer */ 4290 int new_size) /* new indirection array size */ 4291 { 4292 int nlists; /* number of irec's (ex lists) */ 4293 int size; /* current indirection array size */ 4294 4295 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4296 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4297 size = nlists * sizeof(xfs_ext_irec_t); 4298 ASSERT(ifp->if_real_bytes); 4299 ASSERT((new_size >= 0) && (new_size != size)); 4300 if (new_size == 0) { 4301 xfs_iext_destroy(ifp); 4302 } else { 4303 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) 4304 kmem_realloc(ifp->if_u1.if_ext_irec, 4305 new_size, size, KM_SLEEP); 4306 } 4307 } 4308 4309 /* 4310 * Switch from indirection array to linear (direct) extent allocations. 4311 */ 4312 void 4313 xfs_iext_indirect_to_direct( 4314 xfs_ifork_t *ifp) /* inode fork pointer */ 4315 { 4316 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ 4317 xfs_extnum_t nextents; /* number of extents in file */ 4318 int size; /* size of file extents */ 4319 4320 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4321 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4322 ASSERT(nextents <= XFS_LINEAR_EXTS); 4323 size = nextents * sizeof(xfs_bmbt_rec_t); 4324 4325 xfs_iext_irec_compact_full(ifp); 4326 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); 4327 4328 ep = ifp->if_u1.if_ext_irec->er_extbuf; 4329 kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t)); 4330 ifp->if_flags &= ~XFS_IFEXTIREC; 4331 ifp->if_u1.if_extents = ep; 4332 ifp->if_bytes = size; 4333 if (nextents < XFS_LINEAR_EXTS) { 4334 xfs_iext_realloc_direct(ifp, size); 4335 } 4336 } 4337 4338 /* 4339 * Free incore file extents. 
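 * All three representations are handled: an indirection array is torn
 * down one page at a time, a direct list is freed outright, and a
 * non-empty inline buffer is simply zeroed.  On return the fork holds
 * no extent records and both if_bytes and if_real_bytes are zero.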
4340 */ 4341 void 4342 xfs_iext_destroy( 4343 xfs_ifork_t *ifp) /* inode fork pointer */ 4344 { 4345 if (ifp->if_flags & XFS_IFEXTIREC) { 4346 int erp_idx; 4347 int nlists; 4348 4349 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4350 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) { 4351 xfs_iext_irec_remove(ifp, erp_idx); 4352 } 4353 ifp->if_flags &= ~XFS_IFEXTIREC; 4354 } else if (ifp->if_real_bytes) { 4355 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); 4356 } else if (ifp->if_bytes) { 4357 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 4358 sizeof(xfs_bmbt_rec_t)); 4359 } 4360 ifp->if_u1.if_extents = NULL; 4361 ifp->if_real_bytes = 0; 4362 ifp->if_bytes = 0; 4363 } 4364 4365 /* 4366 * Return a pointer to the extent record for file system block bno. 4367 */ 4368 xfs_bmbt_rec_host_t * /* pointer to found extent record */ 4369 xfs_iext_bno_to_ext( 4370 xfs_ifork_t *ifp, /* inode fork pointer */ 4371 xfs_fileoff_t bno, /* block number to search for */ 4372 xfs_extnum_t *idxp) /* index of target extent */ 4373 { 4374 xfs_bmbt_rec_host_t *base; /* pointer to first extent */ 4375 xfs_filblks_t blockcount = 0; /* number of blocks in extent */ 4376 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */ 4377 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 4378 int high; /* upper boundary in search */ 4379 xfs_extnum_t idx = 0; /* index of target extent */ 4380 int low; /* lower boundary in search */ 4381 xfs_extnum_t nextents; /* number of file extents */ 4382 xfs_fileoff_t startoff = 0; /* start offset of extent */ 4383 4384 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4385 if (nextents == 0) { 4386 *idxp = 0; 4387 return NULL; 4388 } 4389 low = 0; 4390 if (ifp->if_flags & XFS_IFEXTIREC) { 4391 /* Find target extent list */ 4392 int erp_idx = 0; 4393 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx); 4394 base = erp->er_extbuf; 4395 high = erp->er_extcount - 1; 4396 } else { 4397 base = ifp->if_u1.if_extents; 4398 high = nextents - 1; 4399 } 4400 /* Binary search extent records */ 4401 while (low <= high) { 4402 idx = (low + high) >> 1; 4403 ep = base + idx; 4404 startoff = xfs_bmbt_get_startoff(ep); 4405 blockcount = xfs_bmbt_get_blockcount(ep); 4406 if (bno < startoff) { 4407 high = idx - 1; 4408 } else if (bno >= startoff + blockcount) { 4409 low = idx + 1; 4410 } else { 4411 /* Convert back to file-based extent index */ 4412 if (ifp->if_flags & XFS_IFEXTIREC) { 4413 idx += erp->er_extoff; 4414 } 4415 *idxp = idx; 4416 return ep; 4417 } 4418 } 4419 /* Convert back to file-based extent index */ 4420 if (ifp->if_flags & XFS_IFEXTIREC) { 4421 idx += erp->er_extoff; 4422 } 4423 if (bno >= startoff + blockcount) { 4424 if (++idx == nextents) { 4425 ep = NULL; 4426 } else { 4427 ep = xfs_iext_get_ext(ifp, idx); 4428 } 4429 } 4430 *idxp = idx; 4431 return ep; 4432 } 4433 4434 /* 4435 * Return a pointer to the indirection array entry containing the 4436 * extent record for filesystem block bno. Store the index of the 4437 * target irec in *erp_idxp. 
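 * The pages are binary searched by the startoff of their first
 * extent; the caller still has to search within the returned page
 * (see xfs_iext_bno_to_ext() above) to find the record itself.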
4438 */ 4439 xfs_ext_irec_t * /* pointer to found extent record */ 4440 xfs_iext_bno_to_irec( 4441 xfs_ifork_t *ifp, /* inode fork pointer */ 4442 xfs_fileoff_t bno, /* block number to search for */ 4443 int *erp_idxp) /* irec index of target ext list */ 4444 { 4445 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 4446 xfs_ext_irec_t *erp_next; /* next indirection array entry */ 4447 int erp_idx; /* indirection array index */ 4448 int nlists; /* number of extent irec's (lists) */ 4449 int high; /* binary search upper limit */ 4450 int low; /* binary search lower limit */ 4451 4452 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4453 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4454 erp_idx = 0; 4455 low = 0; 4456 high = nlists - 1; 4457 while (low <= high) { 4458 erp_idx = (low + high) >> 1; 4459 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4460 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL; 4461 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) { 4462 high = erp_idx - 1; 4463 } else if (erp_next && bno >= 4464 xfs_bmbt_get_startoff(erp_next->er_extbuf)) { 4465 low = erp_idx + 1; 4466 } else { 4467 break; 4468 } 4469 } 4470 *erp_idxp = erp_idx; 4471 return erp; 4472 } 4473 4474 /* 4475 * Return a pointer to the indirection array entry containing the 4476 * extent record at file extent index *idxp. Store the index of the 4477 * target irec in *erp_idxp and store the page index of the target 4478 * extent record in *idxp. 4479 */ 4480 xfs_ext_irec_t * 4481 xfs_iext_idx_to_irec( 4482 xfs_ifork_t *ifp, /* inode fork pointer */ 4483 xfs_extnum_t *idxp, /* extent index (file -> page) */ 4484 int *erp_idxp, /* pointer to target irec */ 4485 int realloc) /* new bytes were just added */ 4486 { 4487 xfs_ext_irec_t *prev; /* pointer to previous irec */ 4488 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */ 4489 int erp_idx; /* indirection array index */ 4490 int nlists; /* number of irec's (ex lists) */ 4491 int high; /* binary search upper limit */ 4492 int low; /* binary search lower limit */ 4493 xfs_extnum_t page_idx = *idxp; /* extent index in target list */ 4494 4495 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4496 ASSERT(page_idx >= 0 && page_idx <= 4497 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); 4498 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4499 erp_idx = 0; 4500 low = 0; 4501 high = nlists - 1; 4502 4503 /* Binary search extent irec's */ 4504 while (low <= high) { 4505 erp_idx = (low + high) >> 1; 4506 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4507 prev = erp_idx > 0 ? erp - 1 : NULL; 4508 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff && 4509 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) { 4510 high = erp_idx - 1; 4511 } else if (page_idx > erp->er_extoff + erp->er_extcount || 4512 (page_idx == erp->er_extoff + erp->er_extcount && 4513 !realloc)) { 4514 low = erp_idx + 1; 4515 } else if (page_idx == erp->er_extoff + erp->er_extcount && 4516 erp->er_extcount == XFS_LINEAR_EXTS) { 4517 ASSERT(realloc); 4518 page_idx = 0; 4519 erp_idx++; 4520 erp = erp_idx < nlists ? erp + 1 : NULL; 4521 break; 4522 } else { 4523 page_idx -= erp->er_extoff; 4524 break; 4525 } 4526 } 4527 *idxp = page_idx; 4528 *erp_idxp = erp_idx; 4529 return(erp); 4530 } 4531 4532 /* 4533 * Allocate and initialize an indirection array once the space needed 4534 * for incore extents increases above XFS_IEXT_BUFSZ. 
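 * The existing direct (or inline) extent buffer is first grown to a
 * full XFS_IEXT_BUFSZ page (or a fresh page is allocated if there are
 * no extents yet) and then becomes er_extbuf of the single initial
 * entry in the indirection array.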
4535 */ 4536 void 4537 xfs_iext_irec_init( 4538 xfs_ifork_t *ifp) /* inode fork pointer */ 4539 { 4540 xfs_ext_irec_t *erp; /* indirection array pointer */ 4541 xfs_extnum_t nextents; /* number of extents in file */ 4542 4543 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 4544 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4545 ASSERT(nextents <= XFS_LINEAR_EXTS); 4546 4547 erp = (xfs_ext_irec_t *) 4548 kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP); 4549 4550 if (nextents == 0) { 4551 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); 4552 } else if (!ifp->if_real_bytes) { 4553 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); 4554 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { 4555 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ); 4556 } 4557 erp->er_extbuf = ifp->if_u1.if_extents; 4558 erp->er_extcount = nextents; 4559 erp->er_extoff = 0; 4560 4561 ifp->if_flags |= XFS_IFEXTIREC; 4562 ifp->if_real_bytes = XFS_IEXT_BUFSZ; 4563 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t); 4564 ifp->if_u1.if_ext_irec = erp; 4565 4566 return; 4567 } 4568 4569 /* 4570 * Allocate and initialize a new entry in the indirection array. 4571 */ 4572 xfs_ext_irec_t * 4573 xfs_iext_irec_new( 4574 xfs_ifork_t *ifp, /* inode fork pointer */ 4575 int erp_idx) /* index for new irec */ 4576 { 4577 xfs_ext_irec_t *erp; /* indirection array pointer */ 4578 int i; /* loop counter */ 4579 int nlists; /* number of irec's (ex lists) */ 4580 4581 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4582 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4583 4584 /* Resize indirection array */ 4585 xfs_iext_realloc_indirect(ifp, ++nlists * 4586 sizeof(xfs_ext_irec_t)); 4587 /* 4588 * Move records down in the array so the 4589 * new page can use erp_idx. 4590 */ 4591 erp = ifp->if_u1.if_ext_irec; 4592 for (i = nlists - 1; i > erp_idx; i--) { 4593 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t)); 4594 } 4595 ASSERT(i == erp_idx); 4596 4597 /* Initialize new extent record */ 4598 erp = ifp->if_u1.if_ext_irec; 4599 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); 4600 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 4601 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); 4602 erp[erp_idx].er_extcount = 0; 4603 erp[erp_idx].er_extoff = erp_idx > 0 ? 4604 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0; 4605 return (&erp[erp_idx]); 4606 } 4607 4608 /* 4609 * Remove a record from the indirection array. 4610 */ 4611 void 4612 xfs_iext_irec_remove( 4613 xfs_ifork_t *ifp, /* inode fork pointer */ 4614 int erp_idx) /* irec index to remove */ 4615 { 4616 xfs_ext_irec_t *erp; /* indirection array pointer */ 4617 int i; /* loop counter */ 4618 int nlists; /* number of irec's (ex lists) */ 4619 4620 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4621 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4622 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4623 if (erp->er_extbuf) { 4624 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, 4625 -erp->er_extcount); 4626 kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ); 4627 } 4628 /* Compact extent records */ 4629 erp = ifp->if_u1.if_ext_irec; 4630 for (i = erp_idx; i < nlists - 1; i++) { 4631 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t)); 4632 } 4633 /* 4634 * Manually free the last extent record from the indirection 4635 * array. A call to xfs_iext_realloc_indirect() with a size 4636 * of zero would result in a call to xfs_iext_destroy() which 4637 * would in turn call this function again, creating a nasty 4638 * infinite loop. 
4639 */ 4640 if (--nlists) { 4641 xfs_iext_realloc_indirect(ifp, 4642 nlists * sizeof(xfs_ext_irec_t)); 4643 } else { 4644 kmem_free(ifp->if_u1.if_ext_irec, 4645 sizeof(xfs_ext_irec_t)); 4646 } 4647 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 4648 } 4649 4650 /* 4651 * This is called to clean up large amounts of unused memory allocated 4652 * by the indirection array. Before compacting anything though, verify 4653 * that the indirection array is still needed and switch back to the 4654 * linear extent list (or even the inline buffer) if possible. The 4655 * compaction policy is as follows: 4656 * 4657 * Full Compaction: Extents fit into a single page (or inline buffer) 4658 * Full Compaction: Extents occupy less than 10% of allocated space 4659 * Partial Compaction: Extents occupy > 10% and < 50% of allocated space 4660 * No Compaction: Extents occupy at least 50% of allocated space 4661 */ 4662 void 4663 xfs_iext_irec_compact( 4664 xfs_ifork_t *ifp) /* inode fork pointer */ 4665 { 4666 xfs_extnum_t nextents; /* number of extents in file */ 4667 int nlists; /* number of irec's (ex lists) */ 4668 4669 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4670 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4671 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4672 4673 if (nextents == 0) { 4674 xfs_iext_destroy(ifp); 4675 } else if (nextents <= XFS_INLINE_EXTS) { 4676 xfs_iext_indirect_to_direct(ifp); 4677 xfs_iext_direct_to_inline(ifp, nextents); 4678 } else if (nextents <= XFS_LINEAR_EXTS) { 4679 xfs_iext_indirect_to_direct(ifp); 4680 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) { 4681 xfs_iext_irec_compact_full(ifp); 4682 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) { 4683 xfs_iext_irec_compact_pages(ifp); 4684 } 4685 } 4686 4687 /* 4688 * Combine extents from neighboring extent pages. 4689 */ 4690 void 4691 xfs_iext_irec_compact_pages( 4692 xfs_ifork_t *ifp) /* inode fork pointer */ 4693 { 4694 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */ 4695 int erp_idx = 0; /* indirection array index */ 4696 int nlists; /* number of irec's (ex lists) */ 4697 4698 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4699 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4700 while (erp_idx < nlists - 1) { 4701 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4702 erp_next = erp + 1; 4703 if (erp_next->er_extcount <= 4704 (XFS_LINEAR_EXTS - erp->er_extcount)) { 4705 memmove(&erp->er_extbuf[erp->er_extcount], 4706 erp_next->er_extbuf, erp_next->er_extcount * 4707 sizeof(xfs_bmbt_rec_t)); 4708 erp->er_extcount += erp_next->er_extcount; 4709 /* 4710 * Free page before removing extent record 4711 * so er_extoffs don't get modified in 4712 * xfs_iext_irec_remove. 4713 */ 4714 kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ); 4715 erp_next->er_extbuf = NULL; 4716 xfs_iext_irec_remove(ifp, erp_idx + 1); 4717 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4718 } else { 4719 erp_idx++; 4720 } 4721 } 4722 } 4723 4724 /* 4725 * Fully compact the extent records managed by the indirection array. 
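 * Records are copied forward so that the earlier pages fill up and
 * emptied pages are released as they are drained.  This is also used
 * by xfs_iext_indirect_to_direct() to collapse everything into the
 * first page before the indirection array is dropped.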
4726 */ 4727 void 4728 xfs_iext_irec_compact_full( 4729 xfs_ifork_t *ifp) /* inode fork pointer */ 4730 { 4731 xfs_bmbt_rec_host_t *ep, *ep_next; /* extent record pointers */ 4732 xfs_ext_irec_t *erp, *erp_next; /* extent irec pointers */ 4733 int erp_idx = 0; /* extent irec index */ 4734 int ext_avail; /* empty entries in ex list */ 4735 int ext_diff; /* number of exts to add */ 4736 int nlists; /* number of irec's (ex lists) */ 4737 4738 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4739 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4740 erp = ifp->if_u1.if_ext_irec; 4741 ep = &erp->er_extbuf[erp->er_extcount]; 4742 erp_next = erp + 1; 4743 ep_next = erp_next->er_extbuf; 4744 while (erp_idx < nlists - 1) { 4745 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; 4746 ext_diff = MIN(ext_avail, erp_next->er_extcount); 4747 memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t)); 4748 erp->er_extcount += ext_diff; 4749 erp_next->er_extcount -= ext_diff; 4750 /* Remove next page */ 4751 if (erp_next->er_extcount == 0) { 4752 /* 4753 * Free page before removing extent record 4754 * so er_extoffs don't get modified in 4755 * xfs_iext_irec_remove. 4756 */ 4757 kmem_free(erp_next->er_extbuf, 4758 XFS_IEXT_BUFSZ); 4759 erp_next->er_extbuf = NULL; 4760 xfs_iext_irec_remove(ifp, erp_idx + 1); 4761 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4762 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4763 /* Update next page */ 4764 } else { 4765 /* Move rest of page up to become next new page */ 4766 memmove(erp_next->er_extbuf, ep_next, 4767 erp_next->er_extcount * sizeof(xfs_bmbt_rec_t)); 4768 ep_next = erp_next->er_extbuf; 4769 memset(&ep_next[erp_next->er_extcount], 0, 4770 (XFS_LINEAR_EXTS - erp_next->er_extcount) * 4771 sizeof(xfs_bmbt_rec_t)); 4772 } 4773 if (erp->er_extcount == XFS_LINEAR_EXTS) { 4774 erp_idx++; 4775 if (erp_idx < nlists) 4776 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4777 else 4778 break; 4779 } 4780 ep = &erp->er_extbuf[erp->er_extcount]; 4781 erp_next = erp + 1; 4782 ep_next = erp_next->er_extbuf; 4783 } 4784 } 4785 4786 /* 4787 * This is called to update the er_extoff field in the indirection 4788 * array when extents have been added or removed from one of the 4789 * extent lists. erp_idx contains the irec index to begin updating 4790 * at and ext_diff contains the number of extents that were added 4791 * or removed. 4792 */ 4793 void 4794 xfs_iext_irec_update_extoffs( 4795 xfs_ifork_t *ifp, /* inode fork pointer */ 4796 int erp_idx, /* irec index to update */ 4797 int ext_diff) /* number of new extents */ 4798 { 4799 int i; /* loop counter */ 4800 int nlists; /* number of irec's (ex lists) */ 4801 4802 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4803 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4804 for (i = erp_idx; i < nlists; i++) { 4805 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff; 4806 } 4807 } 4808
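
/*
 * The routines above all rely on the same indirection-array
 * invariants: er_extoff of each irec is the number of extents held by
 * all earlier irecs, no page holds more than XFS_LINEAR_EXTS records,
 * and if_bytes always reflects the total record count.  The helper
 * below is only an illustrative sketch of those invariants; it is not
 * part of the original file, the name xfs_iext_irec_check is made up
 * here, and nothing calls it.  A natural place for such a check would
 * be at the end of xfs_iext_add() and xfs_iext_remove(), under DEBUG
 * only.
 */
#ifdef DEBUG
STATIC void
xfs_iext_irec_check(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* running count of extents */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	if (!(ifp->if_flags & XFS_IFEXTIREC))
		return;

	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = 0;
	for (erp_idx = 0; erp_idx < nlists; erp_idx++) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		/* er_extoff is the number of extents before this page */
		ASSERT(erp->er_extoff == nextents);
		/* no page may hold more records than fit in its buffer */
		ASSERT(erp->er_extcount <= (int)XFS_LINEAR_EXTS);
		nextents += erp->er_extcount;
	}
	/* if_bytes must agree with the per-page extent counts */
	ASSERT(nextents == ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
}
#endif	/* DEBUG */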