/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_dir2_trace.h"
#include "xfs_quota.h"
#include "xfs_acl.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"

kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_finish().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_fs_cmn_err(CE_ALERT, mp,
				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

/*
 * Find the buffer associated with the given inode map.
 * We do basic validation checks on the buffer once it has been
 * retrieved from disk.
 */
STATIC int
xfs_imap_to_bp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	struct xfs_imap	*imap,
	xfs_buf_t	**bpp,
	uint		buf_flags,
	uint		iget_flags)
{
	int		error;
	int		i;
	int		ni;
	xfs_buf_t	*bp;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp);
	if (error) {
		if (error != EAGAIN) {
			cmn_err(CE_WARN,
				"xfs_imap_to_bp: xfs_trans_read_buf() returned "
				"an error %d on %s.  Returning error.",
				error, mp->m_fsname);
		} else {
			ASSERT(buf_flags & XFS_BUF_TRYLOCK);
		}
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 */
#ifdef DEBUG
	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC &&
			    XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (iget_flags & XFS_IGET_BULKSTAT) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
			XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
						XFS_ERRLEVEL_HIGH, mp, dip);
#ifdef DEBUG
			cmn_err(CE_PANIC,
					"Device %s - bad inode magic/vsn "
					"daddr %lld #%d (magic=%x)",
				XFS_BUFTARG_NAME(mp->m_ddev_targp),
				(unsigned long long)imap->im_blkno, i,
				be16_to_cpu(dip->di_magic));
#endif
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good.
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);

	*bpp = bp;
	return 0;
}

/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dip parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
int
xfs_inotobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	int		*offset,
	uint		imap_flags)
{
	struct xfs_imap	imap;
	xfs_buf_t	*bp;
	int		error;

	imap.im_blkno = 0;
	error = xfs_imap(mp, tp, ino, &imap, imap_flags);
	if (error)
		return error;

	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
	if (error)
		return error;

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	*offset = imap.im_boffset;
	return 0;
}


/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dip parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * The inode is expected to already have been mapped to its buffer and read
 * in once, thus we can use the mapping information stored in the inode
 * rather than calling xfs_imap().  This allows us to avoid the overhead
 * of looking at the inode btree for small block file systems
 * (see xfs_imap()).
 */
int
xfs_itobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	uint		buf_flags)
{
	xfs_buf_t	*bp;
	int		error;

	ASSERT(ip->i_imap.im_blkno != 0);

	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);
	if (error)
		return error;

	if (!bp) {
		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
		ASSERT(tp == NULL);
		*bpp = NULL;
		return EAGAIN;
	}

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
	*bpp = bp;
	return 0;
}

/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error;
	xfs_fsize_t		di_size;
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	error = 0;

	if (unlikely(be32_to_cpu(dip->di_nextents) +
		     be16_to_cpu(dip->di_anextents) >
		     be64_to_cpu(dip->di_nblocks))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_nextents) +
			      be16_to_cpu(dip->di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					      ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_size = 0;
		ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode 
%Lu " 373 "(local format for regular file).", 374 (unsigned long long) ip->i_ino); 375 XFS_CORRUPTION_ERROR("xfs_iformat(4)", 376 XFS_ERRLEVEL_LOW, 377 ip->i_mount, dip); 378 return XFS_ERROR(EFSCORRUPTED); 379 } 380 381 di_size = be64_to_cpu(dip->di_size); 382 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) { 383 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 384 "corrupt inode %Lu " 385 "(bad size %Ld for local inode).", 386 (unsigned long long) ip->i_ino, 387 (long long) di_size); 388 XFS_CORRUPTION_ERROR("xfs_iformat(5)", 389 XFS_ERRLEVEL_LOW, 390 ip->i_mount, dip); 391 return XFS_ERROR(EFSCORRUPTED); 392 } 393 394 size = (int)di_size; 395 error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size); 396 break; 397 case XFS_DINODE_FMT_EXTENTS: 398 error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK); 399 break; 400 case XFS_DINODE_FMT_BTREE: 401 error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK); 402 break; 403 default: 404 XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW, 405 ip->i_mount); 406 return XFS_ERROR(EFSCORRUPTED); 407 } 408 break; 409 410 default: 411 XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount); 412 return XFS_ERROR(EFSCORRUPTED); 413 } 414 if (error) { 415 return error; 416 } 417 if (!XFS_DFORK_Q(dip)) 418 return 0; 419 ASSERT(ip->i_afp == NULL); 420 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); 421 ip->i_afp->if_ext_max = 422 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 423 switch (dip->di_aformat) { 424 case XFS_DINODE_FMT_LOCAL: 425 atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); 426 size = be16_to_cpu(atp->hdr.totsize); 427 428 if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) { 429 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 430 "corrupt inode %Lu " 431 "(bad attr fork size %Ld).", 432 (unsigned long long) ip->i_ino, 433 (long long) size); 434 XFS_CORRUPTION_ERROR("xfs_iformat(8)", 435 XFS_ERRLEVEL_LOW, 436 ip->i_mount, dip); 437 return XFS_ERROR(EFSCORRUPTED); 438 } 439 440 error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); 441 break; 442 case XFS_DINODE_FMT_EXTENTS: 443 error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK); 444 break; 445 case XFS_DINODE_FMT_BTREE: 446 error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); 447 break; 448 default: 449 error = XFS_ERROR(EFSCORRUPTED); 450 break; 451 } 452 if (error) { 453 kmem_zone_free(xfs_ifork_zone, ip->i_afp); 454 ip->i_afp = NULL; 455 xfs_idestroy_fork(ip, XFS_DATA_FORK); 456 } 457 return error; 458 } 459 460 /* 461 * The file is in-lined in the on-disk inode. 462 * If it fits into if_inline_data, then copy 463 * it there, otherwise allocate a buffer for it 464 * and copy the data there. Either way, set 465 * if_data to point at the data. 466 * If we allocate a buffer for the data, make 467 * sure that its size is a multiple of 4 and 468 * record the real size in i_real_bytes. 469 */ 470 STATIC int 471 xfs_iformat_local( 472 xfs_inode_t *ip, 473 xfs_dinode_t *dip, 474 int whichfork, 475 int size) 476 { 477 xfs_ifork_t *ifp; 478 int real_size; 479 480 /* 481 * If the size is unreasonable, then something 482 * is wrong and we just bail out rather than crash in 483 * kmem_alloc() or memcpy() below. 
484 */ 485 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 486 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 487 "corrupt inode %Lu " 488 "(bad size %d for local fork, size = %d).", 489 (unsigned long long) ip->i_ino, size, 490 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); 491 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, 492 ip->i_mount, dip); 493 return XFS_ERROR(EFSCORRUPTED); 494 } 495 ifp = XFS_IFORK_PTR(ip, whichfork); 496 real_size = 0; 497 if (size == 0) 498 ifp->if_u1.if_data = NULL; 499 else if (size <= sizeof(ifp->if_u2.if_inline_data)) 500 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 501 else { 502 real_size = roundup(size, 4); 503 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 504 } 505 ifp->if_bytes = size; 506 ifp->if_real_bytes = real_size; 507 if (size) 508 memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size); 509 ifp->if_flags &= ~XFS_IFEXTENTS; 510 ifp->if_flags |= XFS_IFINLINE; 511 return 0; 512 } 513 514 /* 515 * The file consists of a set of extents all 516 * of which fit into the on-disk inode. 517 * If there are few enough extents to fit into 518 * the if_inline_ext, then copy them there. 519 * Otherwise allocate a buffer for them and copy 520 * them into it. Either way, set if_extents 521 * to point at the extents. 522 */ 523 STATIC int 524 xfs_iformat_extents( 525 xfs_inode_t *ip, 526 xfs_dinode_t *dip, 527 int whichfork) 528 { 529 xfs_bmbt_rec_t *dp; 530 xfs_ifork_t *ifp; 531 int nex; 532 int size; 533 int i; 534 535 ifp = XFS_IFORK_PTR(ip, whichfork); 536 nex = XFS_DFORK_NEXTENTS(dip, whichfork); 537 size = nex * (uint)sizeof(xfs_bmbt_rec_t); 538 539 /* 540 * If the number of extents is unreasonable, then something 541 * is wrong and we just bail out rather than crash in 542 * kmem_alloc() or memcpy() below. 543 */ 544 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 545 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 546 "corrupt inode %Lu ((a)extents = %d).", 547 (unsigned long long) ip->i_ino, nex); 548 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, 549 ip->i_mount, dip); 550 return XFS_ERROR(EFSCORRUPTED); 551 } 552 553 ifp->if_real_bytes = 0; 554 if (nex == 0) 555 ifp->if_u1.if_extents = NULL; 556 else if (nex <= XFS_INLINE_EXTS) 557 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 558 else 559 xfs_iext_add(ifp, 0, nex); 560 561 ifp->if_bytes = size; 562 if (size) { 563 dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork); 564 xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip)); 565 for (i = 0; i < nex; i++, dp++) { 566 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); 567 ep->l0 = get_unaligned_be64(&dp->l0); 568 ep->l1 = get_unaligned_be64(&dp->l1); 569 } 570 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork); 571 if (whichfork != XFS_DATA_FORK || 572 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) 573 if (unlikely(xfs_check_nostate_extents( 574 ifp, 0, nex))) { 575 XFS_ERROR_REPORT("xfs_iformat_extents(2)", 576 XFS_ERRLEVEL_LOW, 577 ip->i_mount); 578 return XFS_ERROR(EFSCORRUPTED); 579 } 580 } 581 ifp->if_flags |= XFS_IFEXTENTS; 582 return 0; 583 } 584 585 /* 586 * The file has too many extents to fit into 587 * the inode, so they are in B-tree format. 588 * Allocate a buffer for the root of the B-tree 589 * and copy the root into it. The i_extents 590 * field will remain NULL until all of the 591 * extents are read in (when they are needed). 
592 */ 593 STATIC int 594 xfs_iformat_btree( 595 xfs_inode_t *ip, 596 xfs_dinode_t *dip, 597 int whichfork) 598 { 599 xfs_bmdr_block_t *dfp; 600 xfs_ifork_t *ifp; 601 /* REFERENCED */ 602 int nrecs; 603 int size; 604 605 ifp = XFS_IFORK_PTR(ip, whichfork); 606 dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork); 607 size = XFS_BMAP_BROOT_SPACE(dfp); 608 nrecs = be16_to_cpu(dfp->bb_numrecs); 609 610 /* 611 * blow out if -- fork has less extents than can fit in 612 * fork (fork shouldn't be a btree format), root btree 613 * block has more records than can fit into the fork, 614 * or the number of extents is greater than the number of 615 * blocks. 616 */ 617 if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max 618 || XFS_BMDR_SPACE_CALC(nrecs) > 619 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) 620 || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) { 621 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 622 "corrupt inode %Lu (btree).", 623 (unsigned long long) ip->i_ino); 624 XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW, 625 ip->i_mount); 626 return XFS_ERROR(EFSCORRUPTED); 627 } 628 629 ifp->if_broot_bytes = size; 630 ifp->if_broot = kmem_alloc(size, KM_SLEEP); 631 ASSERT(ifp->if_broot != NULL); 632 /* 633 * Copy and convert from the on-disk structure 634 * to the in-memory structure. 635 */ 636 xfs_bmdr_to_bmbt(ip->i_mount, dfp, 637 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork), 638 ifp->if_broot, size); 639 ifp->if_flags &= ~XFS_IFEXTENTS; 640 ifp->if_flags |= XFS_IFBROOT; 641 642 return 0; 643 } 644 645 void 646 xfs_dinode_from_disk( 647 xfs_icdinode_t *to, 648 xfs_dinode_t *from) 649 { 650 to->di_magic = be16_to_cpu(from->di_magic); 651 to->di_mode = be16_to_cpu(from->di_mode); 652 to->di_version = from ->di_version; 653 to->di_format = from->di_format; 654 to->di_onlink = be16_to_cpu(from->di_onlink); 655 to->di_uid = be32_to_cpu(from->di_uid); 656 to->di_gid = be32_to_cpu(from->di_gid); 657 to->di_nlink = be32_to_cpu(from->di_nlink); 658 to->di_projid = be16_to_cpu(from->di_projid); 659 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); 660 to->di_flushiter = be16_to_cpu(from->di_flushiter); 661 to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec); 662 to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec); 663 to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec); 664 to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec); 665 to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec); 666 to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec); 667 to->di_size = be64_to_cpu(from->di_size); 668 to->di_nblocks = be64_to_cpu(from->di_nblocks); 669 to->di_extsize = be32_to_cpu(from->di_extsize); 670 to->di_nextents = be32_to_cpu(from->di_nextents); 671 to->di_anextents = be16_to_cpu(from->di_anextents); 672 to->di_forkoff = from->di_forkoff; 673 to->di_aformat = from->di_aformat; 674 to->di_dmevmask = be32_to_cpu(from->di_dmevmask); 675 to->di_dmstate = be16_to_cpu(from->di_dmstate); 676 to->di_flags = be16_to_cpu(from->di_flags); 677 to->di_gen = be32_to_cpu(from->di_gen); 678 } 679 680 void 681 xfs_dinode_to_disk( 682 xfs_dinode_t *to, 683 xfs_icdinode_t *from) 684 { 685 to->di_magic = cpu_to_be16(from->di_magic); 686 to->di_mode = cpu_to_be16(from->di_mode); 687 to->di_version = from ->di_version; 688 to->di_format = from->di_format; 689 to->di_onlink = cpu_to_be16(from->di_onlink); 690 to->di_uid = cpu_to_be32(from->di_uid); 691 to->di_gid = cpu_to_be32(from->di_gid); 692 to->di_nlink = cpu_to_be32(from->di_nlink); 693 to->di_projid = 
cpu_to_be16(from->di_projid); 694 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); 695 to->di_flushiter = cpu_to_be16(from->di_flushiter); 696 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec); 697 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec); 698 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec); 699 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec); 700 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec); 701 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec); 702 to->di_size = cpu_to_be64(from->di_size); 703 to->di_nblocks = cpu_to_be64(from->di_nblocks); 704 to->di_extsize = cpu_to_be32(from->di_extsize); 705 to->di_nextents = cpu_to_be32(from->di_nextents); 706 to->di_anextents = cpu_to_be16(from->di_anextents); 707 to->di_forkoff = from->di_forkoff; 708 to->di_aformat = from->di_aformat; 709 to->di_dmevmask = cpu_to_be32(from->di_dmevmask); 710 to->di_dmstate = cpu_to_be16(from->di_dmstate); 711 to->di_flags = cpu_to_be16(from->di_flags); 712 to->di_gen = cpu_to_be32(from->di_gen); 713 } 714 715 STATIC uint 716 _xfs_dic2xflags( 717 __uint16_t di_flags) 718 { 719 uint flags = 0; 720 721 if (di_flags & XFS_DIFLAG_ANY) { 722 if (di_flags & XFS_DIFLAG_REALTIME) 723 flags |= XFS_XFLAG_REALTIME; 724 if (di_flags & XFS_DIFLAG_PREALLOC) 725 flags |= XFS_XFLAG_PREALLOC; 726 if (di_flags & XFS_DIFLAG_IMMUTABLE) 727 flags |= XFS_XFLAG_IMMUTABLE; 728 if (di_flags & XFS_DIFLAG_APPEND) 729 flags |= XFS_XFLAG_APPEND; 730 if (di_flags & XFS_DIFLAG_SYNC) 731 flags |= XFS_XFLAG_SYNC; 732 if (di_flags & XFS_DIFLAG_NOATIME) 733 flags |= XFS_XFLAG_NOATIME; 734 if (di_flags & XFS_DIFLAG_NODUMP) 735 flags |= XFS_XFLAG_NODUMP; 736 if (di_flags & XFS_DIFLAG_RTINHERIT) 737 flags |= XFS_XFLAG_RTINHERIT; 738 if (di_flags & XFS_DIFLAG_PROJINHERIT) 739 flags |= XFS_XFLAG_PROJINHERIT; 740 if (di_flags & XFS_DIFLAG_NOSYMLINKS) 741 flags |= XFS_XFLAG_NOSYMLINKS; 742 if (di_flags & XFS_DIFLAG_EXTSIZE) 743 flags |= XFS_XFLAG_EXTSIZE; 744 if (di_flags & XFS_DIFLAG_EXTSZINHERIT) 745 flags |= XFS_XFLAG_EXTSZINHERIT; 746 if (di_flags & XFS_DIFLAG_NODEFRAG) 747 flags |= XFS_XFLAG_NODEFRAG; 748 if (di_flags & XFS_DIFLAG_FILESTREAM) 749 flags |= XFS_XFLAG_FILESTREAM; 750 } 751 752 return flags; 753 } 754 755 uint 756 xfs_ip2xflags( 757 xfs_inode_t *ip) 758 { 759 xfs_icdinode_t *dic = &ip->i_d; 760 761 return _xfs_dic2xflags(dic->di_flags) | 762 (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0); 763 } 764 765 uint 766 xfs_dic2xflags( 767 xfs_dinode_t *dip) 768 { 769 return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) | 770 (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0); 771 } 772 773 /* 774 * Read the disk inode attributes into the in-core inode structure. 775 */ 776 int 777 xfs_iread( 778 xfs_mount_t *mp, 779 xfs_trans_t *tp, 780 xfs_inode_t *ip, 781 xfs_daddr_t bno, 782 uint iget_flags) 783 { 784 xfs_buf_t *bp; 785 xfs_dinode_t *dip; 786 int error; 787 788 /* 789 * Fill in the location information in the in-core inode. 790 */ 791 ip->i_imap.im_blkno = bno; 792 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); 793 if (error) 794 return error; 795 ASSERT(bno == 0 || bno == ip->i_imap.im_blkno); 796 797 /* 798 * Get pointers to the on-disk inode and the buffer containing it. 799 */ 800 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, 801 XFS_BUF_LOCK, iget_flags); 802 if (error) 803 return error; 804 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); 805 806 /* 807 * If we got something that isn't an inode it means someone 808 * (nfs or dmi) has a stale handle. 
809 */ 810 if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) { 811 #ifdef DEBUG 812 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " 813 "dip->di_magic (0x%x) != " 814 "XFS_DINODE_MAGIC (0x%x)", 815 be16_to_cpu(dip->di_magic), 816 XFS_DINODE_MAGIC); 817 #endif /* DEBUG */ 818 error = XFS_ERROR(EINVAL); 819 goto out_brelse; 820 } 821 822 /* 823 * If the on-disk inode is already linked to a directory 824 * entry, copy all of the inode into the in-core inode. 825 * xfs_iformat() handles copying in the inode format 826 * specific information. 827 * Otherwise, just get the truly permanent information. 828 */ 829 if (dip->di_mode) { 830 xfs_dinode_from_disk(&ip->i_d, dip); 831 error = xfs_iformat(ip, dip); 832 if (error) { 833 #ifdef DEBUG 834 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " 835 "xfs_iformat() returned error %d", 836 error); 837 #endif /* DEBUG */ 838 goto out_brelse; 839 } 840 } else { 841 ip->i_d.di_magic = be16_to_cpu(dip->di_magic); 842 ip->i_d.di_version = dip->di_version; 843 ip->i_d.di_gen = be32_to_cpu(dip->di_gen); 844 ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter); 845 /* 846 * Make sure to pull in the mode here as well in 847 * case the inode is released without being used. 848 * This ensures that xfs_inactive() will see that 849 * the inode is already free and not try to mess 850 * with the uninitialized part of it. 851 */ 852 ip->i_d.di_mode = 0; 853 /* 854 * Initialize the per-fork minima and maxima for a new 855 * inode here. xfs_iformat will do it for old inodes. 856 */ 857 ip->i_df.if_ext_max = 858 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 859 } 860 861 /* 862 * The inode format changed when we moved the link count and 863 * made it 32 bits long. If this is an old format inode, 864 * convert it in memory to look like a new one. If it gets 865 * flushed to disk we will convert back before flushing or 866 * logging it. We zero out the new projid field and the old link 867 * count field. We'll handle clearing the pad field (the remains 868 * of the old uuid field) when we actually convert the inode to 869 * the new format. We don't change the version number so that we 870 * can distinguish this from a real new format inode. 871 */ 872 if (ip->i_d.di_version == 1) { 873 ip->i_d.di_nlink = ip->i_d.di_onlink; 874 ip->i_d.di_onlink = 0; 875 ip->i_d.di_projid = 0; 876 } 877 878 ip->i_delayed_blks = 0; 879 ip->i_size = ip->i_d.di_size; 880 881 /* 882 * Mark the buffer containing the inode as something to keep 883 * around for a while. This helps to keep recently accessed 884 * meta-data in-core longer. 885 */ 886 XFS_BUF_SET_REF(bp, XFS_INO_REF); 887 888 /* 889 * Use xfs_trans_brelse() to release the buffer containing the 890 * on-disk inode, because it was acquired with xfs_trans_read_buf() 891 * in xfs_itobp() above. If tp is NULL, this is just a normal 892 * brelse(). If we're within a transaction, then xfs_trans_brelse() 893 * will only release the buffer if it is not dirty within the 894 * transaction. It will be OK to release the buffer in this case, 895 * because inodes on disk are never destroyed and we will be 896 * locking the new in-core inode before putting it in the hash 897 * table where other processes can find it. Thus we don't have 898 * to worry about the inode being changed just because we released 899 * the buffer. 900 */ 901 out_brelse: 902 xfs_trans_brelse(tp, bp); 903 return error; 904 } 905 906 /* 907 * Read in extents from a btree-format inode. 908 * Allocate and fill in if_extents. Real work is done in xfs_bmap.c. 
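 *
 * Callers are expected to check XFS_IFEXTENTS before touching the extent
 * list and to pull it in lazily when it is missing.  An illustrative
 * sketch of the usual call-site pattern (not code from this file):
 *
 *	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 *		error = xfs_iread_extents(tp, ip, whichfork);
 *		if (error)
 *			return error;
 *	}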
909 */ 910 int 911 xfs_iread_extents( 912 xfs_trans_t *tp, 913 xfs_inode_t *ip, 914 int whichfork) 915 { 916 int error; 917 xfs_ifork_t *ifp; 918 xfs_extnum_t nextents; 919 size_t size; 920 921 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 922 XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, 923 ip->i_mount); 924 return XFS_ERROR(EFSCORRUPTED); 925 } 926 nextents = XFS_IFORK_NEXTENTS(ip, whichfork); 927 size = nextents * sizeof(xfs_bmbt_rec_t); 928 ifp = XFS_IFORK_PTR(ip, whichfork); 929 930 /* 931 * We know that the size is valid (it's checked in iformat_btree) 932 */ 933 ifp->if_lastex = NULLEXTNUM; 934 ifp->if_bytes = ifp->if_real_bytes = 0; 935 ifp->if_flags |= XFS_IFEXTENTS; 936 xfs_iext_add(ifp, 0, nextents); 937 error = xfs_bmap_read_extents(tp, ip, whichfork); 938 if (error) { 939 xfs_iext_destroy(ifp); 940 ifp->if_flags &= ~XFS_IFEXTENTS; 941 return error; 942 } 943 xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip)); 944 return 0; 945 } 946 947 /* 948 * Allocate an inode on disk and return a copy of its in-core version. 949 * The in-core inode is locked exclusively. Set mode, nlink, and rdev 950 * appropriately within the inode. The uid and gid for the inode are 951 * set according to the contents of the given cred structure. 952 * 953 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc() 954 * has a free inode available, call xfs_iget() 955 * to obtain the in-core version of the allocated inode. Finally, 956 * fill in the inode and log its initial contents. In this case, 957 * ialloc_context would be set to NULL and call_again set to false. 958 * 959 * If xfs_dialloc() does not have an available inode, 960 * it will replenish its supply by doing an allocation. Since we can 961 * only do one allocation within a transaction without deadlocks, we 962 * must commit the current transaction before returning the inode itself. 963 * In this case, therefore, we will set call_again to true and return. 964 * The caller should then commit the current transaction, start a new 965 * transaction, and call xfs_ialloc() again to actually get the inode. 966 * 967 * To ensure that some other process does not grab the inode that 968 * was allocated during the first call to xfs_ialloc(), this routine 969 * also returns the [locked] bp pointing to the head of the freelist 970 * as ialloc_context. The caller should hold this buffer across 971 * the commit and pass it back into this routine on the second call. 972 * 973 * If we are allocating quota inodes, we do not have a parent inode 974 * to attach to or associate with (i.e. pip == NULL) because they 975 * are not linked into the directory structure - they are attached 976 * directly to the superblock - and so have no parent. 977 */ 978 int 979 xfs_ialloc( 980 xfs_trans_t *tp, 981 xfs_inode_t *pip, 982 mode_t mode, 983 xfs_nlink_t nlink, 984 xfs_dev_t rdev, 985 cred_t *cr, 986 xfs_prid_t prid, 987 int okalloc, 988 xfs_buf_t **ialloc_context, 989 boolean_t *call_again, 990 xfs_inode_t **ipp) 991 { 992 xfs_ino_t ino; 993 xfs_inode_t *ip; 994 uint flags; 995 int error; 996 timespec_t tv; 997 int filestreams = 0; 998 999 /* 1000 * Call the space management code to pick 1001 * the on-disk inode to be allocated. 1002 */ 1003 error = xfs_dialloc(tp, pip ? 
pip->i_ino : 0, mode, okalloc, 1004 ialloc_context, call_again, &ino); 1005 if (error) 1006 return error; 1007 if (*call_again || ino == NULLFSINO) { 1008 *ipp = NULL; 1009 return 0; 1010 } 1011 ASSERT(*ialloc_context == NULL); 1012 1013 /* 1014 * Get the in-core inode with the lock held exclusively. 1015 * This is because we're setting fields here we need 1016 * to prevent others from looking at until we're done. 1017 */ 1018 error = xfs_trans_iget(tp->t_mountp, tp, ino, 1019 XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip); 1020 if (error) 1021 return error; 1022 ASSERT(ip != NULL); 1023 1024 ip->i_d.di_mode = (__uint16_t)mode; 1025 ip->i_d.di_onlink = 0; 1026 ip->i_d.di_nlink = nlink; 1027 ASSERT(ip->i_d.di_nlink == nlink); 1028 ip->i_d.di_uid = current_fsuid(); 1029 ip->i_d.di_gid = current_fsgid(); 1030 ip->i_d.di_projid = prid; 1031 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 1032 1033 /* 1034 * If the superblock version is up to where we support new format 1035 * inodes and this is currently an old format inode, then change 1036 * the inode version number now. This way we only do the conversion 1037 * here rather than here and in the flush/logging code. 1038 */ 1039 if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) && 1040 ip->i_d.di_version == 1) { 1041 ip->i_d.di_version = 2; 1042 /* 1043 * We've already zeroed the old link count, the projid field, 1044 * and the pad field. 1045 */ 1046 } 1047 1048 /* 1049 * Project ids won't be stored on disk if we are using a version 1 inode. 1050 */ 1051 if ((prid != 0) && (ip->i_d.di_version == 1)) 1052 xfs_bump_ino_vers2(tp, ip); 1053 1054 if (pip && XFS_INHERIT_GID(pip)) { 1055 ip->i_d.di_gid = pip->i_d.di_gid; 1056 if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) { 1057 ip->i_d.di_mode |= S_ISGID; 1058 } 1059 } 1060 1061 /* 1062 * If the group ID of the new file does not match the effective group 1063 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared 1064 * (and only if the irix_sgid_inherit compatibility variable is set). 1065 */ 1066 if ((irix_sgid_inherit) && 1067 (ip->i_d.di_mode & S_ISGID) && 1068 (!in_group_p((gid_t)ip->i_d.di_gid))) { 1069 ip->i_d.di_mode &= ~S_ISGID; 1070 } 1071 1072 ip->i_d.di_size = 0; 1073 ip->i_size = 0; 1074 ip->i_d.di_nextents = 0; 1075 ASSERT(ip->i_d.di_nblocks == 0); 1076 1077 nanotime(&tv); 1078 ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec; 1079 ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec; 1080 ip->i_d.di_atime = ip->i_d.di_mtime; 1081 ip->i_d.di_ctime = ip->i_d.di_mtime; 1082 1083 /* 1084 * di_gen will have been taken care of in xfs_iread. 1085 */ 1086 ip->i_d.di_extsize = 0; 1087 ip->i_d.di_dmevmask = 0; 1088 ip->i_d.di_dmstate = 0; 1089 ip->i_d.di_flags = 0; 1090 flags = XFS_ILOG_CORE; 1091 switch (mode & S_IFMT) { 1092 case S_IFIFO: 1093 case S_IFCHR: 1094 case S_IFBLK: 1095 case S_IFSOCK: 1096 ip->i_d.di_format = XFS_DINODE_FMT_DEV; 1097 ip->i_df.if_u2.if_rdev = rdev; 1098 ip->i_df.if_flags = 0; 1099 flags |= XFS_ILOG_DEV; 1100 break; 1101 case S_IFREG: 1102 /* 1103 * we can't set up filestreams until after the VFS inode 1104 * is set up properly. 
1105 */ 1106 if (pip && xfs_inode_is_filestream(pip)) 1107 filestreams = 1; 1108 /* fall through */ 1109 case S_IFDIR: 1110 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { 1111 uint di_flags = 0; 1112 1113 if ((mode & S_IFMT) == S_IFDIR) { 1114 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 1115 di_flags |= XFS_DIFLAG_RTINHERIT; 1116 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 1117 di_flags |= XFS_DIFLAG_EXTSZINHERIT; 1118 ip->i_d.di_extsize = pip->i_d.di_extsize; 1119 } 1120 } else if ((mode & S_IFMT) == S_IFREG) { 1121 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 1122 di_flags |= XFS_DIFLAG_REALTIME; 1123 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 1124 di_flags |= XFS_DIFLAG_EXTSIZE; 1125 ip->i_d.di_extsize = pip->i_d.di_extsize; 1126 } 1127 } 1128 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) && 1129 xfs_inherit_noatime) 1130 di_flags |= XFS_DIFLAG_NOATIME; 1131 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) && 1132 xfs_inherit_nodump) 1133 di_flags |= XFS_DIFLAG_NODUMP; 1134 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) && 1135 xfs_inherit_sync) 1136 di_flags |= XFS_DIFLAG_SYNC; 1137 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) && 1138 xfs_inherit_nosymlinks) 1139 di_flags |= XFS_DIFLAG_NOSYMLINKS; 1140 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) 1141 di_flags |= XFS_DIFLAG_PROJINHERIT; 1142 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) && 1143 xfs_inherit_nodefrag) 1144 di_flags |= XFS_DIFLAG_NODEFRAG; 1145 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM) 1146 di_flags |= XFS_DIFLAG_FILESTREAM; 1147 ip->i_d.di_flags |= di_flags; 1148 } 1149 /* FALLTHROUGH */ 1150 case S_IFLNK: 1151 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; 1152 ip->i_df.if_flags = XFS_IFEXTENTS; 1153 ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0; 1154 ip->i_df.if_u1.if_extents = NULL; 1155 break; 1156 default: 1157 ASSERT(0); 1158 } 1159 /* 1160 * Attribute fork settings for new inode. 1161 */ 1162 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 1163 ip->i_d.di_anextents = 0; 1164 1165 /* 1166 * Log the new values stuffed into the inode. 1167 */ 1168 xfs_trans_log_inode(tp, ip, flags); 1169 1170 /* now that we have an i_mode we can setup inode ops and unlock */ 1171 xfs_setup_inode(ip); 1172 1173 /* now we have set up the vfs inode we can associate the filestream */ 1174 if (filestreams) { 1175 error = xfs_filestream_associate(pip, ip); 1176 if (error < 0) 1177 return -error; 1178 if (!error) 1179 xfs_iflags_set(ip, XFS_IFILESTREAM); 1180 } 1181 1182 *ipp = ip; 1183 return 0; 1184 } 1185 1186 /* 1187 * Check to make sure that there are no blocks allocated to the 1188 * file beyond the size of the file. We don't check this for 1189 * files with fixed size extents or real time extents, but we 1190 * at least do it for regular files. 1191 */ 1192 #ifdef DEBUG 1193 void 1194 xfs_isize_check( 1195 xfs_mount_t *mp, 1196 xfs_inode_t *ip, 1197 xfs_fsize_t isize) 1198 { 1199 xfs_fileoff_t map_first; 1200 int nimaps; 1201 xfs_bmbt_irec_t imaps[2]; 1202 1203 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) 1204 return; 1205 1206 if (XFS_IS_REALTIME_INODE(ip)) 1207 return; 1208 1209 if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) 1210 return; 1211 1212 nimaps = 2; 1213 map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); 1214 /* 1215 * The filesystem could be shutting down, so bmapi may return 1216 * an error. 
1217 */ 1218 if (xfs_bmapi(NULL, ip, map_first, 1219 (XFS_B_TO_FSB(mp, 1220 (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) - 1221 map_first), 1222 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps, 1223 NULL, NULL)) 1224 return; 1225 ASSERT(nimaps == 1); 1226 ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK); 1227 } 1228 #endif /* DEBUG */ 1229 1230 /* 1231 * Calculate the last possible buffered byte in a file. This must 1232 * include data that was buffered beyond the EOF by the write code. 1233 * This also needs to deal with overflowing the xfs_fsize_t type 1234 * which can happen for sizes near the limit. 1235 * 1236 * We also need to take into account any blocks beyond the EOF. It 1237 * may be the case that they were buffered by a write which failed. 1238 * In that case the pages will still be in memory, but the inode size 1239 * will never have been updated. 1240 */ 1241 xfs_fsize_t 1242 xfs_file_last_byte( 1243 xfs_inode_t *ip) 1244 { 1245 xfs_mount_t *mp; 1246 xfs_fsize_t last_byte; 1247 xfs_fileoff_t last_block; 1248 xfs_fileoff_t size_last_block; 1249 int error; 1250 1251 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)); 1252 1253 mp = ip->i_mount; 1254 /* 1255 * Only check for blocks beyond the EOF if the extents have 1256 * been read in. This eliminates the need for the inode lock, 1257 * and it also saves us from looking when it really isn't 1258 * necessary. 1259 */ 1260 if (ip->i_df.if_flags & XFS_IFEXTENTS) { 1261 xfs_ilock(ip, XFS_ILOCK_SHARED); 1262 error = xfs_bmap_last_offset(NULL, ip, &last_block, 1263 XFS_DATA_FORK); 1264 xfs_iunlock(ip, XFS_ILOCK_SHARED); 1265 if (error) { 1266 last_block = 0; 1267 } 1268 } else { 1269 last_block = 0; 1270 } 1271 size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size); 1272 last_block = XFS_FILEOFF_MAX(last_block, size_last_block); 1273 1274 last_byte = XFS_FSB_TO_B(mp, last_block); 1275 if (last_byte < 0) { 1276 return XFS_MAXIOFFSET(mp); 1277 } 1278 last_byte += (1 << mp->m_writeio_log); 1279 if (last_byte < 0) { 1280 return XFS_MAXIOFFSET(mp); 1281 } 1282 return last_byte; 1283 } 1284 1285 #if defined(XFS_RW_TRACE) 1286 STATIC void 1287 xfs_itrunc_trace( 1288 int tag, 1289 xfs_inode_t *ip, 1290 int flag, 1291 xfs_fsize_t new_size, 1292 xfs_off_t toss_start, 1293 xfs_off_t toss_finish) 1294 { 1295 if (ip->i_rwtrace == NULL) { 1296 return; 1297 } 1298 1299 ktrace_enter(ip->i_rwtrace, 1300 (void*)((long)tag), 1301 (void*)ip, 1302 (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff), 1303 (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff), 1304 (void*)((long)flag), 1305 (void*)(unsigned long)((new_size >> 32) & 0xffffffff), 1306 (void*)(unsigned long)(new_size & 0xffffffff), 1307 (void*)(unsigned long)((toss_start >> 32) & 0xffffffff), 1308 (void*)(unsigned long)(toss_start & 0xffffffff), 1309 (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff), 1310 (void*)(unsigned long)(toss_finish & 0xffffffff), 1311 (void*)(unsigned long)current_cpu(), 1312 (void*)(unsigned long)current_pid(), 1313 (void*)NULL, 1314 (void*)NULL, 1315 (void*)NULL); 1316 } 1317 #else 1318 #define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish) 1319 #endif 1320 1321 /* 1322 * Start the truncation of the file to new_size. The new size 1323 * must be smaller than the current size. This routine will 1324 * clear the buffer and page caches of file data in the removed 1325 * range, and xfs_itruncate_finish() will remove the underlying 1326 * disk blocks. 
1327 * 1328 * The inode must have its I/O lock locked EXCLUSIVELY, and it 1329 * must NOT have the inode lock held at all. This is because we're 1330 * calling into the buffer/page cache code and we can't hold the 1331 * inode lock when we do so. 1332 * 1333 * We need to wait for any direct I/Os in flight to complete before we 1334 * proceed with the truncate. This is needed to prevent the extents 1335 * being read or written by the direct I/Os from being removed while the 1336 * I/O is in flight as there is no other method of synchronising 1337 * direct I/O with the truncate operation. Also, because we hold 1338 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being 1339 * started until the truncate completes and drops the lock. Essentially, 1340 * the xfs_ioend_wait() call forms an I/O barrier that provides strict 1341 * ordering between direct I/Os and the truncate operation. 1342 * 1343 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE 1344 * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used 1345 * in the case that the caller is locking things out of order and 1346 * may not be able to call xfs_itruncate_finish() with the inode lock 1347 * held without dropping the I/O lock. If the caller must drop the 1348 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start() 1349 * must be called again with all the same restrictions as the initial 1350 * call. 1351 */ 1352 int 1353 xfs_itruncate_start( 1354 xfs_inode_t *ip, 1355 uint flags, 1356 xfs_fsize_t new_size) 1357 { 1358 xfs_fsize_t last_byte; 1359 xfs_off_t toss_start; 1360 xfs_mount_t *mp; 1361 int error = 0; 1362 1363 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 1364 ASSERT((new_size == 0) || (new_size <= ip->i_size)); 1365 ASSERT((flags == XFS_ITRUNC_DEFINITE) || 1366 (flags == XFS_ITRUNC_MAYBE)); 1367 1368 mp = ip->i_mount; 1369 1370 /* wait for the completion of any pending DIOs */ 1371 if (new_size == 0 || new_size < ip->i_size) 1372 xfs_ioend_wait(ip); 1373 1374 /* 1375 * Call toss_pages or flushinval_pages to get rid of pages 1376 * overlapping the region being removed. We have to use 1377 * the less efficient flushinval_pages in the case that the 1378 * caller may not be able to finish the truncate without 1379 * dropping the inode's I/O lock. Make sure 1380 * to catch any pages brought in by buffers overlapping 1381 * the EOF by searching out beyond the isize by our 1382 * block size. We round new_size up to a block boundary 1383 * so that we don't toss things on the same block as 1384 * new_size but before it. 1385 * 1386 * Before calling toss_page or flushinval_pages, make sure to 1387 * call remapf() over the same region if the file is mapped. 1388 * This frees up mapped file references to the pages in the 1389 * given range and for the flushinval_pages case it ensures 1390 * that we get the latest mapped changes flushed out. 1391 */ 1392 toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); 1393 toss_start = XFS_FSB_TO_B(mp, toss_start); 1394 if (toss_start < 0) { 1395 /* 1396 * The place to start tossing is beyond our maximum 1397 * file size, so there is no way that the data extended 1398 * out there. 
1399 */ 1400 return 0; 1401 } 1402 last_byte = xfs_file_last_byte(ip); 1403 xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start, 1404 last_byte); 1405 if (last_byte > toss_start) { 1406 if (flags & XFS_ITRUNC_DEFINITE) { 1407 xfs_tosspages(ip, toss_start, 1408 -1, FI_REMAPF_LOCKED); 1409 } else { 1410 error = xfs_flushinval_pages(ip, toss_start, 1411 -1, FI_REMAPF_LOCKED); 1412 } 1413 } 1414 1415 #ifdef DEBUG 1416 if (new_size == 0) { 1417 ASSERT(VN_CACHED(VFS_I(ip)) == 0); 1418 } 1419 #endif 1420 return error; 1421 } 1422 1423 /* 1424 * Shrink the file to the given new_size. The new size must be smaller than 1425 * the current size. This will free up the underlying blocks in the removed 1426 * range after a call to xfs_itruncate_start() or xfs_atruncate_start(). 1427 * 1428 * The transaction passed to this routine must have made a permanent log 1429 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the 1430 * given transaction and start new ones, so make sure everything involved in 1431 * the transaction is tidy before calling here. Some transaction will be 1432 * returned to the caller to be committed. The incoming transaction must 1433 * already include the inode, and both inode locks must be held exclusively. 1434 * The inode must also be "held" within the transaction. On return the inode 1435 * will be "held" within the returned transaction. This routine does NOT 1436 * require any disk space to be reserved for it within the transaction. 1437 * 1438 * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it 1439 * indicates the fork which is to be truncated. For the attribute fork we only 1440 * support truncation to size 0. 1441 * 1442 * We use the sync parameter to indicate whether or not the first transaction 1443 * we perform might have to be synchronous. For the attr fork, it needs to be 1444 * so if the unlink of the inode is not yet known to be permanent in the log. 1445 * This keeps us from freeing and reusing the blocks of the attribute fork 1446 * before the unlink of the inode becomes permanent. 1447 * 1448 * For the data fork, we normally have to run synchronously if we're being 1449 * called out of the inactive path or we're being called out of the create path 1450 * where we're truncating an existing file. Either way, the truncate needs to 1451 * be sync so blocks don't reappear in the file with altered data in case of a 1452 * crash. wsync filesystems can run the first case async because anything that 1453 * shrinks the inode has to run sync so by the time we're called here from 1454 * inactive, the inode size is permanently set to 0. 1455 * 1456 * Calls from the truncate path always need to be sync unless we're in a wsync 1457 * filesystem and the file has already been unlinked. 1458 * 1459 * The caller is responsible for correctly setting the sync parameter. It gets 1460 * too hard for us to guess here which path we're being called out of just 1461 * based on inode state. 1462 * 1463 * If we get an error, we must return with the inode locked and linked into the 1464 * current transaction. This keeps things simple for the higher level code, 1465 * because it always knows that the inode is locked and held in the transaction 1466 * that returns to it whether errors occur or not. We don't mark the inode 1467 * dirty on error so that transactions can be easily aborted if possible. 
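 *
 * To tie the pieces together, a typical truncate-to-zero caller follows
 * roughly this sequence (an illustrative sketch assembled from the locking
 * and reservation rules above, not a verbatim copy of any one call site;
 * error handling omitted):
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
 *	xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
 *			  XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 *	xfs_trans_ihold(tp, ip);
 *	error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, sync);
 *	xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);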
 */
int
xfs_itruncate_finish(
	xfs_trans_t	**tp,
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	int		fork,
	int		sync)
{
	xfs_fsblock_t	first_block;
	xfs_fileoff_t	first_unmap_block;
	xfs_fileoff_t	last_block;
	xfs_filblks_t	unmap_len=0;
	xfs_mount_t	*mp;
	xfs_trans_t	*ntp;
	int		done;
	int		committed;
	xfs_bmap_free_t	free_list;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT(*tp != NULL);
	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_transp == *tp);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);

	ntp = *tp;
	mp = (ntp)->t_mountp;
	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));

	/*
	 * We only support truncating the entire attribute fork.
	 */
	if (fork == XFS_ATTR_FORK) {
		new_size = 0LL;
	}
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
	/*
	 * The first thing we do is set the size to new_size permanently
	 * on disk.  This way we don't have to worry about anyone ever
	 * being able to look at the data being freed even in the face
	 * of a crash.  What we're getting around here is the case where
	 * we free a block, it is allocated to another file, it is written
	 * to, and then we crash.  If the new data gets written to the
	 * file but the log buffers containing the free and reallocation
	 * don't, then we'd end up with garbage in the blocks being freed.
	 * As long as we make the new_size permanent before actually
	 * freeing any blocks it doesn't matter if they get written to.
	 *
	 * The callers must signal into us whether or not the size
	 * setting here must be synchronous.  There are a few cases
	 * where it doesn't have to be synchronous.  Those cases
	 * occur if the file is unlinked and we know the unlink is
	 * permanent or if the blocks being truncated are guaranteed
	 * to be beyond the inode eof (regardless of the link count)
	 * and the eof value is permanent.  Both of these cases occur
	 * only on wsync-mounted filesystems.  In those cases, we're
	 * guaranteed that no user will ever see the data in the blocks
	 * that are being truncated so the truncate can run async.
	 * In the free beyond eof case, the file may wind up with
	 * more blocks allocated to it than it needs if we crash
	 * and that won't get fixed until the next time the file
	 * is re-opened and closed but that's ok as that shouldn't
	 * be too many blocks.
	 *
	 * However, we can't just make all wsync xactions run async
	 * because there's one call out of the create path that needs
	 * to run sync where it's truncating an existing file to size
	 * 0 whose size is > 0.
	 *
	 * It's probably possible to come up with a test in this
	 * routine that would correctly distinguish all the above
	 * cases from the values of the function parameters and the
	 * inode state but for sanity's sake, I've decided to let the
	 * layers above just tell us.  It's simpler to correctly figure
	 * out in the layer above exactly under what conditions we
	 * can run async and I think it's easier for others to read and
	 * follow the logic in case something has to be changed.
	 * cscope is your friend -- rcc.
	 *
	 * The attribute fork is much simpler.
	 *
	 * For the attribute fork we allow the caller to tell us whether
	 * the unlink of the inode that led to this call is yet permanent
	 * in the on disk log.  If it is not and we will be freeing extents
	 * in this inode then we make the first transaction synchronous
	 * to make sure that the unlink is permanent by the time we free
	 * the blocks.
	 */
	if (fork == XFS_DATA_FORK) {
		if (ip->i_d.di_nextents > 0) {
			/*
			 * If we are not changing the file size then do
			 * not update the on-disk file size - we may be
			 * called from xfs_inactive_free_eofblocks().  If we
			 * update the on-disk file size and then the system
			 * crashes before the contents of the file are
			 * flushed to disk then the files may be full of
			 * holes (ie NULL files bug).
			 */
			if (ip->i_size != new_size) {
				ip->i_d.di_size = new_size;
				ip->i_size = new_size;
				xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
			}
		}
	} else if (sync) {
		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
		if (ip->i_d.di_anextents > 0)
			xfs_trans_set_sync(ntp);
	}
	ASSERT(fork == XFS_DATA_FORK ||
		(fork == XFS_ATTR_FORK &&
			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	ASSERT(first_unmap_block <= last_block);
	done = 0;
	if (last_block == first_unmap_block) {
		done = 1;
	} else {
		unmap_len = last_block - first_unmap_block + 1;
	}
	while (!done) {
		/*
		 * Free up to XFS_ITRUNC_MAX_EXTENTS.  xfs_bunmapi()
		 * will tell us whether it freed the entire range or
		 * not.  If this is a synchronous mount (wsync),
		 * then we can tell bunmapi to keep all the
		 * transactions asynchronous since the unlink
		 * transaction that made this inode inactive has
		 * already hit the disk.  There's no danger of
		 * the freed blocks being reused, there being a
		 * crash, and the reused blocks suddenly reappearing
		 * in this file with garbage in them once recovery
		 * runs.
		 */
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(ntp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(fork) |
				      (sync ? 0 : XFS_BMAPI_ASYNC),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    NULL, &done);
		if (error) {
			/*
			 * If the bunmapi call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
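		 *
		 * The code below rolls the transaction in this order (a
		 * summary of the existing calls, not additional work):
		 *
		 *	xfs_bmap_finish(tp, &free_list, &committed)
		 *	ntp = xfs_trans_dup(ntp)
		 *	xfs_trans_commit(*tp, 0); *tp = ntp
		 *	xfs_trans_ijoin()/xfs_trans_ihold() to rejoin the inode
		 *	xfs_log_ticket_put(ntp->t_ticket)
		 *	xfs_trans_reserve() to regrant log space for the next pass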
1642 */ 1643 error = xfs_bmap_finish(tp, &free_list, &committed); 1644 ntp = *tp; 1645 if (committed) { 1646 /* link the inode into the next xact in the chain */ 1647 xfs_trans_ijoin(ntp, ip, 1648 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1649 xfs_trans_ihold(ntp, ip); 1650 } 1651 1652 if (error) { 1653 /* 1654 * If the bmap finish call encounters an error, return 1655 * to the caller where the transaction can be properly 1656 * aborted. We just need to make sure we're not 1657 * holding any resources that we were not when we came 1658 * in. 1659 * 1660 * Aborting from this point might lose some blocks in 1661 * the file system, but oh well. 1662 */ 1663 xfs_bmap_cancel(&free_list); 1664 return error; 1665 } 1666 1667 if (committed) { 1668 /* 1669 * Mark the inode dirty so it will be logged and 1670 * moved forward in the log as part of every commit. 1671 */ 1672 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); 1673 } 1674 1675 ntp = xfs_trans_dup(ntp); 1676 error = xfs_trans_commit(*tp, 0); 1677 *tp = ntp; 1678 1679 /* link the inode into the next transaction in the chain */ 1680 xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1681 xfs_trans_ihold(ntp, ip); 1682 1683 if (error) 1684 return error; 1685 /* 1686 * transaction commit worked ok so we can drop the extra ticket 1687 * reference that we gained in xfs_trans_dup() 1688 */ 1689 xfs_log_ticket_put(ntp->t_ticket); 1690 error = xfs_trans_reserve(ntp, 0, 1691 XFS_ITRUNCATE_LOG_RES(mp), 0, 1692 XFS_TRANS_PERM_LOG_RES, 1693 XFS_ITRUNCATE_LOG_COUNT); 1694 if (error) 1695 return error; 1696 } 1697 /* 1698 * Only update the size in the case of the data fork, but 1699 * always re-log the inode so that our permanent transaction 1700 * can keep on rolling it forward in the log. 1701 */ 1702 if (fork == XFS_DATA_FORK) { 1703 xfs_isize_check(mp, ip, new_size); 1704 /* 1705 * If we are not changing the file size then do 1706 * not update the on-disk file size - we may be 1707 * called from xfs_inactive_free_eofblocks(). If we 1708 * update the on-disk file size and then the system 1709 * crashes before the contents of the file are 1710 * flushed to disk then the files may be full of 1711 * holes (ie NULL files bug). 1712 */ 1713 if (ip->i_size != new_size) { 1714 ip->i_d.di_size = new_size; 1715 ip->i_size = new_size; 1716 } 1717 } 1718 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); 1719 ASSERT((new_size != 0) || 1720 (fork == XFS_ATTR_FORK) || 1721 (ip->i_delayed_blks == 0)); 1722 ASSERT((new_size != 0) || 1723 (fork == XFS_ATTR_FORK) || 1724 (ip->i_d.di_nextents == 0)); 1725 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0); 1726 return 0; 1727 } 1728 1729 /* 1730 * This is called when the inode's link count goes to 0. 1731 * We place the on-disk inode on a list in the AGI. It 1732 * will be pulled from this list when the inode is freed. 1733 */ 1734 int 1735 xfs_iunlink( 1736 xfs_trans_t *tp, 1737 xfs_inode_t *ip) 1738 { 1739 xfs_mount_t *mp; 1740 xfs_agi_t *agi; 1741 xfs_dinode_t *dip; 1742 xfs_buf_t *agibp; 1743 xfs_buf_t *ibp; 1744 xfs_agino_t agino; 1745 short bucket_index; 1746 int offset; 1747 int error; 1748 1749 ASSERT(ip->i_d.di_nlink == 0); 1750 ASSERT(ip->i_d.di_mode != 0); 1751 ASSERT(ip->i_transp == tp); 1752 1753 mp = tp->t_mountp; 1754 1755 /* 1756 * Get the agi buffer first. It ensures lock ordering 1757 * on the list. 
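	 *
	 * The bucket used below is (agino % XFS_AGI_UNLINKED_BUCKETS); each
	 * bucket is a singly linked list threaded through di_next_unlinked
	 * and terminated by NULLAGINO, with new inodes inserted at the head,
	 * e.g. (illustrative):
	 *
	 *	agi_unlinked[bucket] -> ip -> old head -> ... -> NULLAGINO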
1758 */ 1759 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp); 1760 if (error) 1761 return error; 1762 agi = XFS_BUF_TO_AGI(agibp); 1763 1764 /* 1765 * Get the index into the agi hash table for the 1766 * list this inode will go on. 1767 */ 1768 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1769 ASSERT(agino != 0); 1770 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1771 ASSERT(agi->agi_unlinked[bucket_index]); 1772 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); 1773 1774 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { 1775 /* 1776 * There is already another inode in the bucket we need 1777 * to add ourselves to. Add us at the front of the list. 1778 * Here we put the head pointer into our next pointer, 1779 * and then we fall through to point the head at us. 1780 */ 1781 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); 1782 if (error) 1783 return error; 1784 1785 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); 1786 /* both on-disk, don't endian flip twice */ 1787 dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; 1788 offset = ip->i_imap.im_boffset + 1789 offsetof(xfs_dinode_t, di_next_unlinked); 1790 xfs_trans_inode_buf(tp, ibp); 1791 xfs_trans_log_buf(tp, ibp, offset, 1792 (offset + sizeof(xfs_agino_t) - 1)); 1793 xfs_inobp_check(mp, ibp); 1794 } 1795 1796 /* 1797 * Point the bucket head pointer at the inode being inserted. 1798 */ 1799 ASSERT(agino != 0); 1800 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); 1801 offset = offsetof(xfs_agi_t, agi_unlinked) + 1802 (sizeof(xfs_agino_t) * bucket_index); 1803 xfs_trans_log_buf(tp, agibp, offset, 1804 (offset + sizeof(xfs_agino_t) - 1)); 1805 return 0; 1806 } 1807 1808 /* 1809 * Pull the on-disk inode from the AGI unlinked list. 1810 */ 1811 STATIC int 1812 xfs_iunlink_remove( 1813 xfs_trans_t *tp, 1814 xfs_inode_t *ip) 1815 { 1816 xfs_ino_t next_ino; 1817 xfs_mount_t *mp; 1818 xfs_agi_t *agi; 1819 xfs_dinode_t *dip; 1820 xfs_buf_t *agibp; 1821 xfs_buf_t *ibp; 1822 xfs_agnumber_t agno; 1823 xfs_agino_t agino; 1824 xfs_agino_t next_agino; 1825 xfs_buf_t *last_ibp; 1826 xfs_dinode_t *last_dip = NULL; 1827 short bucket_index; 1828 int offset, last_offset = 0; 1829 int error; 1830 1831 mp = tp->t_mountp; 1832 agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 1833 1834 /* 1835 * Get the agi buffer first. It ensures lock ordering 1836 * on the list. 1837 */ 1838 error = xfs_read_agi(mp, tp, agno, &agibp); 1839 if (error) 1840 return error; 1841 1842 agi = XFS_BUF_TO_AGI(agibp); 1843 1844 /* 1845 * Get the index into the agi hash table for the 1846 * list this inode will go on. 1847 */ 1848 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1849 ASSERT(agino != 0); 1850 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1851 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO); 1852 ASSERT(agi->agi_unlinked[bucket_index]); 1853 1854 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { 1855 /* 1856 * We're at the head of the list. Get the inode's 1857 * on-disk buffer to see if there is anyone after us 1858 * on the list. Only modify our next pointer if it 1859 * is not already NULLAGINO. This saves us the overhead 1860 * of dealing with the buffer when there is no need to 1861 * change it. 1862 */ 1863 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); 1864 if (error) { 1865 cmn_err(CE_WARN, 1866 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. 
Returning error.", 1867 error, mp->m_fsname); 1868 return error; 1869 } 1870 next_agino = be32_to_cpu(dip->di_next_unlinked); 1871 ASSERT(next_agino != 0); 1872 if (next_agino != NULLAGINO) { 1873 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 1874 offset = ip->i_imap.im_boffset + 1875 offsetof(xfs_dinode_t, di_next_unlinked); 1876 xfs_trans_inode_buf(tp, ibp); 1877 xfs_trans_log_buf(tp, ibp, offset, 1878 (offset + sizeof(xfs_agino_t) - 1)); 1879 xfs_inobp_check(mp, ibp); 1880 } else { 1881 xfs_trans_brelse(tp, ibp); 1882 } 1883 /* 1884 * Point the bucket head pointer at the next inode. 1885 */ 1886 ASSERT(next_agino != 0); 1887 ASSERT(next_agino != agino); 1888 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); 1889 offset = offsetof(xfs_agi_t, agi_unlinked) + 1890 (sizeof(xfs_agino_t) * bucket_index); 1891 xfs_trans_log_buf(tp, agibp, offset, 1892 (offset + sizeof(xfs_agino_t) - 1)); 1893 } else { 1894 /* 1895 * We need to search the list for the inode being freed. 1896 */ 1897 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 1898 last_ibp = NULL; 1899 while (next_agino != agino) { 1900 /* 1901 * If the last inode wasn't the one pointing to 1902 * us, then release its buffer since we're not 1903 * going to do anything with it. 1904 */ 1905 if (last_ibp != NULL) { 1906 xfs_trans_brelse(tp, last_ibp); 1907 } 1908 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); 1909 error = xfs_inotobp(mp, tp, next_ino, &last_dip, 1910 &last_ibp, &last_offset, 0); 1911 if (error) { 1912 cmn_err(CE_WARN, 1913 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.", 1914 error, mp->m_fsname); 1915 return error; 1916 } 1917 next_agino = be32_to_cpu(last_dip->di_next_unlinked); 1918 ASSERT(next_agino != NULLAGINO); 1919 ASSERT(next_agino != 0); 1920 } 1921 /* 1922 * Now last_ibp points to the buffer previous to us on 1923 * the unlinked list. Pull us from the list. 1924 */ 1925 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); 1926 if (error) { 1927 cmn_err(CE_WARN, 1928 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", 1929 error, mp->m_fsname); 1930 return error; 1931 } 1932 next_agino = be32_to_cpu(dip->di_next_unlinked); 1933 ASSERT(next_agino != 0); 1934 ASSERT(next_agino != agino); 1935 if (next_agino != NULLAGINO) { 1936 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 1937 offset = ip->i_imap.im_boffset + 1938 offsetof(xfs_dinode_t, di_next_unlinked); 1939 xfs_trans_inode_buf(tp, ibp); 1940 xfs_trans_log_buf(tp, ibp, offset, 1941 (offset + sizeof(xfs_agino_t) - 1)); 1942 xfs_inobp_check(mp, ibp); 1943 } else { 1944 xfs_trans_brelse(tp, ibp); 1945 } 1946 /* 1947 * Point the previous inode on the list to the next inode. 
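 *
 * Only the previous inode's di_next_unlinked field is logged
 * below; the rest of that inode is not part of this transaction,
 * so the buffer is marked as an inode buffer and just that
 * sizeof(xfs_agino_t) range is logged.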
1948 */ 1949 last_dip->di_next_unlinked = cpu_to_be32(next_agino); 1950 ASSERT(next_agino != 0); 1951 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked); 1952 xfs_trans_inode_buf(tp, last_ibp); 1953 xfs_trans_log_buf(tp, last_ibp, offset, 1954 (offset + sizeof(xfs_agino_t) - 1)); 1955 xfs_inobp_check(mp, last_ibp); 1956 } 1957 return 0; 1958 } 1959 1960 STATIC void 1961 xfs_ifree_cluster( 1962 xfs_inode_t *free_ip, 1963 xfs_trans_t *tp, 1964 xfs_ino_t inum) 1965 { 1966 xfs_mount_t *mp = free_ip->i_mount; 1967 int blks_per_cluster; 1968 int nbufs; 1969 int ninodes; 1970 int i, j, found, pre_flushed; 1971 xfs_daddr_t blkno; 1972 xfs_buf_t *bp; 1973 xfs_inode_t *ip, **ip_found; 1974 xfs_inode_log_item_t *iip; 1975 xfs_log_item_t *lip; 1976 xfs_perag_t *pag = xfs_get_perag(mp, inum); 1977 1978 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { 1979 blks_per_cluster = 1; 1980 ninodes = mp->m_sb.sb_inopblock; 1981 nbufs = XFS_IALLOC_BLOCKS(mp); 1982 } else { 1983 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) / 1984 mp->m_sb.sb_blocksize; 1985 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock; 1986 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster; 1987 } 1988 1989 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS); 1990 1991 for (j = 0; j < nbufs; j++, inum += ninodes) { 1992 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 1993 XFS_INO_TO_AGBNO(mp, inum)); 1994 1995 1996 /* 1997 * Look for each inode in memory and attempt to lock it; 1998 * we can be racing with flush and tail pushing here. 1999 * Any inode we get the locks on is added to an array of 2000 * inode items to process later. 2001 * 2002 * In the race to get the buffer lock, we could beat a flush 2003 * or tail pushing thread to the lock here, in which 2004 * case they will go looking for the inode buffer 2005 * and fail, so we need some other form of interlock 2006 * here. 2007 */ 2008 found = 0; 2009 for (i = 0; i < ninodes; i++) { 2010 read_lock(&pag->pag_ici_lock); 2011 ip = radix_tree_lookup(&pag->pag_ici_root, 2012 XFS_INO_TO_AGINO(mp, (inum + i))); 2013 2014 /* Inode not in memory or we found it already, 2015 * nothing to do 2016 */ 2017 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) { 2018 read_unlock(&pag->pag_ici_lock); 2019 continue; 2020 } 2021 2022 if (xfs_inode_clean(ip)) { 2023 read_unlock(&pag->pag_ici_lock); 2024 continue; 2025 } 2026 2027 /* If we can get the locks then add it to the 2028 * list, otherwise by the time we get the bp lock 2029 * below it will already be attached to the 2030 * inode buffer. 2031 */ 2032 2033 /* This inode will already be locked - by us; let's 2034 * keep it that way.
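 * Since we already hold the ILOCK on free_ip, only the flush
 * lock is tried here; re-taking the ILOCK would deadlock
 * against ourselves.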
2035 */ 2036 2037 if (ip == free_ip) { 2038 if (xfs_iflock_nowait(ip)) { 2039 xfs_iflags_set(ip, XFS_ISTALE); 2040 if (xfs_inode_clean(ip)) { 2041 xfs_ifunlock(ip); 2042 } else { 2043 ip_found[found++] = ip; 2044 } 2045 } 2046 read_unlock(&pag->pag_ici_lock); 2047 continue; 2048 } 2049 2050 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2051 if (xfs_iflock_nowait(ip)) { 2052 xfs_iflags_set(ip, XFS_ISTALE); 2053 2054 if (xfs_inode_clean(ip)) { 2055 xfs_ifunlock(ip); 2056 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2057 } else { 2058 ip_found[found++] = ip; 2059 } 2060 } else { 2061 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2062 } 2063 } 2064 read_unlock(&pag->pag_ici_lock); 2065 } 2066 2067 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2068 mp->m_bsize * blks_per_cluster, 2069 XFS_BUF_LOCK); 2070 2071 pre_flushed = 0; 2072 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); 2073 while (lip) { 2074 if (lip->li_type == XFS_LI_INODE) { 2075 iip = (xfs_inode_log_item_t *)lip; 2076 ASSERT(iip->ili_logged == 1); 2077 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; 2078 xfs_trans_ail_copy_lsn(mp->m_ail, 2079 &iip->ili_flush_lsn, 2080 &iip->ili_item.li_lsn); 2081 xfs_iflags_set(iip->ili_inode, XFS_ISTALE); 2082 pre_flushed++; 2083 } 2084 lip = lip->li_bio_list; 2085 } 2086 2087 for (i = 0; i < found; i++) { 2088 ip = ip_found[i]; 2089 iip = ip->i_itemp; 2090 2091 if (!iip) { 2092 ip->i_update_core = 0; 2093 xfs_ifunlock(ip); 2094 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2095 continue; 2096 } 2097 2098 iip->ili_last_fields = iip->ili_format.ilf_fields; 2099 iip->ili_format.ilf_fields = 0; 2100 iip->ili_logged = 1; 2101 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 2102 &iip->ili_item.li_lsn); 2103 2104 xfs_buf_attach_iodone(bp, 2105 (void(*)(xfs_buf_t*,xfs_log_item_t*)) 2106 xfs_istale_done, (xfs_log_item_t *)iip); 2107 if (ip != free_ip) { 2108 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2109 } 2110 } 2111 2112 if (found || pre_flushed) 2113 xfs_trans_stale_inode_buf(tp, bp); 2114 xfs_trans_binval(tp, bp); 2115 } 2116 2117 kmem_free(ip_found); 2118 xfs_put_perag(mp, pag); 2119 } 2120 2121 /* 2122 * This is called to return an inode to the inode free list. 2123 * The inode should already be truncated to 0 length and have 2124 * no pages associated with it. This routine also assumes that 2125 * the inode is already a part of the transaction. 2126 * 2127 * The on-disk copy of the inode will have been added to the list 2128 * of unlinked inodes in the AGI. We need to remove the inode from 2129 * that list atomically with respect to freeing it here. 2130 */ 2131 int 2132 xfs_ifree( 2133 xfs_trans_t *tp, 2134 xfs_inode_t *ip, 2135 xfs_bmap_free_t *flist) 2136 { 2137 int error; 2138 int delete; 2139 xfs_ino_t first_ino; 2140 xfs_dinode_t *dip; 2141 xfs_buf_t *ibp; 2142 2143 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2144 ASSERT(ip->i_transp == tp); 2145 ASSERT(ip->i_d.di_nlink == 0); 2146 ASSERT(ip->i_d.di_nextents == 0); 2147 ASSERT(ip->i_d.di_anextents == 0); 2148 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) || 2149 ((ip->i_d.di_mode & S_IFMT) != S_IFREG)); 2150 ASSERT(ip->i_d.di_nblocks == 0); 2151 2152 /* 2153 * Pull the on-disk inode from the AGI unlinked list. 
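 *
 * After the unlinked list removal the inode itself is freed via
 * xfs_difree(), the in-core copy is reset to an empty extents
 * inode, and if xfs_difree() reports that the whole chunk went
 * away (delete) the cluster buffers are invalidated in
 * xfs_ifree_cluster().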
2154 */ 2155 error = xfs_iunlink_remove(tp, ip); 2156 if (error != 0) { 2157 return error; 2158 } 2159 2160 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino); 2161 if (error != 0) { 2162 return error; 2163 } 2164 ip->i_d.di_mode = 0; /* mark incore inode as free */ 2165 ip->i_d.di_flags = 0; 2166 ip->i_d.di_dmevmask = 0; 2167 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ 2168 ip->i_df.if_ext_max = 2169 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 2170 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; 2171 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 2172 /* 2173 * Bump the generation count so no one will be confused 2174 * by reincarnations of this inode. 2175 */ 2176 ip->i_d.di_gen++; 2177 2178 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2179 2180 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK); 2181 if (error) 2182 return error; 2183 2184 /* 2185 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat 2186 * from picking up this inode when it is reclaimed (its incore state 2187 * initialzed but not flushed to disk yet). The in-core di_mode is 2188 * already cleared and a corresponding transaction logged. 2189 * The hack here just synchronizes the in-core to on-disk 2190 * di_mode value in advance before the actual inode sync to disk. 2191 * This is OK because the inode is already unlinked and would never 2192 * change its di_mode again for this inode generation. 2193 * This is a temporary hack that would require a proper fix 2194 * in the future. 2195 */ 2196 dip->di_mode = 0; 2197 2198 if (delete) { 2199 xfs_ifree_cluster(ip, tp, first_ino); 2200 } 2201 2202 return 0; 2203 } 2204 2205 /* 2206 * Reallocate the space for if_broot based on the number of records 2207 * being added or deleted as indicated in rec_diff. Move the records 2208 * and pointers in if_broot to fit the new size. When shrinking this 2209 * will eliminate holes between the records and pointers created by 2210 * the caller. When growing this will create holes to be filled in 2211 * by the caller. 2212 * 2213 * The caller must not request to add more records than would fit in 2214 * the on-disk inode root. If the if_broot is currently NULL, then 2215 * if we adding records one will be allocated. The caller must also 2216 * not request that the number of records go below zero, although 2217 * it can go to zero. 2218 * 2219 * ip -- the inode whose if_broot area is changing 2220 * ext_diff -- the change in the number of records, positive or negative, 2221 * requested for the if_broot array. 2222 */ 2223 void 2224 xfs_iroot_realloc( 2225 xfs_inode_t *ip, 2226 int rec_diff, 2227 int whichfork) 2228 { 2229 struct xfs_mount *mp = ip->i_mount; 2230 int cur_max; 2231 xfs_ifork_t *ifp; 2232 struct xfs_btree_block *new_broot; 2233 int new_max; 2234 size_t new_size; 2235 char *np; 2236 char *op; 2237 2238 /* 2239 * Handle the degenerate case quietly. 2240 */ 2241 if (rec_diff == 0) { 2242 return; 2243 } 2244 2245 ifp = XFS_IFORK_PTR(ip, whichfork); 2246 if (rec_diff > 0) { 2247 /* 2248 * If there wasn't any memory allocated before, just 2249 * allocate it now and get out. 2250 */ 2251 if (ifp->if_broot_bytes == 0) { 2252 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); 2253 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP); 2254 ifp->if_broot_bytes = (int)new_size; 2255 return; 2256 } 2257 2258 /* 2259 * If there is already an existing if_broot, then we need 2260 * to realloc() it and shift the pointers to their new 2261 * location. 
The records don't change location because 2262 * they are kept butted up against the btree block header. 2263 */ 2264 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); 2265 new_max = cur_max + rec_diff; 2266 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2267 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size, 2268 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ 2269 KM_SLEEP); 2270 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2271 ifp->if_broot_bytes); 2272 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2273 (int)new_size); 2274 ifp->if_broot_bytes = (int)new_size; 2275 ASSERT(ifp->if_broot_bytes <= 2276 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2277 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t)); 2278 return; 2279 } 2280 2281 /* 2282 * rec_diff is less than 0. In this case, we are shrinking the 2283 * if_broot buffer. It must already exist. If we go to zero 2284 * records, just get rid of the root and clear the status bit. 2285 */ 2286 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); 2287 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); 2288 new_max = cur_max + rec_diff; 2289 ASSERT(new_max >= 0); 2290 if (new_max > 0) 2291 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2292 else 2293 new_size = 0; 2294 if (new_size > 0) { 2295 new_broot = kmem_alloc(new_size, KM_SLEEP); 2296 /* 2297 * First copy over the btree block header. 2298 */ 2299 memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN); 2300 } else { 2301 new_broot = NULL; 2302 ifp->if_flags &= ~XFS_IFBROOT; 2303 } 2304 2305 /* 2306 * Only copy the records and pointers if there are any. 2307 */ 2308 if (new_max > 0) { 2309 /* 2310 * First copy the records. 2311 */ 2312 op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1); 2313 np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1); 2314 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t)); 2315 2316 /* 2317 * Then copy the pointers. 2318 */ 2319 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2320 ifp->if_broot_bytes); 2321 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1, 2322 (int)new_size); 2323 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); 2324 } 2325 kmem_free(ifp->if_broot); 2326 ifp->if_broot = new_broot; 2327 ifp->if_broot_bytes = (int)new_size; 2328 ASSERT(ifp->if_broot_bytes <= 2329 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2330 return; 2331 } 2332 2333 2334 /* 2335 * This is called when the amount of space needed for if_data 2336 * is increased or decreased. The change in size is indicated by 2337 * the number of bytes that need to be added or deleted in the 2338 * byte_diff parameter. 2339 * 2340 * If the amount of space needed has decreased below the size of the 2341 * inline buffer, then switch to using the inline buffer. Otherwise, 2342 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer 2343 * to what is needed. 2344 * 2345 * ip -- the inode whose if_data area is changing 2346 * byte_diff -- the change in the number of bytes, positive or negative, 2347 * requested for the if_data array. 
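 *
 * For example, growing a fork from 40 to 50 bytes leaves if_bytes
 * at 50 but, once the data no longer fits in if_inline_data, the
 * heap buffer is rounded up to 52 bytes (the next multiple of
 * four) so the fork can be logged on word boundaries.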
2348 */ 2349 void 2350 xfs_idata_realloc( 2351 xfs_inode_t *ip, 2352 int byte_diff, 2353 int whichfork) 2354 { 2355 xfs_ifork_t *ifp; 2356 int new_size; 2357 int real_size; 2358 2359 if (byte_diff == 0) { 2360 return; 2361 } 2362 2363 ifp = XFS_IFORK_PTR(ip, whichfork); 2364 new_size = (int)ifp->if_bytes + byte_diff; 2365 ASSERT(new_size >= 0); 2366 2367 if (new_size == 0) { 2368 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2369 kmem_free(ifp->if_u1.if_data); 2370 } 2371 ifp->if_u1.if_data = NULL; 2372 real_size = 0; 2373 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) { 2374 /* 2375 * If the valid extents/data can fit in if_inline_ext/data, 2376 * copy them from the malloc'd vector and free it. 2377 */ 2378 if (ifp->if_u1.if_data == NULL) { 2379 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2380 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2381 ASSERT(ifp->if_real_bytes != 0); 2382 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data, 2383 new_size); 2384 kmem_free(ifp->if_u1.if_data); 2385 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2386 } 2387 real_size = 0; 2388 } else { 2389 /* 2390 * Stuck with malloc/realloc. 2391 * For inline data, the underlying buffer must be 2392 * a multiple of 4 bytes in size so that it can be 2393 * logged and stay on word boundaries. We enforce 2394 * that here. 2395 */ 2396 real_size = roundup(new_size, 4); 2397 if (ifp->if_u1.if_data == NULL) { 2398 ASSERT(ifp->if_real_bytes == 0); 2399 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 2400 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2401 /* 2402 * Only do the realloc if the underlying size 2403 * is really changing. 2404 */ 2405 if (ifp->if_real_bytes != real_size) { 2406 ifp->if_u1.if_data = 2407 kmem_realloc(ifp->if_u1.if_data, 2408 real_size, 2409 ifp->if_real_bytes, 2410 KM_SLEEP); 2411 } 2412 } else { 2413 ASSERT(ifp->if_real_bytes == 0); 2414 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 2415 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, 2416 ifp->if_bytes); 2417 } 2418 } 2419 ifp->if_real_bytes = real_size; 2420 ifp->if_bytes = new_size; 2421 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2422 } 2423 2424 void 2425 xfs_idestroy_fork( 2426 xfs_inode_t *ip, 2427 int whichfork) 2428 { 2429 xfs_ifork_t *ifp; 2430 2431 ifp = XFS_IFORK_PTR(ip, whichfork); 2432 if (ifp->if_broot != NULL) { 2433 kmem_free(ifp->if_broot); 2434 ifp->if_broot = NULL; 2435 } 2436 2437 /* 2438 * If the format is local, then we can't have an extents 2439 * array so just look for an inline data array. If we're 2440 * not local then we may or may not have an extents list, 2441 * so check and free it up if we do. 
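 *
 * Data held directly in if_inline_data needs no freeing; only a
 * separately allocated if_data buffer is released here.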
2442 */ 2443 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 2444 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) && 2445 (ifp->if_u1.if_data != NULL)) { 2446 ASSERT(ifp->if_real_bytes != 0); 2447 kmem_free(ifp->if_u1.if_data); 2448 ifp->if_u1.if_data = NULL; 2449 ifp->if_real_bytes = 0; 2450 } 2451 } else if ((ifp->if_flags & XFS_IFEXTENTS) && 2452 ((ifp->if_flags & XFS_IFEXTIREC) || 2453 ((ifp->if_u1.if_extents != NULL) && 2454 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) { 2455 ASSERT(ifp->if_real_bytes != 0); 2456 xfs_iext_destroy(ifp); 2457 } 2458 ASSERT(ifp->if_u1.if_extents == NULL || 2459 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); 2460 ASSERT(ifp->if_real_bytes == 0); 2461 if (whichfork == XFS_ATTR_FORK) { 2462 kmem_zone_free(xfs_ifork_zone, ip->i_afp); 2463 ip->i_afp = NULL; 2464 } 2465 } 2466 2467 /* 2468 * Increment the pin count of the given inode. 2469 * The pin count is an atomic counter embedded in the inode itself. 2470 */ 2471 void 2472 xfs_ipin( 2473 xfs_inode_t *ip) 2474 { 2475 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2476 2477 atomic_inc(&ip->i_pincount); 2478 } 2479 2480 /* 2481 * Decrement the pin count of the given inode, and wake up 2482 * anyone in xfs_iunpin_wait() if the count goes to 0. The 2483 * inode must have been previously pinned with a call to xfs_ipin(). 2484 */ 2485 void 2486 xfs_iunpin( 2487 xfs_inode_t *ip) 2488 { 2489 ASSERT(atomic_read(&ip->i_pincount) > 0); 2490 2491 if (atomic_dec_and_test(&ip->i_pincount)) 2492 wake_up(&ip->i_ipin_wait); 2493 } 2494 2495 /* 2496 * This is called to unpin an inode. It can be directed to wait or to return 2497 * immediately without waiting for the inode to be unpinned. The caller must 2498 * have the inode locked in at least shared mode so that the inode cannot be 2499 * subsequently pinned once someone is waiting for it to be unpinned. 2500 */ 2501 STATIC void 2502 __xfs_iunpin_wait( 2503 xfs_inode_t *ip, 2504 int wait) 2505 { 2506 xfs_inode_log_item_t *iip = ip->i_itemp; 2507 2508 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2509 if (atomic_read(&ip->i_pincount) == 0) 2510 return; 2511 2512 /* Give the log a push to start the unpinning I/O */ 2513 xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ? 2514 iip->ili_last_lsn : 0, XFS_LOG_FORCE); 2515 if (wait) 2516 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0)); 2517 } 2518 2519 static inline void 2520 xfs_iunpin_wait( 2521 xfs_inode_t *ip) 2522 { 2523 __xfs_iunpin_wait(ip, 1); 2524 } 2525 2526 static inline void 2527 xfs_iunpin_nowait( 2528 xfs_inode_t *ip) 2529 { 2530 __xfs_iunpin_wait(ip, 0); 2531 } 2532 2533 2534 /* 2535 * xfs_iextents_copy() 2536 * 2537 * This is called to copy the REAL extents (as opposed to the delayed 2538 * allocation extents) from the inode into the given buffer. It 2539 * returns the number of bytes copied into the buffer. 2540 * 2541 * If there are no delayed allocation extents, then we can just 2542 * memcpy() the extents into the buffer. Otherwise, we need to 2543 * examine each extent in turn and skip those which are delayed.
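 *
 * Delayed allocation extents are recognized by isnullstartblock()
 * on the extent's start block; the byte count returned is what
 * the caller uses to size the on-disk extent array.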
2544 */ 2545 int 2546 xfs_iextents_copy( 2547 xfs_inode_t *ip, 2548 xfs_bmbt_rec_t *dp, 2549 int whichfork) 2550 { 2551 int copied; 2552 int i; 2553 xfs_ifork_t *ifp; 2554 int nrecs; 2555 xfs_fsblock_t start_block; 2556 2557 ifp = XFS_IFORK_PTR(ip, whichfork); 2558 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2559 ASSERT(ifp->if_bytes > 0); 2560 2561 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2562 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork); 2563 ASSERT(nrecs > 0); 2564 2565 /* 2566 * There are some delayed allocation extents in the 2567 * inode, so copy the extents one at a time and skip 2568 * the delayed ones. There must be at least one 2569 * non-delayed extent. 2570 */ 2571 copied = 0; 2572 for (i = 0; i < nrecs; i++) { 2573 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); 2574 start_block = xfs_bmbt_get_startblock(ep); 2575 if (isnullstartblock(start_block)) { 2576 /* 2577 * It's a delayed allocation extent, so skip it. 2578 */ 2579 continue; 2580 } 2581 2582 /* Translate to on disk format */ 2583 put_unaligned(cpu_to_be64(ep->l0), &dp->l0); 2584 put_unaligned(cpu_to_be64(ep->l1), &dp->l1); 2585 dp++; 2586 copied++; 2587 } 2588 ASSERT(copied != 0); 2589 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip)); 2590 2591 return (copied * (uint)sizeof(xfs_bmbt_rec_t)); 2592 } 2593 2594 /* 2595 * Each of the following cases stores data into the same region 2596 * of the on-disk inode, so only one of them can be valid at 2597 * any given time. While it is possible to have conflicting formats 2598 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is 2599 * in EXTENTS format, this can only happen when the fork has 2600 * changed formats after being modified but before being flushed. 2601 * In these cases, the format always takes precedence, because the 2602 * format indicates the current state of the fork. 2603 */ 2604 /*ARGSUSED*/ 2605 STATIC void 2606 xfs_iflush_fork( 2607 xfs_inode_t *ip, 2608 xfs_dinode_t *dip, 2609 xfs_inode_log_item_t *iip, 2610 int whichfork, 2611 xfs_buf_t *bp) 2612 { 2613 char *cp; 2614 xfs_ifork_t *ifp; 2615 xfs_mount_t *mp; 2616 #ifdef XFS_TRANS_DEBUG 2617 int first; 2618 #endif 2619 static const short brootflag[2] = 2620 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 2621 static const short dataflag[2] = 2622 { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; 2623 static const short extflag[2] = 2624 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 2625 2626 if (!iip) 2627 return; 2628 ifp = XFS_IFORK_PTR(ip, whichfork); 2629 /* 2630 * This can happen if we gave up in iformat in an error path, 2631 * for the attribute fork. 
2632 */ 2633 if (!ifp) { 2634 ASSERT(whichfork == XFS_ATTR_FORK); 2635 return; 2636 } 2637 cp = XFS_DFORK_PTR(dip, whichfork); 2638 mp = ip->i_mount; 2639 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 2640 case XFS_DINODE_FMT_LOCAL: 2641 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) && 2642 (ifp->if_bytes > 0)) { 2643 ASSERT(ifp->if_u1.if_data != NULL); 2644 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2645 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); 2646 } 2647 break; 2648 2649 case XFS_DINODE_FMT_EXTENTS: 2650 ASSERT((ifp->if_flags & XFS_IFEXTENTS) || 2651 !(iip->ili_format.ilf_fields & extflag[whichfork])); 2652 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) || 2653 (ifp->if_bytes == 0)); 2654 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) || 2655 (ifp->if_bytes > 0)); 2656 if ((iip->ili_format.ilf_fields & extflag[whichfork]) && 2657 (ifp->if_bytes > 0)) { 2658 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); 2659 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, 2660 whichfork); 2661 } 2662 break; 2663 2664 case XFS_DINODE_FMT_BTREE: 2665 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) && 2666 (ifp->if_broot_bytes > 0)) { 2667 ASSERT(ifp->if_broot != NULL); 2668 ASSERT(ifp->if_broot_bytes <= 2669 (XFS_IFORK_SIZE(ip, whichfork) + 2670 XFS_BROOT_SIZE_ADJ)); 2671 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes, 2672 (xfs_bmdr_block_t *)cp, 2673 XFS_DFORK_SIZE(dip, mp, whichfork)); 2674 } 2675 break; 2676 2677 case XFS_DINODE_FMT_DEV: 2678 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { 2679 ASSERT(whichfork == XFS_DATA_FORK); 2680 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev); 2681 } 2682 break; 2683 2684 case XFS_DINODE_FMT_UUID: 2685 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { 2686 ASSERT(whichfork == XFS_DATA_FORK); 2687 memcpy(XFS_DFORK_DPTR(dip), 2688 &ip->i_df.if_u2.if_uuid, 2689 sizeof(uuid_t)); 2690 } 2691 break; 2692 2693 default: 2694 ASSERT(0); 2695 break; 2696 } 2697 } 2698 2699 STATIC int 2700 xfs_iflush_cluster( 2701 xfs_inode_t *ip, 2702 xfs_buf_t *bp) 2703 { 2704 xfs_mount_t *mp = ip->i_mount; 2705 xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); 2706 unsigned long first_index, mask; 2707 unsigned long inodes_per_cluster; 2708 int ilist_size; 2709 xfs_inode_t **ilist; 2710 xfs_inode_t *iq; 2711 int nr_found; 2712 int clcount = 0; 2713 int bufwasdelwri; 2714 int i; 2715 2716 ASSERT(pag->pagi_inodeok); 2717 ASSERT(pag->pag_ici_init); 2718 2719 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog; 2720 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); 2721 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); 2722 if (!ilist) 2723 return 0; 2724 2725 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); 2726 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; 2727 read_lock(&pag->pag_ici_lock); 2728 /* really need a gang lookup range call here */ 2729 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, 2730 first_index, inodes_per_cluster); 2731 if (nr_found == 0) 2732 goto out_free; 2733 2734 for (i = 0; i < nr_found; i++) { 2735 iq = ilist[i]; 2736 if (iq == ip) 2737 continue; 2738 /* if the inode lies outside this cluster, we're done. */ 2739 if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) 2740 break; 2741 /* 2742 * Do an un-protected check to see if the inode is dirty and 2743 * is a candidate for flushing. These checks will be repeated 2744 * later after the appropriate locks are acquired. 
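 *
 * Everything here is done with trylocks so that flushing a
 * cluster never blocks behind another flusher; any inode we
 * cannot lock, or that turns out to be pinned, is simply skipped.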
2745 */ 2746 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0) 2747 continue; 2748 2749 /* 2750 * Try to get locks. If any are unavailable or it is pinned, 2751 * then this inode cannot be flushed and is skipped. 2752 */ 2753 2754 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) 2755 continue; 2756 if (!xfs_iflock_nowait(iq)) { 2757 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2758 continue; 2759 } 2760 if (xfs_ipincount(iq)) { 2761 xfs_ifunlock(iq); 2762 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2763 continue; 2764 } 2765 2766 /* 2767 * arriving here means that this inode can be flushed. First 2768 * re-check that it's dirty before flushing. 2769 */ 2770 if (!xfs_inode_clean(iq)) { 2771 int error; 2772 error = xfs_iflush_int(iq, bp); 2773 if (error) { 2774 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2775 goto cluster_corrupt_out; 2776 } 2777 clcount++; 2778 } else { 2779 xfs_ifunlock(iq); 2780 } 2781 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2782 } 2783 2784 if (clcount) { 2785 XFS_STATS_INC(xs_icluster_flushcnt); 2786 XFS_STATS_ADD(xs_icluster_flushinode, clcount); 2787 } 2788 2789 out_free: 2790 read_unlock(&pag->pag_ici_lock); 2791 kmem_free(ilist); 2792 return 0; 2793 2794 2795 cluster_corrupt_out: 2796 /* 2797 * Corruption detected in the clustering loop. Invalidate the 2798 * inode buffer and shut down the filesystem. 2799 */ 2800 read_unlock(&pag->pag_ici_lock); 2801 /* 2802 * Clean up the buffer. If it was B_DELWRI, just release it -- 2803 * brelse can handle it with no problems. If not, shut down the 2804 * filesystem before releasing the buffer. 2805 */ 2806 bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp); 2807 if (bufwasdelwri) 2808 xfs_buf_relse(bp); 2809 2810 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 2811 2812 if (!bufwasdelwri) { 2813 /* 2814 * Just like incore_relse: if we have b_iodone functions, 2815 * mark the buffer as an error and call them. Otherwise 2816 * mark it as stale and brelse. 2817 */ 2818 if (XFS_BUF_IODONE_FUNC(bp)) { 2819 XFS_BUF_CLR_BDSTRAT_FUNC(bp); 2820 XFS_BUF_UNDONE(bp); 2821 XFS_BUF_STALE(bp); 2822 XFS_BUF_ERROR(bp,EIO); 2823 xfs_biodone(bp); 2824 } else { 2825 XFS_BUF_STALE(bp); 2826 xfs_buf_relse(bp); 2827 } 2828 } 2829 2830 /* 2831 * Unlocks the flush lock 2832 */ 2833 xfs_iflush_abort(iq); 2834 kmem_free(ilist); 2835 return XFS_ERROR(EFSCORRUPTED); 2836 } 2837 2838 /* 2839 * xfs_iflush() will write a modified inode's changes out to the 2840 * inode's on disk home. The caller must have the inode lock held 2841 * in at least shared mode and the inode flush completion must be 2842 * active as well. The inode lock will still be held upon return from 2843 * the call and the caller is free to unlock it. 2844 * The inode flush will be completed when the inode reaches the disk. 2845 * The flags indicate how the inode's buffer should be written out. 2846 */ 2847 int 2848 xfs_iflush( 2849 xfs_inode_t *ip, 2850 uint flags) 2851 { 2852 xfs_inode_log_item_t *iip; 2853 xfs_buf_t *bp; 2854 xfs_dinode_t *dip; 2855 xfs_mount_t *mp; 2856 int error; 2857 int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK); 2858 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) }; 2859 2860 XFS_STATS_INC(xs_iflush_count); 2861 2862 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2863 ASSERT(!completion_done(&ip->i_flush)); 2864 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 2865 ip->i_d.di_nextents > ip->i_df.if_ext_max); 2866 2867 iip = ip->i_itemp; 2868 mp = ip->i_mount; 2869 2870 /* 2871 * If the inode isn't dirty, then just release the inode 2872 * flush lock and do nothing. 
2873 */ 2874 if (xfs_inode_clean(ip)) { 2875 xfs_ifunlock(ip); 2876 return 0; 2877 } 2878 2879 /* 2880 * We can't flush the inode until it is unpinned, so wait for it if we 2881 * are allowed to block. We know noone new can pin it, because we are 2882 * holding the inode lock shared and you need to hold it exclusively to 2883 * pin the inode. 2884 * 2885 * If we are not allowed to block, force the log out asynchronously so 2886 * that when we come back the inode will be unpinned. If other inodes 2887 * in the same cluster are dirty, they will probably write the inode 2888 * out for us if they occur after the log force completes. 2889 */ 2890 if (noblock && xfs_ipincount(ip)) { 2891 xfs_iunpin_nowait(ip); 2892 xfs_ifunlock(ip); 2893 return EAGAIN; 2894 } 2895 xfs_iunpin_wait(ip); 2896 2897 /* 2898 * This may have been unpinned because the filesystem is shutting 2899 * down forcibly. If that's the case we must not write this inode 2900 * to disk, because the log record didn't make it to disk! 2901 */ 2902 if (XFS_FORCED_SHUTDOWN(mp)) { 2903 ip->i_update_core = 0; 2904 if (iip) 2905 iip->ili_format.ilf_fields = 0; 2906 xfs_ifunlock(ip); 2907 return XFS_ERROR(EIO); 2908 } 2909 2910 /* 2911 * Decide how buffer will be flushed out. This is done before 2912 * the call to xfs_iflush_int because this field is zeroed by it. 2913 */ 2914 if (iip != NULL && iip->ili_format.ilf_fields != 0) { 2915 /* 2916 * Flush out the inode buffer according to the directions 2917 * of the caller. In the cases where the caller has given 2918 * us a choice choose the non-delwri case. This is because 2919 * the inode is in the AIL and we need to get it out soon. 2920 */ 2921 switch (flags) { 2922 case XFS_IFLUSH_SYNC: 2923 case XFS_IFLUSH_DELWRI_ELSE_SYNC: 2924 flags = 0; 2925 break; 2926 case XFS_IFLUSH_ASYNC_NOBLOCK: 2927 case XFS_IFLUSH_ASYNC: 2928 case XFS_IFLUSH_DELWRI_ELSE_ASYNC: 2929 flags = INT_ASYNC; 2930 break; 2931 case XFS_IFLUSH_DELWRI: 2932 flags = INT_DELWRI; 2933 break; 2934 default: 2935 ASSERT(0); 2936 flags = 0; 2937 break; 2938 } 2939 } else { 2940 switch (flags) { 2941 case XFS_IFLUSH_DELWRI_ELSE_SYNC: 2942 case XFS_IFLUSH_DELWRI_ELSE_ASYNC: 2943 case XFS_IFLUSH_DELWRI: 2944 flags = INT_DELWRI; 2945 break; 2946 case XFS_IFLUSH_ASYNC_NOBLOCK: 2947 case XFS_IFLUSH_ASYNC: 2948 flags = INT_ASYNC; 2949 break; 2950 case XFS_IFLUSH_SYNC: 2951 flags = 0; 2952 break; 2953 default: 2954 ASSERT(0); 2955 flags = 0; 2956 break; 2957 } 2958 } 2959 2960 /* 2961 * Get the buffer containing the on-disk inode. 2962 */ 2963 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 2964 noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK); 2965 if (error || !bp) { 2966 xfs_ifunlock(ip); 2967 return error; 2968 } 2969 2970 /* 2971 * First flush out the inode that xfs_iflush was called with. 2972 */ 2973 error = xfs_iflush_int(ip, bp); 2974 if (error) 2975 goto corrupt_out; 2976 2977 /* 2978 * If the buffer is pinned then push on the log now so we won't 2979 * get stuck waiting in the write for too long. 
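 *
 * A pinned buffer cannot be written back until the log records
 * covering its changes are on disk, so kicking the log here gets
 * the unpin I/O started before we try to write the buffer.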
2980 */ 2981 if (XFS_BUF_ISPINNED(bp)) 2982 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); 2983 2984 /* 2985 * inode clustering: 2986 * see if other inodes can be gathered into this write 2987 */ 2988 error = xfs_iflush_cluster(ip, bp); 2989 if (error) 2990 goto cluster_corrupt_out; 2991 2992 if (flags & INT_DELWRI) { 2993 xfs_bdwrite(mp, bp); 2994 } else if (flags & INT_ASYNC) { 2995 error = xfs_bawrite(mp, bp); 2996 } else { 2997 error = xfs_bwrite(mp, bp); 2998 } 2999 return error; 3000 3001 corrupt_out: 3002 xfs_buf_relse(bp); 3003 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3004 cluster_corrupt_out: 3005 /* 3006 * Unlocks the flush lock 3007 */ 3008 xfs_iflush_abort(ip); 3009 return XFS_ERROR(EFSCORRUPTED); 3010 } 3011 3012 3013 STATIC int 3014 xfs_iflush_int( 3015 xfs_inode_t *ip, 3016 xfs_buf_t *bp) 3017 { 3018 xfs_inode_log_item_t *iip; 3019 xfs_dinode_t *dip; 3020 xfs_mount_t *mp; 3021 #ifdef XFS_TRANS_DEBUG 3022 int first; 3023 #endif 3024 3025 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3026 ASSERT(!completion_done(&ip->i_flush)); 3027 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3028 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3029 3030 iip = ip->i_itemp; 3031 mp = ip->i_mount; 3032 3033 3034 /* 3035 * If the inode isn't dirty, then just release the inode 3036 * flush lock and do nothing. 3037 */ 3038 if (xfs_inode_clean(ip)) { 3039 xfs_ifunlock(ip); 3040 return 0; 3041 } 3042 3043 /* set *dip = inode's place in the buffer */ 3044 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); 3045 3046 /* 3047 * Clear i_update_core before copying out the data. 3048 * This is for coordination with our timestamp updates 3049 * that don't hold the inode lock. They will always 3050 * update the timestamps BEFORE setting i_update_core, 3051 * so if we clear i_update_core after they set it we 3052 * are guaranteed to see their updates to the timestamps. 3053 * I believe that this depends on strongly ordered memory 3054 * semantics, but we have that. We use the SYNCHRONIZE 3055 * macro to make sure that the compiler does not reorder 3056 * the i_update_core access below the data copy below. 3057 */ 3058 ip->i_update_core = 0; 3059 SYNCHRONIZE(); 3060 3061 /* 3062 * Make sure to get the latest atime from the Linux inode. 
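 *
 * Access time is updated in the Linux inode without taking the
 * XFS inode lock, so it is copied into the in-core dinode here
 * just before the core is written out.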
3063 */ 3064 xfs_synchronize_atime(ip); 3065 3066 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC, 3067 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 3068 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3069 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p", 3070 ip->i_ino, be16_to_cpu(dip->di_magic), dip); 3071 goto corrupt_out; 3072 } 3073 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, 3074 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { 3075 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3076 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x", 3077 ip->i_ino, ip, ip->i_d.di_magic); 3078 goto corrupt_out; 3079 } 3080 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 3081 if (XFS_TEST_ERROR( 3082 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 3083 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 3084 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 3085 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3086 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p", 3087 ip->i_ino, ip); 3088 goto corrupt_out; 3089 } 3090 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 3091 if (XFS_TEST_ERROR( 3092 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 3093 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 3094 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 3095 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 3096 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3097 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p", 3098 ip->i_ino, ip); 3099 goto corrupt_out; 3100 } 3101 } 3102 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 3103 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 3104 XFS_RANDOM_IFLUSH_5)) { 3105 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3106 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p", 3107 ip->i_ino, 3108 ip->i_d.di_nextents + ip->i_d.di_anextents, 3109 ip->i_d.di_nblocks, 3110 ip); 3111 goto corrupt_out; 3112 } 3113 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 3114 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 3115 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3116 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 3117 ip->i_ino, ip->i_d.di_forkoff, ip); 3118 goto corrupt_out; 3119 } 3120 /* 3121 * bump the flush iteration count, used to detect flushes which 3122 * postdate a log record during recovery. 3123 */ 3124 3125 ip->i_d.di_flushiter++; 3126 3127 /* 3128 * Copy the dirty parts of the inode into the on-disk 3129 * inode. We always copy out the core of the inode, 3130 * because if the inode is dirty at all the core must 3131 * be. 3132 */ 3133 xfs_dinode_to_disk(dip, &ip->i_d); 3134 3135 /* Wrap, we never let the log put out DI_MAX_FLUSH */ 3136 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 3137 ip->i_d.di_flushiter = 0; 3138 3139 /* 3140 * If this is really an old format inode and the superblock version 3141 * has not been updated to support only new format inodes, then 3142 * convert back to the old inode format. If the superblock version 3143 * has been updated, then make the conversion permanent. 3144 */ 3145 ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb)); 3146 if (ip->i_d.di_version == 1) { 3147 if (!xfs_sb_version_hasnlink(&mp->m_sb)) { 3148 /* 3149 * Convert it back. 3150 */ 3151 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); 3152 dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink); 3153 } else { 3154 /* 3155 * The superblock version has already been bumped, 3156 * so just make the conversion to the new inode 3157 * format permanent. 
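 * Version 1 inodes keep the link count in the 16-bit di_onlink
 * field; version 2 uses the 32-bit di_nlink, so di_onlink and
 * the pad area are zeroed once the conversion is made permanent.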
3158 */ 3159 ip->i_d.di_version = 2; 3160 dip->di_version = 2; 3161 ip->i_d.di_onlink = 0; 3162 dip->di_onlink = 0; 3163 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 3164 memset(&(dip->di_pad[0]), 0, 3165 sizeof(dip->di_pad)); 3166 ASSERT(ip->i_d.di_projid == 0); 3167 } 3168 } 3169 3170 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp); 3171 if (XFS_IFORK_Q(ip)) 3172 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); 3173 xfs_inobp_check(mp, bp); 3174 3175 /* 3176 * We've recorded everything logged in the inode, so we'd 3177 * like to clear the ilf_fields bits so we don't log and 3178 * flush things unnecessarily. However, we can't stop 3179 * logging all this information until the data we've copied 3180 * into the disk buffer is written to disk. If we did we might 3181 * overwrite the copy of the inode in the log with all the 3182 * data after re-logging only part of it, and in the face of 3183 * a crash we wouldn't have all the data we need to recover. 3184 * 3185 * What we do is move the bits to the ili_last_fields field. 3186 * When logging the inode, these bits are moved back to the 3187 * ilf_fields field. In the xfs_iflush_done() routine we 3188 * clear ili_last_fields, since we know that the information 3189 * those bits represent is permanently on disk. As long as 3190 * the flush completes before the inode is logged again, then 3191 * both ilf_fields and ili_last_fields will be cleared. 3192 * 3193 * We can play with the ilf_fields bits here, because the inode 3194 * lock must be held exclusively in order to set bits there 3195 * and the flush lock protects the ili_last_fields bits. 3196 * Set ili_logged so the flush done 3197 * routine can tell whether or not to look in the AIL. 3198 * Also, store the current LSN of the inode so that we can tell 3199 * whether the item has moved in the AIL from xfs_iflush_done(). 3200 * In order to read the lsn we need the AIL lock, because 3201 * it is a 64 bit value that cannot be read atomically. 3202 */ 3203 if (iip != NULL && iip->ili_format.ilf_fields != 0) { 3204 iip->ili_last_fields = iip->ili_format.ilf_fields; 3205 iip->ili_format.ilf_fields = 0; 3206 iip->ili_logged = 1; 3207 3208 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 3209 &iip->ili_item.li_lsn); 3210 3211 /* 3212 * Attach the function xfs_iflush_done to the inode's 3213 * buffer. This will remove the inode from the AIL 3214 * and unlock the inode's flush lock when the inode is 3215 * completely written to disk. 3216 */ 3217 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*)) 3218 xfs_iflush_done, (xfs_log_item_t *)iip); 3219 3220 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); 3221 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); 3222 } else { 3223 /* 3224 * We're flushing an inode which is not in the AIL and has 3225 * not been logged but has i_update_core set. For this 3226 * case we can use a B_DELWRI flush and immediately drop 3227 * the inode flush lock because we can avoid the whole 3228 * AIL state thing. It's OK to drop the flush lock now, 3229 * because we've already locked the buffer and to do anything 3230 * you really need both. 
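 *
 * In other words, the only dirty state here is the in-core update
 * flagged by i_update_core; there is nothing for xfs_iflush_done()
 * to clean up, so the flush lock can be dropped as soon as the
 * copy into the locked buffer is complete.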
3231 */ 3232 if (iip != NULL) { 3233 ASSERT(iip->ili_logged == 0); 3234 ASSERT(iip->ili_last_fields == 0); 3235 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0); 3236 } 3237 xfs_ifunlock(ip); 3238 } 3239 3240 return 0; 3241 3242 corrupt_out: 3243 return XFS_ERROR(EFSCORRUPTED); 3244 } 3245 3246 3247 3248 #ifdef XFS_ILOCK_TRACE 3249 void 3250 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) 3251 { 3252 ktrace_enter(ip->i_lock_trace, 3253 (void *)ip, 3254 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */ 3255 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */ 3256 (void *)ra, /* caller of ilock */ 3257 (void *)(unsigned long)current_cpu(), 3258 (void *)(unsigned long)current_pid(), 3259 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); 3260 } 3261 #endif 3262 3263 /* 3264 * Return a pointer to the extent record at file index idx. 3265 */ 3266 xfs_bmbt_rec_host_t * 3267 xfs_iext_get_ext( 3268 xfs_ifork_t *ifp, /* inode fork pointer */ 3269 xfs_extnum_t idx) /* index of target extent */ 3270 { 3271 ASSERT(idx >= 0); 3272 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { 3273 return ifp->if_u1.if_ext_irec->er_extbuf; 3274 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3275 xfs_ext_irec_t *erp; /* irec pointer */ 3276 int erp_idx = 0; /* irec index */ 3277 xfs_extnum_t page_idx = idx; /* ext index in target list */ 3278 3279 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3280 return &erp->er_extbuf[page_idx]; 3281 } else if (ifp->if_bytes) { 3282 return &ifp->if_u1.if_extents[idx]; 3283 } else { 3284 return NULL; 3285 } 3286 } 3287 3288 /* 3289 * Insert new item(s) into the extent records for incore inode 3290 * fork 'ifp'. 'count' new items are inserted at index 'idx'. 3291 */ 3292 void 3293 xfs_iext_insert( 3294 xfs_ifork_t *ifp, /* inode fork pointer */ 3295 xfs_extnum_t idx, /* starting index of new items */ 3296 xfs_extnum_t count, /* number of inserted items */ 3297 xfs_bmbt_irec_t *new) /* items to insert */ 3298 { 3299 xfs_extnum_t i; /* extent record index */ 3300 3301 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3302 xfs_iext_add(ifp, idx, count); 3303 for (i = idx; i < idx + count; i++, new++) 3304 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new); 3305 } 3306 3307 /* 3308 * This is called when the amount of space required for incore file 3309 * extents needs to be increased. The ext_diff parameter stores the 3310 * number of new extents being added and the idx parameter contains 3311 * the extent index where the new extents will be added. If the new 3312 * extents are being appended, then we just need to (re)allocate and 3313 * initialize the space. Otherwise, if the new extents are being 3314 * inserted into the middle of the existing entries, a bit more work 3315 * is required to make room for the new extents to be inserted. The 3316 * caller is responsible for filling in the new extent entries upon 3317 * return. 
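 *
 * The fork's extent storage moves through three regimes as it
 * grows: the inline buffer (up to XFS_INLINE_EXTS records), a
 * single directly allocated list (up to XFS_LINEAR_EXTS records),
 * and finally the indirection array of extent pages handled at
 * the end of this function.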
3318 */ 3319 void 3320 xfs_iext_add( 3321 xfs_ifork_t *ifp, /* inode fork pointer */ 3322 xfs_extnum_t idx, /* index to begin adding exts */ 3323 int ext_diff) /* number of extents to add */ 3324 { 3325 int byte_diff; /* new bytes being added */ 3326 int new_size; /* size of extents after adding */ 3327 xfs_extnum_t nextents; /* number of extents in file */ 3328 3329 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3330 ASSERT((idx >= 0) && (idx <= nextents)); 3331 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t); 3332 new_size = ifp->if_bytes + byte_diff; 3333 /* 3334 * If the new number of extents (nextents + ext_diff) 3335 * fits inside the inode, then continue to use the inline 3336 * extent buffer. 3337 */ 3338 if (nextents + ext_diff <= XFS_INLINE_EXTS) { 3339 if (idx < nextents) { 3340 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff], 3341 &ifp->if_u2.if_inline_ext[idx], 3342 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 3343 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff); 3344 } 3345 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 3346 ifp->if_real_bytes = 0; 3347 ifp->if_lastex = nextents + ext_diff; 3348 } 3349 /* 3350 * Otherwise use a linear (direct) extent list. 3351 * If the extents are currently inside the inode, 3352 * xfs_iext_realloc_direct will switch us from 3353 * inline to direct extent allocation mode. 3354 */ 3355 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) { 3356 xfs_iext_realloc_direct(ifp, new_size); 3357 if (idx < nextents) { 3358 memmove(&ifp->if_u1.if_extents[idx + ext_diff], 3359 &ifp->if_u1.if_extents[idx], 3360 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 3361 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff); 3362 } 3363 } 3364 /* Indirection array */ 3365 else { 3366 xfs_ext_irec_t *erp; 3367 int erp_idx = 0; 3368 int page_idx = idx; 3369 3370 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS); 3371 if (ifp->if_flags & XFS_IFEXTIREC) { 3372 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1); 3373 } else { 3374 xfs_iext_irec_init(ifp); 3375 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3376 erp = ifp->if_u1.if_ext_irec; 3377 } 3378 /* Extents fit in target extent page */ 3379 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) { 3380 if (page_idx < erp->er_extcount) { 3381 memmove(&erp->er_extbuf[page_idx + ext_diff], 3382 &erp->er_extbuf[page_idx], 3383 (erp->er_extcount - page_idx) * 3384 sizeof(xfs_bmbt_rec_t)); 3385 memset(&erp->er_extbuf[page_idx], 0, byte_diff); 3386 } 3387 erp->er_extcount += ext_diff; 3388 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3389 } 3390 /* Insert a new extent page */ 3391 else if (erp) { 3392 xfs_iext_add_indirect_multi(ifp, 3393 erp_idx, page_idx, ext_diff); 3394 } 3395 /* 3396 * If extent(s) are being appended to the last page in 3397 * the indirection array and the new extent(s) don't fit 3398 * in the page, then erp is NULL and erp_idx is set to 3399 * the next index needed in the indirection array. 3400 */ 3401 else { 3402 int count = ext_diff; 3403 3404 while (count) { 3405 erp = xfs_iext_irec_new(ifp, erp_idx); 3406 erp->er_extcount = count; 3407 count -= MIN(count, (int)XFS_LINEAR_EXTS); 3408 if (count) { 3409 erp_idx++; 3410 } 3411 } 3412 } 3413 } 3414 ifp->if_bytes = new_size; 3415 } 3416 3417 /* 3418 * This is called when incore extents are being added to the indirection 3419 * array and the new extents do not fit in the target extent list. 
The 3420 * erp_idx parameter contains the irec index for the target extent list 3421 * in the indirection array, and the idx parameter contains the extent 3422 * index within the list. The number of extents being added is stored 3423 * in the count parameter. 3424 * 3425 * |-------| |-------| 3426 * | | | | idx - number of extents before idx 3427 * | idx | | count | 3428 * | | | | count - number of extents being inserted at idx 3429 * |-------| |-------| 3430 * | count | | nex2 | nex2 - number of extents after idx + count 3431 * |-------| |-------| 3432 */ 3433 void 3434 xfs_iext_add_indirect_multi( 3435 xfs_ifork_t *ifp, /* inode fork pointer */ 3436 int erp_idx, /* target extent irec index */ 3437 xfs_extnum_t idx, /* index within target list */ 3438 int count) /* new extents being added */ 3439 { 3440 int byte_diff; /* new bytes being added */ 3441 xfs_ext_irec_t *erp; /* pointer to irec entry */ 3442 xfs_extnum_t ext_diff; /* number of extents to add */ 3443 xfs_extnum_t ext_cnt; /* new extents still needed */ 3444 xfs_extnum_t nex2; /* extents after idx + count */ 3445 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */ 3446 int nlists; /* number of irec's (lists) */ 3447 3448 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3449 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3450 nex2 = erp->er_extcount - idx; 3451 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3452 3453 /* 3454 * Save second part of target extent list 3455 * (all extents past */ 3456 if (nex2) { 3457 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3458 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS); 3459 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); 3460 erp->er_extcount -= nex2; 3461 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); 3462 memset(&erp->er_extbuf[idx], 0, byte_diff); 3463 } 3464 3465 /* 3466 * Add the new extents to the end of the target 3467 * list, then allocate new irec record(s) and 3468 * extent buffer(s) as needed to store the rest 3469 * of the new extents. 3470 */ 3471 ext_cnt = count; 3472 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount); 3473 if (ext_diff) { 3474 erp->er_extcount += ext_diff; 3475 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3476 ext_cnt -= ext_diff; 3477 } 3478 while (ext_cnt) { 3479 erp_idx++; 3480 erp = xfs_iext_irec_new(ifp, erp_idx); 3481 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS); 3482 erp->er_extcount = ext_diff; 3483 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3484 ext_cnt -= ext_diff; 3485 } 3486 3487 /* Add nex2 extents back to indirection array */ 3488 if (nex2) { 3489 xfs_extnum_t ext_avail; 3490 int i; 3491 3492 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3493 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; 3494 i = 0; 3495 /* 3496 * If nex2 extents fit in the current page, append 3497 * nex2_ep after the new extents. 3498 */ 3499 if (nex2 <= ext_avail) { 3500 i = erp->er_extcount; 3501 } 3502 /* 3503 * Otherwise, check if space is available in the 3504 * next page. 3505 */ 3506 else if ((erp_idx < nlists - 1) && 3507 (nex2 <= (ext_avail = XFS_LINEAR_EXTS - 3508 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) { 3509 erp_idx++; 3510 erp++; 3511 /* Create a hole for nex2 extents */ 3512 memmove(&erp->er_extbuf[nex2], erp->er_extbuf, 3513 erp->er_extcount * sizeof(xfs_bmbt_rec_t)); 3514 } 3515 /* 3516 * Final choice, create a new extent page for 3517 * nex2 extents. 
3518 */ 3519 else { 3520 erp_idx++; 3521 erp = xfs_iext_irec_new(ifp, erp_idx); 3522 } 3523 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); 3524 kmem_free(nex2_ep); 3525 erp->er_extcount += nex2; 3526 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); 3527 } 3528 } 3529 3530 /* 3531 * This is called when the amount of space required for incore file 3532 * extents needs to be decreased. The ext_diff parameter stores the 3533 * number of extents to be removed and the idx parameter contains 3534 * the extent index where the extents will be removed from. 3535 * 3536 * If the amount of space needed has decreased below the linear 3537 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous 3538 * extent array. Otherwise, use kmem_realloc() to adjust the 3539 * size to what is needed. 3540 */ 3541 void 3542 xfs_iext_remove( 3543 xfs_ifork_t *ifp, /* inode fork pointer */ 3544 xfs_extnum_t idx, /* index to begin removing exts */ 3545 int ext_diff) /* number of extents to remove */ 3546 { 3547 xfs_extnum_t nextents; /* number of extents in file */ 3548 int new_size; /* size of extents after removal */ 3549 3550 ASSERT(ext_diff > 0); 3551 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3552 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); 3553 3554 if (new_size == 0) { 3555 xfs_iext_destroy(ifp); 3556 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3557 xfs_iext_remove_indirect(ifp, idx, ext_diff); 3558 } else if (ifp->if_real_bytes) { 3559 xfs_iext_remove_direct(ifp, idx, ext_diff); 3560 } else { 3561 xfs_iext_remove_inline(ifp, idx, ext_diff); 3562 } 3563 ifp->if_bytes = new_size; 3564 } 3565 3566 /* 3567 * This removes ext_diff extents from the inline buffer, beginning 3568 * at extent index idx. 3569 */ 3570 void 3571 xfs_iext_remove_inline( 3572 xfs_ifork_t *ifp, /* inode fork pointer */ 3573 xfs_extnum_t idx, /* index to begin removing exts */ 3574 int ext_diff) /* number of extents to remove */ 3575 { 3576 int nextents; /* number of extents in file */ 3577 3578 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 3579 ASSERT(idx < XFS_INLINE_EXTS); 3580 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3581 ASSERT(((nextents - ext_diff) > 0) && 3582 (nextents - ext_diff) < XFS_INLINE_EXTS); 3583 3584 if (idx + ext_diff < nextents) { 3585 memmove(&ifp->if_u2.if_inline_ext[idx], 3586 &ifp->if_u2.if_inline_ext[idx + ext_diff], 3587 (nextents - (idx + ext_diff)) * 3588 sizeof(xfs_bmbt_rec_t)); 3589 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff], 3590 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 3591 } else { 3592 memset(&ifp->if_u2.if_inline_ext[idx], 0, 3593 ext_diff * sizeof(xfs_bmbt_rec_t)); 3594 } 3595 } 3596 3597 /* 3598 * This removes ext_diff extents from a linear (direct) extent list, 3599 * beginning at extent index idx. If the extents are being removed 3600 * from the end of the list (ie. truncate) then we just need to re- 3601 * allocate the list to remove the extra space. Otherwise, if the 3602 * extents are being removed from the middle of the existing extent 3603 * entries, then we first need to move the extent records beginning 3604 * at idx + ext_diff up in the list to overwrite the records being 3605 * removed, then remove the extra space via kmem_realloc. 
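 *
 * For example, removing ext_diff = 2 records at idx = 3 from a
 * ten-record list moves records 5..9 down to slots 3..7, zeroes
 * the last two slots, and then shrinks the allocation.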
3606 */ 3607 void 3608 xfs_iext_remove_direct( 3609 xfs_ifork_t *ifp, /* inode fork pointer */ 3610 xfs_extnum_t idx, /* index to begin removing exts */ 3611 int ext_diff) /* number of extents to remove */ 3612 { 3613 xfs_extnum_t nextents; /* number of extents in file */ 3614 int new_size; /* size of extents after removal */ 3615 3616 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 3617 new_size = ifp->if_bytes - 3618 (ext_diff * sizeof(xfs_bmbt_rec_t)); 3619 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3620 3621 if (new_size == 0) { 3622 xfs_iext_destroy(ifp); 3623 return; 3624 } 3625 /* Move extents up in the list (if needed) */ 3626 if (idx + ext_diff < nextents) { 3627 memmove(&ifp->if_u1.if_extents[idx], 3628 &ifp->if_u1.if_extents[idx + ext_diff], 3629 (nextents - (idx + ext_diff)) * 3630 sizeof(xfs_bmbt_rec_t)); 3631 } 3632 memset(&ifp->if_u1.if_extents[nextents - ext_diff], 3633 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 3634 /* 3635 * Reallocate the direct extent list. If the extents 3636 * will fit inside the inode then xfs_iext_realloc_direct 3637 * will switch from direct to inline extent allocation 3638 * mode for us. 3639 */ 3640 xfs_iext_realloc_direct(ifp, new_size); 3641 ifp->if_bytes = new_size; 3642 } 3643 3644 /* 3645 * This is called when incore extents are being removed from the 3646 * indirection array and the extents being removed span multiple extent 3647 * buffers. The idx parameter contains the file extent index where we 3648 * want to begin removing extents, and the count parameter contains 3649 * how many extents need to be removed. 3650 * 3651 * |-------| |-------| 3652 * | nex1 | | | nex1 - number of extents before idx 3653 * |-------| | count | 3654 * | | | | count - number of extents being removed at idx 3655 * | count | |-------| 3656 * | | | nex2 | nex2 - number of extents after idx + count 3657 * |-------| |-------| 3658 */ 3659 void 3660 xfs_iext_remove_indirect( 3661 xfs_ifork_t *ifp, /* inode fork pointer */ 3662 xfs_extnum_t idx, /* index to begin removing extents */ 3663 int count) /* number of extents to remove */ 3664 { 3665 xfs_ext_irec_t *erp; /* indirection array pointer */ 3666 int erp_idx = 0; /* indirection array index */ 3667 xfs_extnum_t ext_cnt; /* extents left to remove */ 3668 xfs_extnum_t ext_diff; /* extents to remove in current list */ 3669 xfs_extnum_t nex1; /* number of extents before idx */ 3670 xfs_extnum_t nex2; /* extents after idx + count */ 3671 int nlists; /* entries in indirection array */ 3672 int page_idx = idx; /* index in target extent list */ 3673 3674 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3675 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3676 ASSERT(erp != NULL); 3677 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3678 nex1 = page_idx; 3679 ext_cnt = count; 3680 while (ext_cnt) { 3681 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0); 3682 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1)); 3683 /* 3684 * Check for deletion of entire list; 3685 * xfs_iext_irec_remove() updates extent offsets. 
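 * The entries that follow are shifted down one slot by
 * xfs_iext_irec_remove(), so erp_idx is deliberately left
 * unchanged before the loop continues.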
3686 */ 3687 if (ext_diff == erp->er_extcount) { 3688 xfs_iext_irec_remove(ifp, erp_idx); 3689 ext_cnt -= ext_diff; 3690 nex1 = 0; 3691 if (ext_cnt) { 3692 ASSERT(erp_idx < ifp->if_real_bytes / 3693 XFS_IEXT_BUFSZ); 3694 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3695 nex1 = 0; 3696 continue; 3697 } else { 3698 break; 3699 } 3700 } 3701 /* Move extents up (if needed) */ 3702 if (nex2) { 3703 memmove(&erp->er_extbuf[nex1], 3704 &erp->er_extbuf[nex1 + ext_diff], 3705 nex2 * sizeof(xfs_bmbt_rec_t)); 3706 } 3707 /* Zero out rest of page */ 3708 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ - 3709 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t)))); 3710 /* Update remaining counters */ 3711 erp->er_extcount -= ext_diff; 3712 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff); 3713 ext_cnt -= ext_diff; 3714 nex1 = 0; 3715 erp_idx++; 3716 erp++; 3717 } 3718 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t); 3719 xfs_iext_irec_compact(ifp); 3720 } 3721 3722 /* 3723 * Create, destroy, or resize a linear (direct) block of extents. 3724 */ 3725 void 3726 xfs_iext_realloc_direct( 3727 xfs_ifork_t *ifp, /* inode fork pointer */ 3728 int new_size) /* new size of extents */ 3729 { 3730 int rnew_size; /* real new size of extents */ 3731 3732 rnew_size = new_size; 3733 3734 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) || 3735 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) && 3736 (new_size != ifp->if_real_bytes))); 3737 3738 /* Free extent records */ 3739 if (new_size == 0) { 3740 xfs_iext_destroy(ifp); 3741 } 3742 /* Resize direct extent list and zero any new bytes */ 3743 else if (ifp->if_real_bytes) { 3744 /* Check if extents will fit inside the inode */ 3745 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) { 3746 xfs_iext_direct_to_inline(ifp, new_size / 3747 (uint)sizeof(xfs_bmbt_rec_t)); 3748 ifp->if_bytes = new_size; 3749 return; 3750 } 3751 if (!is_power_of_2(new_size)){ 3752 rnew_size = roundup_pow_of_two(new_size); 3753 } 3754 if (rnew_size != ifp->if_real_bytes) { 3755 ifp->if_u1.if_extents = 3756 kmem_realloc(ifp->if_u1.if_extents, 3757 rnew_size, 3758 ifp->if_real_bytes, KM_NOFS); 3759 } 3760 if (rnew_size > ifp->if_real_bytes) { 3761 memset(&ifp->if_u1.if_extents[ifp->if_bytes / 3762 (uint)sizeof(xfs_bmbt_rec_t)], 0, 3763 rnew_size - ifp->if_real_bytes); 3764 } 3765 } 3766 /* 3767 * Switch from the inline extent buffer to a direct 3768 * extent list. Be sure to include the inline extent 3769 * bytes in new_size. 3770 */ 3771 else { 3772 new_size += ifp->if_bytes; 3773 if (!is_power_of_2(new_size)) { 3774 rnew_size = roundup_pow_of_two(new_size); 3775 } 3776 xfs_iext_inline_to_direct(ifp, rnew_size); 3777 } 3778 ifp->if_real_bytes = rnew_size; 3779 ifp->if_bytes = new_size; 3780 } 3781 3782 /* 3783 * Switch from linear (direct) extent records to inline buffer. 3784 */ 3785 void 3786 xfs_iext_direct_to_inline( 3787 xfs_ifork_t *ifp, /* inode fork pointer */ 3788 xfs_extnum_t nextents) /* number of extents in file */ 3789 { 3790 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3791 ASSERT(nextents <= XFS_INLINE_EXTS); 3792 /* 3793 * The inline buffer was zeroed when we switched 3794 * from inline to direct extent allocation mode, 3795 * so we don't need to clear it here. 3796 */ 3797 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents, 3798 nextents * sizeof(xfs_bmbt_rec_t)); 3799 kmem_free(ifp->if_u1.if_extents); 3800 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 3801 ifp->if_real_bytes = 0; 3802 } 3803 3804 /* 3805 * Switch from inline buffer to linear (direct) extent records. 
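 * The direct buffer is allocated with kmem_alloc() and zeroed, any
 * records in the inline buffer are copied across, and the inline
 * copy is then cleared.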
3806 * new_size should already be rounded up to the next power of 2 3807 * by the caller (when appropriate), so use new_size as it is. 3808 * However, since new_size may be rounded up, we can't update 3809 * if_bytes here. It is the caller's responsibility to update 3810 * if_bytes upon return. 3811 */ 3812 void 3813 xfs_iext_inline_to_direct( 3814 xfs_ifork_t *ifp, /* inode fork pointer */ 3815 int new_size) /* number of extents in file */ 3816 { 3817 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS); 3818 memset(ifp->if_u1.if_extents, 0, new_size); 3819 if (ifp->if_bytes) { 3820 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, 3821 ifp->if_bytes); 3822 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 3823 sizeof(xfs_bmbt_rec_t)); 3824 } 3825 ifp->if_real_bytes = new_size; 3826 } 3827 3828 /* 3829 * Resize an extent indirection array to new_size bytes. 3830 */ 3831 void 3832 xfs_iext_realloc_indirect( 3833 xfs_ifork_t *ifp, /* inode fork pointer */ 3834 int new_size) /* new indirection array size */ 3835 { 3836 int nlists; /* number of irec's (ex lists) */ 3837 int size; /* current indirection array size */ 3838 3839 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3840 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3841 size = nlists * sizeof(xfs_ext_irec_t); 3842 ASSERT(ifp->if_real_bytes); 3843 ASSERT((new_size >= 0) && (new_size != size)); 3844 if (new_size == 0) { 3845 xfs_iext_destroy(ifp); 3846 } else { 3847 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) 3848 kmem_realloc(ifp->if_u1.if_ext_irec, 3849 new_size, size, KM_NOFS); 3850 } 3851 } 3852 3853 /* 3854 * Switch from indirection array to linear (direct) extent allocations. 3855 */ 3856 void 3857 xfs_iext_indirect_to_direct( 3858 xfs_ifork_t *ifp) /* inode fork pointer */ 3859 { 3860 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ 3861 xfs_extnum_t nextents; /* number of extents in file */ 3862 int size; /* size of file extents */ 3863 3864 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3865 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3866 ASSERT(nextents <= XFS_LINEAR_EXTS); 3867 size = nextents * sizeof(xfs_bmbt_rec_t); 3868 3869 xfs_iext_irec_compact_pages(ifp); 3870 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); 3871 3872 ep = ifp->if_u1.if_ext_irec->er_extbuf; 3873 kmem_free(ifp->if_u1.if_ext_irec); 3874 ifp->if_flags &= ~XFS_IFEXTIREC; 3875 ifp->if_u1.if_extents = ep; 3876 ifp->if_bytes = size; 3877 if (nextents < XFS_LINEAR_EXTS) { 3878 xfs_iext_realloc_direct(ifp, size); 3879 } 3880 } 3881 3882 /* 3883 * Free incore file extents. 3884 */ 3885 void 3886 xfs_iext_destroy( 3887 xfs_ifork_t *ifp) /* inode fork pointer */ 3888 { 3889 if (ifp->if_flags & XFS_IFEXTIREC) { 3890 int erp_idx; 3891 int nlists; 3892 3893 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3894 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) { 3895 xfs_iext_irec_remove(ifp, erp_idx); 3896 } 3897 ifp->if_flags &= ~XFS_IFEXTIREC; 3898 } else if (ifp->if_real_bytes) { 3899 kmem_free(ifp->if_u1.if_extents); 3900 } else if (ifp->if_bytes) { 3901 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 3902 sizeof(xfs_bmbt_rec_t)); 3903 } 3904 ifp->if_u1.if_extents = NULL; 3905 ifp->if_real_bytes = 0; 3906 ifp->if_bytes = 0; 3907 } 3908 3909 /* 3910 * Return a pointer to the extent record for file system block bno. 
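 *
 * A binary search is used on the target extent list (or on the single
 * direct list when no indirection array is in use).  The record whose
 * range covers bno is returned; if bno falls in a hole, the first
 * record after the hole is returned instead, and NULL is returned
 * (with *idxp set to the total extent count) when bno lies beyond
 * the last extent.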
3911 */ 3912 xfs_bmbt_rec_host_t * /* pointer to found extent record */ 3913 xfs_iext_bno_to_ext( 3914 xfs_ifork_t *ifp, /* inode fork pointer */ 3915 xfs_fileoff_t bno, /* block number to search for */ 3916 xfs_extnum_t *idxp) /* index of target extent */ 3917 { 3918 xfs_bmbt_rec_host_t *base; /* pointer to first extent */ 3919 xfs_filblks_t blockcount = 0; /* number of blocks in extent */ 3920 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */ 3921 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 3922 int high; /* upper boundary in search */ 3923 xfs_extnum_t idx = 0; /* index of target extent */ 3924 int low; /* lower boundary in search */ 3925 xfs_extnum_t nextents; /* number of file extents */ 3926 xfs_fileoff_t startoff = 0; /* start offset of extent */ 3927 3928 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3929 if (nextents == 0) { 3930 *idxp = 0; 3931 return NULL; 3932 } 3933 low = 0; 3934 if (ifp->if_flags & XFS_IFEXTIREC) { 3935 /* Find target extent list */ 3936 int erp_idx = 0; 3937 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx); 3938 base = erp->er_extbuf; 3939 high = erp->er_extcount - 1; 3940 } else { 3941 base = ifp->if_u1.if_extents; 3942 high = nextents - 1; 3943 } 3944 /* Binary search extent records */ 3945 while (low <= high) { 3946 idx = (low + high) >> 1; 3947 ep = base + idx; 3948 startoff = xfs_bmbt_get_startoff(ep); 3949 blockcount = xfs_bmbt_get_blockcount(ep); 3950 if (bno < startoff) { 3951 high = idx - 1; 3952 } else if (bno >= startoff + blockcount) { 3953 low = idx + 1; 3954 } else { 3955 /* Convert back to file-based extent index */ 3956 if (ifp->if_flags & XFS_IFEXTIREC) { 3957 idx += erp->er_extoff; 3958 } 3959 *idxp = idx; 3960 return ep; 3961 } 3962 } 3963 /* Convert back to file-based extent index */ 3964 if (ifp->if_flags & XFS_IFEXTIREC) { 3965 idx += erp->er_extoff; 3966 } 3967 if (bno >= startoff + blockcount) { 3968 if (++idx == nextents) { 3969 ep = NULL; 3970 } else { 3971 ep = xfs_iext_get_ext(ifp, idx); 3972 } 3973 } 3974 *idxp = idx; 3975 return ep; 3976 } 3977 3978 /* 3979 * Return a pointer to the indirection array entry containing the 3980 * extent record for filesystem block bno. Store the index of the 3981 * target irec in *erp_idxp. 3982 */ 3983 xfs_ext_irec_t * /* pointer to found extent record */ 3984 xfs_iext_bno_to_irec( 3985 xfs_ifork_t *ifp, /* inode fork pointer */ 3986 xfs_fileoff_t bno, /* block number to search for */ 3987 int *erp_idxp) /* irec index of target ext list */ 3988 { 3989 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 3990 xfs_ext_irec_t *erp_next; /* next indirection array entry */ 3991 int erp_idx; /* indirection array index */ 3992 int nlists; /* number of extent irec's (lists) */ 3993 int high; /* binary search upper limit */ 3994 int low; /* binary search lower limit */ 3995 3996 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3997 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3998 erp_idx = 0; 3999 low = 0; 4000 high = nlists - 1; 4001 while (low <= high) { 4002 erp_idx = (low + high) >> 1; 4003 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4004 erp_next = erp_idx < nlists - 1 ? 
erp + 1 : NULL; 4005 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) { 4006 high = erp_idx - 1; 4007 } else if (erp_next && bno >= 4008 xfs_bmbt_get_startoff(erp_next->er_extbuf)) { 4009 low = erp_idx + 1; 4010 } else { 4011 break; 4012 } 4013 } 4014 *erp_idxp = erp_idx; 4015 return erp; 4016 } 4017 4018 /* 4019 * Return a pointer to the indirection array entry containing the 4020 * extent record at file extent index *idxp. Store the index of the 4021 * target irec in *erp_idxp and store the page index of the target 4022 * extent record in *idxp. 4023 */ 4024 xfs_ext_irec_t * 4025 xfs_iext_idx_to_irec( 4026 xfs_ifork_t *ifp, /* inode fork pointer */ 4027 xfs_extnum_t *idxp, /* extent index (file -> page) */ 4028 int *erp_idxp, /* pointer to target irec */ 4029 int realloc) /* new bytes were just added */ 4030 { 4031 xfs_ext_irec_t *prev; /* pointer to previous irec */ 4032 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */ 4033 int erp_idx; /* indirection array index */ 4034 int nlists; /* number of irec's (ex lists) */ 4035 int high; /* binary search upper limit */ 4036 int low; /* binary search lower limit */ 4037 xfs_extnum_t page_idx = *idxp; /* extent index in target list */ 4038 4039 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4040 ASSERT(page_idx >= 0 && page_idx <= 4041 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); 4042 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4043 erp_idx = 0; 4044 low = 0; 4045 high = nlists - 1; 4046 4047 /* Binary search extent irec's */ 4048 while (low <= high) { 4049 erp_idx = (low + high) >> 1; 4050 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4051 prev = erp_idx > 0 ? erp - 1 : NULL; 4052 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff && 4053 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) { 4054 high = erp_idx - 1; 4055 } else if (page_idx > erp->er_extoff + erp->er_extcount || 4056 (page_idx == erp->er_extoff + erp->er_extcount && 4057 !realloc)) { 4058 low = erp_idx + 1; 4059 } else if (page_idx == erp->er_extoff + erp->er_extcount && 4060 erp->er_extcount == XFS_LINEAR_EXTS) { 4061 ASSERT(realloc); 4062 page_idx = 0; 4063 erp_idx++; 4064 erp = erp_idx < nlists ? erp + 1 : NULL; 4065 break; 4066 } else { 4067 page_idx -= erp->er_extoff; 4068 break; 4069 } 4070 } 4071 *idxp = page_idx; 4072 *erp_idxp = erp_idx; 4073 return(erp); 4074 } 4075 4076 /* 4077 * Allocate and initialize an indirection array once the space needed 4078 * for incore extents increases above XFS_IEXT_BUFSZ. 
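 *
 * A caller growing the fork would typically check that threshold before
 * adding records; a minimal sketch (illustrative only, not the exact
 * caller in this file) looks like:
 *
 *	if (!(ifp->if_flags & XFS_IFEXTIREC) &&
 *	    ifp->if_bytes + ext_diff * sizeof(xfs_bmbt_rec_t) > XFS_IEXT_BUFSZ)
 *		xfs_iext_irec_init(ifp);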
4079 */ 4080 void 4081 xfs_iext_irec_init( 4082 xfs_ifork_t *ifp) /* inode fork pointer */ 4083 { 4084 xfs_ext_irec_t *erp; /* indirection array pointer */ 4085 xfs_extnum_t nextents; /* number of extents in file */ 4086 4087 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 4088 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4089 ASSERT(nextents <= XFS_LINEAR_EXTS); 4090 4091 erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS); 4092 4093 if (nextents == 0) { 4094 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); 4095 } else if (!ifp->if_real_bytes) { 4096 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); 4097 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { 4098 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ); 4099 } 4100 erp->er_extbuf = ifp->if_u1.if_extents; 4101 erp->er_extcount = nextents; 4102 erp->er_extoff = 0; 4103 4104 ifp->if_flags |= XFS_IFEXTIREC; 4105 ifp->if_real_bytes = XFS_IEXT_BUFSZ; 4106 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t); 4107 ifp->if_u1.if_ext_irec = erp; 4108 4109 return; 4110 } 4111 4112 /* 4113 * Allocate and initialize a new entry in the indirection array. 4114 */ 4115 xfs_ext_irec_t * 4116 xfs_iext_irec_new( 4117 xfs_ifork_t *ifp, /* inode fork pointer */ 4118 int erp_idx) /* index for new irec */ 4119 { 4120 xfs_ext_irec_t *erp; /* indirection array pointer */ 4121 int i; /* loop counter */ 4122 int nlists; /* number of irec's (ex lists) */ 4123 4124 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4125 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4126 4127 /* Resize indirection array */ 4128 xfs_iext_realloc_indirect(ifp, ++nlists * 4129 sizeof(xfs_ext_irec_t)); 4130 /* 4131 * Move records down in the array so the 4132 * new page can use erp_idx. 4133 */ 4134 erp = ifp->if_u1.if_ext_irec; 4135 for (i = nlists - 1; i > erp_idx; i--) { 4136 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t)); 4137 } 4138 ASSERT(i == erp_idx); 4139 4140 /* Initialize new extent record */ 4141 erp = ifp->if_u1.if_ext_irec; 4142 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); 4143 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 4144 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); 4145 erp[erp_idx].er_extcount = 0; 4146 erp[erp_idx].er_extoff = erp_idx > 0 ? 4147 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0; 4148 return (&erp[erp_idx]); 4149 } 4150 4151 /* 4152 * Remove a record from the indirection array. 4153 */ 4154 void 4155 xfs_iext_irec_remove( 4156 xfs_ifork_t *ifp, /* inode fork pointer */ 4157 int erp_idx) /* irec index to remove */ 4158 { 4159 xfs_ext_irec_t *erp; /* indirection array pointer */ 4160 int i; /* loop counter */ 4161 int nlists; /* number of irec's (ex lists) */ 4162 4163 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4164 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4165 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4166 if (erp->er_extbuf) { 4167 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, 4168 -erp->er_extcount); 4169 kmem_free(erp->er_extbuf); 4170 } 4171 /* Compact extent records */ 4172 erp = ifp->if_u1.if_ext_irec; 4173 for (i = erp_idx; i < nlists - 1; i++) { 4174 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t)); 4175 } 4176 /* 4177 * Manually free the last extent record from the indirection 4178 * array. A call to xfs_iext_realloc_indirect() with a size 4179 * of zero would result in a call to xfs_iext_destroy() which 4180 * would in turn call this function again, creating a nasty 4181 * infinite loop. 
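 * (xfs_iext_destroy() walks every irec entry and calls back into
 * xfs_iext_irec_remove() for each one, hence the recursion hazard.)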
4182 */ 4183 if (--nlists) { 4184 xfs_iext_realloc_indirect(ifp, 4185 nlists * sizeof(xfs_ext_irec_t)); 4186 } else { 4187 kmem_free(ifp->if_u1.if_ext_irec); 4188 } 4189 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 4190 } 4191 4192 /* 4193 * This is called to clean up large amounts of unused memory allocated 4194 * by the indirection array. Before compacting anything though, verify 4195 * that the indirection array is still needed and switch back to the 4196 * linear extent list (or even the inline buffer) if possible. The 4197 * compaction policy is as follows: 4198 * 4199 * Full Compaction: Extents fit into a single page (or inline buffer) 4200 * Partial Compaction: Extents occupy less than 50% of allocated space 4201 * No Compaction: Extents occupy at least 50% of allocated space 4202 */ 4203 void 4204 xfs_iext_irec_compact( 4205 xfs_ifork_t *ifp) /* inode fork pointer */ 4206 { 4207 xfs_extnum_t nextents; /* number of extents in file */ 4208 int nlists; /* number of irec's (ex lists) */ 4209 4210 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4211 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4212 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4213 4214 if (nextents == 0) { 4215 xfs_iext_destroy(ifp); 4216 } else if (nextents <= XFS_INLINE_EXTS) { 4217 xfs_iext_indirect_to_direct(ifp); 4218 xfs_iext_direct_to_inline(ifp, nextents); 4219 } else if (nextents <= XFS_LINEAR_EXTS) { 4220 xfs_iext_indirect_to_direct(ifp); 4221 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) { 4222 xfs_iext_irec_compact_pages(ifp); 4223 } 4224 } 4225 4226 /* 4227 * Combine extents from neighboring extent pages. 4228 */ 4229 void 4230 xfs_iext_irec_compact_pages( 4231 xfs_ifork_t *ifp) /* inode fork pointer */ 4232 { 4233 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */ 4234 int erp_idx = 0; /* indirection array index */ 4235 int nlists; /* number of irec's (ex lists) */ 4236 4237 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4238 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4239 while (erp_idx < nlists - 1) { 4240 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4241 erp_next = erp + 1; 4242 if (erp_next->er_extcount <= 4243 (XFS_LINEAR_EXTS - erp->er_extcount)) { 4244 memcpy(&erp->er_extbuf[erp->er_extcount], 4245 erp_next->er_extbuf, erp_next->er_extcount * 4246 sizeof(xfs_bmbt_rec_t)); 4247 erp->er_extcount += erp_next->er_extcount; 4248 /* 4249 * Free page before removing extent record 4250 * so er_extoffs don't get modified in 4251 * xfs_iext_irec_remove. 4252 */ 4253 kmem_free(erp_next->er_extbuf); 4254 erp_next->er_extbuf = NULL; 4255 xfs_iext_irec_remove(ifp, erp_idx + 1); 4256 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4257 } else { 4258 erp_idx++; 4259 } 4260 } 4261 } 4262 4263 /* 4264 * This is called to update the er_extoff field in the indirection 4265 * array when extents have been added or removed from one of the 4266 * extent lists. erp_idx contains the irec index to begin updating 4267 * at and ext_diff contains the number of extents that were added 4268 * or removed. 4269 */ 4270 void 4271 xfs_iext_irec_update_extoffs( 4272 xfs_ifork_t *ifp, /* inode fork pointer */ 4273 int erp_idx, /* irec index to update */ 4274 int ext_diff) /* number of new extents */ 4275 { 4276 int i; /* loop counter */ 4277 int nlists; /* number of irec's (ex lists */ 4278 4279 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4280 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4281 for (i = erp_idx; i < nlists; i++) { 4282 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff; 4283 } 4284 } 4285
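
/*
 * Illustrative example only (hypothetical layout, not produced by this
 * file): with three irec pages holding 64, 10 and 32 extents, er_extoff
 * is 0, 64 and 74 respectively.  Adding 5 extents to the middle page and
 * then calling xfs_iext_irec_update_extoffs(ifp, 2, 5) bumps the last
 * page's er_extoff to 79, preserving the invariant that each er_extoff
 * equals the sum of er_extcount over all earlier pages.
 */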