1 /* 2 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 #include <linux/log2.h> 19 20 #include "xfs.h" 21 #include "xfs_fs.h" 22 #include "xfs_types.h" 23 #include "xfs_bit.h" 24 #include "xfs_log.h" 25 #include "xfs_inum.h" 26 #include "xfs_trans.h" 27 #include "xfs_trans_priv.h" 28 #include "xfs_sb.h" 29 #include "xfs_ag.h" 30 #include "xfs_dir2.h" 31 #include "xfs_dmapi.h" 32 #include "xfs_mount.h" 33 #include "xfs_bmap_btree.h" 34 #include "xfs_alloc_btree.h" 35 #include "xfs_ialloc_btree.h" 36 #include "xfs_dir2_sf.h" 37 #include "xfs_attr_sf.h" 38 #include "xfs_dinode.h" 39 #include "xfs_inode.h" 40 #include "xfs_buf_item.h" 41 #include "xfs_inode_item.h" 42 #include "xfs_btree.h" 43 #include "xfs_btree_trace.h" 44 #include "xfs_alloc.h" 45 #include "xfs_ialloc.h" 46 #include "xfs_bmap.h" 47 #include "xfs_rw.h" 48 #include "xfs_error.h" 49 #include "xfs_utils.h" 50 #include "xfs_dir2_trace.h" 51 #include "xfs_quota.h" 52 #include "xfs_filestream.h" 53 #include "xfs_vnodeops.h" 54 55 kmem_zone_t *xfs_ifork_zone; 56 kmem_zone_t *xfs_inode_zone; 57 58 /* 59 * Used in xfs_itruncate(). This is the maximum number of extents 60 * freed from a file in a single transaction. 61 */ 62 #define XFS_ITRUNC_MAX_EXTENTS 2 63 64 STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *); 65 STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int); 66 STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int); 67 STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int); 68 69 #ifdef DEBUG 70 /* 71 * Make sure that the extents in the given memory buffer 72 * are valid. 73 */ 74 STATIC void 75 xfs_validate_extents( 76 xfs_ifork_t *ifp, 77 int nrecs, 78 xfs_exntfmt_t fmt) 79 { 80 xfs_bmbt_irec_t irec; 81 xfs_bmbt_rec_host_t rec; 82 int i; 83 84 for (i = 0; i < nrecs; i++) { 85 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); 86 rec.l0 = get_unaligned(&ep->l0); 87 rec.l1 = get_unaligned(&ep->l1); 88 xfs_bmbt_get_all(&rec, &irec); 89 if (fmt == XFS_EXTFMT_NOSTATE) 90 ASSERT(irec.br_state == XFS_EXT_NORM); 91 } 92 } 93 #else /* DEBUG */ 94 #define xfs_validate_extents(ifp, nrecs, fmt) 95 #endif /* DEBUG */ 96 97 /* 98 * Check that none of the inode's in the buffer have a next 99 * unlinked field of 0. 100 */ 101 #if defined(DEBUG) 102 void 103 xfs_inobp_check( 104 xfs_mount_t *mp, 105 xfs_buf_t *bp) 106 { 107 int i; 108 int j; 109 xfs_dinode_t *dip; 110 111 j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog; 112 113 for (i = 0; i < j; i++) { 114 dip = (xfs_dinode_t *)xfs_buf_offset(bp, 115 i * mp->m_sb.sb_inodesize); 116 if (!dip->di_next_unlinked) { 117 xfs_fs_cmn_err(CE_ALERT, mp, 118 "Detected a bogus zero next_unlinked field in incore inode buffer 0x%p. About to pop an ASSERT.", 119 bp); 120 ASSERT(dip->di_next_unlinked); 121 } 122 } 123 } 124 #endif 125 126 /* 127 * Find the buffer associated with the given inode map 128 * We do basic validation checks on the buffer once it has been 129 * retrieved from disk. 130 */ 131 STATIC int 132 xfs_imap_to_bp( 133 xfs_mount_t *mp, 134 xfs_trans_t *tp, 135 struct xfs_imap *imap, 136 xfs_buf_t **bpp, 137 uint buf_flags, 138 uint iget_flags) 139 { 140 int error; 141 int i; 142 int ni; 143 xfs_buf_t *bp; 144 145 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno, 146 (int)imap->im_len, buf_flags, &bp); 147 if (error) { 148 if (error != EAGAIN) { 149 cmn_err(CE_WARN, 150 "xfs_imap_to_bp: xfs_trans_read_buf()returned " 151 "an error %d on %s. Returning error.", 152 error, mp->m_fsname); 153 } else { 154 ASSERT(buf_flags & XFS_BUF_TRYLOCK); 155 } 156 return error; 157 } 158 159 /* 160 * Validate the magic number and version of every inode in the buffer 161 * (if DEBUG kernel) or the first inode in the buffer, otherwise. 162 */ 163 #ifdef DEBUG 164 ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog; 165 #else /* usual case */ 166 ni = 1; 167 #endif 168 169 for (i = 0; i < ni; i++) { 170 int di_ok; 171 xfs_dinode_t *dip; 172 173 dip = (xfs_dinode_t *)xfs_buf_offset(bp, 174 (i << mp->m_sb.sb_inodelog)); 175 di_ok = be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC && 176 XFS_DINODE_GOOD_VERSION(dip->di_version); 177 if (unlikely(XFS_TEST_ERROR(!di_ok, mp, 178 XFS_ERRTAG_ITOBP_INOTOBP, 179 XFS_RANDOM_ITOBP_INOTOBP))) { 180 if (iget_flags & XFS_IGET_BULKSTAT) { 181 xfs_trans_brelse(tp, bp); 182 return XFS_ERROR(EINVAL); 183 } 184 XFS_CORRUPTION_ERROR("xfs_imap_to_bp", 185 XFS_ERRLEVEL_HIGH, mp, dip); 186 #ifdef DEBUG 187 cmn_err(CE_PANIC, 188 "Device %s - bad inode magic/vsn " 189 "daddr %lld #%d (magic=%x)", 190 XFS_BUFTARG_NAME(mp->m_ddev_targp), 191 (unsigned long long)imap->im_blkno, i, 192 be16_to_cpu(dip->di_magic)); 193 #endif 194 xfs_trans_brelse(tp, bp); 195 return XFS_ERROR(EFSCORRUPTED); 196 } 197 } 198 199 xfs_inobp_check(mp, bp); 200 201 /* 202 * Mark the buffer as an inode buffer now that it looks good 203 */ 204 XFS_BUF_SET_VTYPE(bp, B_FS_INO); 205 206 *bpp = bp; 207 return 0; 208 } 209 210 /* 211 * This routine is called to map an inode number within a file 212 * system to the buffer containing the on-disk version of the 213 * inode. It returns a pointer to the buffer containing the 214 * on-disk inode in the bpp parameter, and in the dip parameter 215 * it returns a pointer to the on-disk inode within that buffer. 216 * 217 * If a non-zero error is returned, then the contents of bpp and 218 * dipp are undefined. 219 * 220 * Use xfs_imap() to determine the size and location of the 221 * buffer to read from disk. 222 */ 223 int 224 xfs_inotobp( 225 xfs_mount_t *mp, 226 xfs_trans_t *tp, 227 xfs_ino_t ino, 228 xfs_dinode_t **dipp, 229 xfs_buf_t **bpp, 230 int *offset, 231 uint imap_flags) 232 { 233 struct xfs_imap imap; 234 xfs_buf_t *bp; 235 int error; 236 237 imap.im_blkno = 0; 238 error = xfs_imap(mp, tp, ino, &imap, imap_flags); 239 if (error) 240 return error; 241 242 error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags); 243 if (error) 244 return error; 245 246 *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); 247 *bpp = bp; 248 *offset = imap.im_boffset; 249 return 0; 250 } 251 252 253 /* 254 * This routine is called to map an inode to the buffer containing 255 * the on-disk version of the inode. It returns a pointer to the 256 * buffer containing the on-disk inode in the bpp parameter, and in 257 * the dip parameter it returns a pointer to the on-disk inode within 258 * that buffer. 259 * 260 * If a non-zero error is returned, then the contents of bpp and 261 * dipp are undefined. 262 * 263 * The inode is expected to already been mapped to its buffer and read 264 * in once, thus we can use the mapping information stored in the inode 265 * rather than calling xfs_imap(). This allows us to avoid the overhead 266 * of looking at the inode btree for small block file systems 267 * (see xfs_imap()). 268 */ 269 int 270 xfs_itobp( 271 xfs_mount_t *mp, 272 xfs_trans_t *tp, 273 xfs_inode_t *ip, 274 xfs_dinode_t **dipp, 275 xfs_buf_t **bpp, 276 uint buf_flags) 277 { 278 xfs_buf_t *bp; 279 int error; 280 281 ASSERT(ip->i_imap.im_blkno != 0); 282 283 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0); 284 if (error) 285 return error; 286 287 if (!bp) { 288 ASSERT(buf_flags & XFS_BUF_TRYLOCK); 289 ASSERT(tp == NULL); 290 *bpp = NULL; 291 return EAGAIN; 292 } 293 294 *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); 295 *bpp = bp; 296 return 0; 297 } 298 299 /* 300 * Move inode type and inode format specific information from the 301 * on-disk inode to the in-core inode. For fifos, devs, and sockets 302 * this means set if_rdev to the proper value. For files, directories, 303 * and symlinks this means to bring in the in-line data or extent 304 * pointers. For a file in B-tree format, only the root is immediately 305 * brought in-core. The rest will be in-lined in if_extents when it 306 * is first referenced (see xfs_iread_extents()). 307 */ 308 STATIC int 309 xfs_iformat( 310 xfs_inode_t *ip, 311 xfs_dinode_t *dip) 312 { 313 xfs_attr_shortform_t *atp; 314 int size; 315 int error; 316 xfs_fsize_t di_size; 317 ip->i_df.if_ext_max = 318 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 319 error = 0; 320 321 if (unlikely(be32_to_cpu(dip->di_nextents) + 322 be16_to_cpu(dip->di_anextents) > 323 be64_to_cpu(dip->di_nblocks))) { 324 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 325 "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.", 326 (unsigned long long)ip->i_ino, 327 (int)(be32_to_cpu(dip->di_nextents) + 328 be16_to_cpu(dip->di_anextents)), 329 (unsigned long long) 330 be64_to_cpu(dip->di_nblocks)); 331 XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW, 332 ip->i_mount, dip); 333 return XFS_ERROR(EFSCORRUPTED); 334 } 335 336 if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) { 337 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 338 "corrupt dinode %Lu, forkoff = 0x%x.", 339 (unsigned long long)ip->i_ino, 340 dip->di_forkoff); 341 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, 342 ip->i_mount, dip); 343 return XFS_ERROR(EFSCORRUPTED); 344 } 345 346 switch (ip->i_d.di_mode & S_IFMT) { 347 case S_IFIFO: 348 case S_IFCHR: 349 case S_IFBLK: 350 case S_IFSOCK: 351 if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) { 352 XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW, 353 ip->i_mount, dip); 354 return XFS_ERROR(EFSCORRUPTED); 355 } 356 ip->i_d.di_size = 0; 357 ip->i_size = 0; 358 ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip); 359 break; 360 361 case S_IFREG: 362 case S_IFLNK: 363 case S_IFDIR: 364 switch (dip->di_format) { 365 case XFS_DINODE_FMT_LOCAL: 366 /* 367 * no local regular files yet 368 */ 369 if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) { 370 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 371 "corrupt inode %Lu " 372 "(local format for regular file).", 373 (unsigned long long) ip->i_ino); 374 XFS_CORRUPTION_ERROR("xfs_iformat(4)", 375 XFS_ERRLEVEL_LOW, 376 ip->i_mount, dip); 377 return XFS_ERROR(EFSCORRUPTED); 378 } 379 380 di_size = be64_to_cpu(dip->di_size); 381 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) { 382 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 383 "corrupt inode %Lu " 384 "(bad size %Ld for local inode).", 385 (unsigned long long) ip->i_ino, 386 (long long) di_size); 387 XFS_CORRUPTION_ERROR("xfs_iformat(5)", 388 XFS_ERRLEVEL_LOW, 389 ip->i_mount, dip); 390 return XFS_ERROR(EFSCORRUPTED); 391 } 392 393 size = (int)di_size; 394 error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size); 395 break; 396 case XFS_DINODE_FMT_EXTENTS: 397 error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK); 398 break; 399 case XFS_DINODE_FMT_BTREE: 400 error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK); 401 break; 402 default: 403 XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW, 404 ip->i_mount); 405 return XFS_ERROR(EFSCORRUPTED); 406 } 407 break; 408 409 default: 410 XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount); 411 return XFS_ERROR(EFSCORRUPTED); 412 } 413 if (error) { 414 return error; 415 } 416 if (!XFS_DFORK_Q(dip)) 417 return 0; 418 ASSERT(ip->i_afp == NULL); 419 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); 420 ip->i_afp->if_ext_max = 421 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 422 switch (dip->di_aformat) { 423 case XFS_DINODE_FMT_LOCAL: 424 atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); 425 size = be16_to_cpu(atp->hdr.totsize); 426 427 if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) { 428 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 429 "corrupt inode %Lu " 430 "(bad attr fork size %Ld).", 431 (unsigned long long) ip->i_ino, 432 (long long) size); 433 XFS_CORRUPTION_ERROR("xfs_iformat(8)", 434 XFS_ERRLEVEL_LOW, 435 ip->i_mount, dip); 436 return XFS_ERROR(EFSCORRUPTED); 437 } 438 439 error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); 440 break; 441 case XFS_DINODE_FMT_EXTENTS: 442 error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK); 443 break; 444 case XFS_DINODE_FMT_BTREE: 445 error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); 446 break; 447 default: 448 error = XFS_ERROR(EFSCORRUPTED); 449 break; 450 } 451 if (error) { 452 kmem_zone_free(xfs_ifork_zone, ip->i_afp); 453 ip->i_afp = NULL; 454 xfs_idestroy_fork(ip, XFS_DATA_FORK); 455 } 456 return error; 457 } 458 459 /* 460 * The file is in-lined in the on-disk inode. 461 * If it fits into if_inline_data, then copy 462 * it there, otherwise allocate a buffer for it 463 * and copy the data there. Either way, set 464 * if_data to point at the data. 465 * If we allocate a buffer for the data, make 466 * sure that its size is a multiple of 4 and 467 * record the real size in i_real_bytes. 468 */ 469 STATIC int 470 xfs_iformat_local( 471 xfs_inode_t *ip, 472 xfs_dinode_t *dip, 473 int whichfork, 474 int size) 475 { 476 xfs_ifork_t *ifp; 477 int real_size; 478 479 /* 480 * If the size is unreasonable, then something 481 * is wrong and we just bail out rather than crash in 482 * kmem_alloc() or memcpy() below. 483 */ 484 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 485 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 486 "corrupt inode %Lu " 487 "(bad size %d for local fork, size = %d).", 488 (unsigned long long) ip->i_ino, size, 489 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); 490 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, 491 ip->i_mount, dip); 492 return XFS_ERROR(EFSCORRUPTED); 493 } 494 ifp = XFS_IFORK_PTR(ip, whichfork); 495 real_size = 0; 496 if (size == 0) 497 ifp->if_u1.if_data = NULL; 498 else if (size <= sizeof(ifp->if_u2.if_inline_data)) 499 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 500 else { 501 real_size = roundup(size, 4); 502 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 503 } 504 ifp->if_bytes = size; 505 ifp->if_real_bytes = real_size; 506 if (size) 507 memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size); 508 ifp->if_flags &= ~XFS_IFEXTENTS; 509 ifp->if_flags |= XFS_IFINLINE; 510 return 0; 511 } 512 513 /* 514 * The file consists of a set of extents all 515 * of which fit into the on-disk inode. 516 * If there are few enough extents to fit into 517 * the if_inline_ext, then copy them there. 518 * Otherwise allocate a buffer for them and copy 519 * them into it. Either way, set if_extents 520 * to point at the extents. 521 */ 522 STATIC int 523 xfs_iformat_extents( 524 xfs_inode_t *ip, 525 xfs_dinode_t *dip, 526 int whichfork) 527 { 528 xfs_bmbt_rec_t *dp; 529 xfs_ifork_t *ifp; 530 int nex; 531 int size; 532 int i; 533 534 ifp = XFS_IFORK_PTR(ip, whichfork); 535 nex = XFS_DFORK_NEXTENTS(dip, whichfork); 536 size = nex * (uint)sizeof(xfs_bmbt_rec_t); 537 538 /* 539 * If the number of extents is unreasonable, then something 540 * is wrong and we just bail out rather than crash in 541 * kmem_alloc() or memcpy() below. 542 */ 543 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 544 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 545 "corrupt inode %Lu ((a)extents = %d).", 546 (unsigned long long) ip->i_ino, nex); 547 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, 548 ip->i_mount, dip); 549 return XFS_ERROR(EFSCORRUPTED); 550 } 551 552 ifp->if_real_bytes = 0; 553 if (nex == 0) 554 ifp->if_u1.if_extents = NULL; 555 else if (nex <= XFS_INLINE_EXTS) 556 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 557 else 558 xfs_iext_add(ifp, 0, nex); 559 560 ifp->if_bytes = size; 561 if (size) { 562 dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork); 563 xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip)); 564 for (i = 0; i < nex; i++, dp++) { 565 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); 566 ep->l0 = get_unaligned_be64(&dp->l0); 567 ep->l1 = get_unaligned_be64(&dp->l1); 568 } 569 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork); 570 if (whichfork != XFS_DATA_FORK || 571 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) 572 if (unlikely(xfs_check_nostate_extents( 573 ifp, 0, nex))) { 574 XFS_ERROR_REPORT("xfs_iformat_extents(2)", 575 XFS_ERRLEVEL_LOW, 576 ip->i_mount); 577 return XFS_ERROR(EFSCORRUPTED); 578 } 579 } 580 ifp->if_flags |= XFS_IFEXTENTS; 581 return 0; 582 } 583 584 /* 585 * The file has too many extents to fit into 586 * the inode, so they are in B-tree format. 587 * Allocate a buffer for the root of the B-tree 588 * and copy the root into it. The i_extents 589 * field will remain NULL until all of the 590 * extents are read in (when they are needed). 591 */ 592 STATIC int 593 xfs_iformat_btree( 594 xfs_inode_t *ip, 595 xfs_dinode_t *dip, 596 int whichfork) 597 { 598 xfs_bmdr_block_t *dfp; 599 xfs_ifork_t *ifp; 600 /* REFERENCED */ 601 int nrecs; 602 int size; 603 604 ifp = XFS_IFORK_PTR(ip, whichfork); 605 dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork); 606 size = XFS_BMAP_BROOT_SPACE(dfp); 607 nrecs = be16_to_cpu(dfp->bb_numrecs); 608 609 /* 610 * blow out if -- fork has less extents than can fit in 611 * fork (fork shouldn't be a btree format), root btree 612 * block has more records than can fit into the fork, 613 * or the number of extents is greater than the number of 614 * blocks. 615 */ 616 if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max 617 || XFS_BMDR_SPACE_CALC(nrecs) > 618 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) 619 || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) { 620 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 621 "corrupt inode %Lu (btree).", 622 (unsigned long long) ip->i_ino); 623 XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW, 624 ip->i_mount); 625 return XFS_ERROR(EFSCORRUPTED); 626 } 627 628 ifp->if_broot_bytes = size; 629 ifp->if_broot = kmem_alloc(size, KM_SLEEP); 630 ASSERT(ifp->if_broot != NULL); 631 /* 632 * Copy and convert from the on-disk structure 633 * to the in-memory structure. 634 */ 635 xfs_bmdr_to_bmbt(ip->i_mount, dfp, 636 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork), 637 ifp->if_broot, size); 638 ifp->if_flags &= ~XFS_IFEXTENTS; 639 ifp->if_flags |= XFS_IFBROOT; 640 641 return 0; 642 } 643 644 void 645 xfs_dinode_from_disk( 646 xfs_icdinode_t *to, 647 xfs_dinode_t *from) 648 { 649 to->di_magic = be16_to_cpu(from->di_magic); 650 to->di_mode = be16_to_cpu(from->di_mode); 651 to->di_version = from ->di_version; 652 to->di_format = from->di_format; 653 to->di_onlink = be16_to_cpu(from->di_onlink); 654 to->di_uid = be32_to_cpu(from->di_uid); 655 to->di_gid = be32_to_cpu(from->di_gid); 656 to->di_nlink = be32_to_cpu(from->di_nlink); 657 to->di_projid = be16_to_cpu(from->di_projid); 658 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); 659 to->di_flushiter = be16_to_cpu(from->di_flushiter); 660 to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec); 661 to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec); 662 to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec); 663 to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec); 664 to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec); 665 to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec); 666 to->di_size = be64_to_cpu(from->di_size); 667 to->di_nblocks = be64_to_cpu(from->di_nblocks); 668 to->di_extsize = be32_to_cpu(from->di_extsize); 669 to->di_nextents = be32_to_cpu(from->di_nextents); 670 to->di_anextents = be16_to_cpu(from->di_anextents); 671 to->di_forkoff = from->di_forkoff; 672 to->di_aformat = from->di_aformat; 673 to->di_dmevmask = be32_to_cpu(from->di_dmevmask); 674 to->di_dmstate = be16_to_cpu(from->di_dmstate); 675 to->di_flags = be16_to_cpu(from->di_flags); 676 to->di_gen = be32_to_cpu(from->di_gen); 677 } 678 679 void 680 xfs_dinode_to_disk( 681 xfs_dinode_t *to, 682 xfs_icdinode_t *from) 683 { 684 to->di_magic = cpu_to_be16(from->di_magic); 685 to->di_mode = cpu_to_be16(from->di_mode); 686 to->di_version = from ->di_version; 687 to->di_format = from->di_format; 688 to->di_onlink = cpu_to_be16(from->di_onlink); 689 to->di_uid = cpu_to_be32(from->di_uid); 690 to->di_gid = cpu_to_be32(from->di_gid); 691 to->di_nlink = cpu_to_be32(from->di_nlink); 692 to->di_projid = cpu_to_be16(from->di_projid); 693 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); 694 to->di_flushiter = cpu_to_be16(from->di_flushiter); 695 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec); 696 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec); 697 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec); 698 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec); 699 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec); 700 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec); 701 to->di_size = cpu_to_be64(from->di_size); 702 to->di_nblocks = cpu_to_be64(from->di_nblocks); 703 to->di_extsize = cpu_to_be32(from->di_extsize); 704 to->di_nextents = cpu_to_be32(from->di_nextents); 705 to->di_anextents = cpu_to_be16(from->di_anextents); 706 to->di_forkoff = from->di_forkoff; 707 to->di_aformat = from->di_aformat; 708 to->di_dmevmask = cpu_to_be32(from->di_dmevmask); 709 to->di_dmstate = cpu_to_be16(from->di_dmstate); 710 to->di_flags = cpu_to_be16(from->di_flags); 711 to->di_gen = cpu_to_be32(from->di_gen); 712 } 713 714 STATIC uint 715 _xfs_dic2xflags( 716 __uint16_t di_flags) 717 { 718 uint flags = 0; 719 720 if (di_flags & XFS_DIFLAG_ANY) { 721 if (di_flags & XFS_DIFLAG_REALTIME) 722 flags |= XFS_XFLAG_REALTIME; 723 if (di_flags & XFS_DIFLAG_PREALLOC) 724 flags |= XFS_XFLAG_PREALLOC; 725 if (di_flags & XFS_DIFLAG_IMMUTABLE) 726 flags |= XFS_XFLAG_IMMUTABLE; 727 if (di_flags & XFS_DIFLAG_APPEND) 728 flags |= XFS_XFLAG_APPEND; 729 if (di_flags & XFS_DIFLAG_SYNC) 730 flags |= XFS_XFLAG_SYNC; 731 if (di_flags & XFS_DIFLAG_NOATIME) 732 flags |= XFS_XFLAG_NOATIME; 733 if (di_flags & XFS_DIFLAG_NODUMP) 734 flags |= XFS_XFLAG_NODUMP; 735 if (di_flags & XFS_DIFLAG_RTINHERIT) 736 flags |= XFS_XFLAG_RTINHERIT; 737 if (di_flags & XFS_DIFLAG_PROJINHERIT) 738 flags |= XFS_XFLAG_PROJINHERIT; 739 if (di_flags & XFS_DIFLAG_NOSYMLINKS) 740 flags |= XFS_XFLAG_NOSYMLINKS; 741 if (di_flags & XFS_DIFLAG_EXTSIZE) 742 flags |= XFS_XFLAG_EXTSIZE; 743 if (di_flags & XFS_DIFLAG_EXTSZINHERIT) 744 flags |= XFS_XFLAG_EXTSZINHERIT; 745 if (di_flags & XFS_DIFLAG_NODEFRAG) 746 flags |= XFS_XFLAG_NODEFRAG; 747 if (di_flags & XFS_DIFLAG_FILESTREAM) 748 flags |= XFS_XFLAG_FILESTREAM; 749 } 750 751 return flags; 752 } 753 754 uint 755 xfs_ip2xflags( 756 xfs_inode_t *ip) 757 { 758 xfs_icdinode_t *dic = &ip->i_d; 759 760 return _xfs_dic2xflags(dic->di_flags) | 761 (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0); 762 } 763 764 uint 765 xfs_dic2xflags( 766 xfs_dinode_t *dip) 767 { 768 return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) | 769 (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0); 770 } 771 772 /* 773 * Read the disk inode attributes into the in-core inode structure. 774 */ 775 int 776 xfs_iread( 777 xfs_mount_t *mp, 778 xfs_trans_t *tp, 779 xfs_inode_t *ip, 780 xfs_daddr_t bno, 781 uint iget_flags) 782 { 783 xfs_buf_t *bp; 784 xfs_dinode_t *dip; 785 int error; 786 787 /* 788 * Fill in the location information in the in-core inode. 789 */ 790 ip->i_imap.im_blkno = bno; 791 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); 792 if (error) 793 return error; 794 ASSERT(bno == 0 || bno == ip->i_imap.im_blkno); 795 796 /* 797 * Get pointers to the on-disk inode and the buffer containing it. 798 */ 799 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, 800 XFS_BUF_LOCK, iget_flags); 801 if (error) 802 return error; 803 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); 804 805 /* 806 * If we got something that isn't an inode it means someone 807 * (nfs or dmi) has a stale handle. 808 */ 809 if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) { 810 #ifdef DEBUG 811 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " 812 "dip->di_magic (0x%x) != " 813 "XFS_DINODE_MAGIC (0x%x)", 814 be16_to_cpu(dip->di_magic), 815 XFS_DINODE_MAGIC); 816 #endif /* DEBUG */ 817 error = XFS_ERROR(EINVAL); 818 goto out_brelse; 819 } 820 821 /* 822 * If the on-disk inode is already linked to a directory 823 * entry, copy all of the inode into the in-core inode. 824 * xfs_iformat() handles copying in the inode format 825 * specific information. 826 * Otherwise, just get the truly permanent information. 827 */ 828 if (dip->di_mode) { 829 xfs_dinode_from_disk(&ip->i_d, dip); 830 error = xfs_iformat(ip, dip); 831 if (error) { 832 #ifdef DEBUG 833 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " 834 "xfs_iformat() returned error %d", 835 error); 836 #endif /* DEBUG */ 837 goto out_brelse; 838 } 839 } else { 840 ip->i_d.di_magic = be16_to_cpu(dip->di_magic); 841 ip->i_d.di_version = dip->di_version; 842 ip->i_d.di_gen = be32_to_cpu(dip->di_gen); 843 ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter); 844 /* 845 * Make sure to pull in the mode here as well in 846 * case the inode is released without being used. 847 * This ensures that xfs_inactive() will see that 848 * the inode is already free and not try to mess 849 * with the uninitialized part of it. 850 */ 851 ip->i_d.di_mode = 0; 852 /* 853 * Initialize the per-fork minima and maxima for a new 854 * inode here. xfs_iformat will do it for old inodes. 855 */ 856 ip->i_df.if_ext_max = 857 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 858 } 859 860 /* 861 * The inode format changed when we moved the link count and 862 * made it 32 bits long. If this is an old format inode, 863 * convert it in memory to look like a new one. If it gets 864 * flushed to disk we will convert back before flushing or 865 * logging it. We zero out the new projid field and the old link 866 * count field. We'll handle clearing the pad field (the remains 867 * of the old uuid field) when we actually convert the inode to 868 * the new format. We don't change the version number so that we 869 * can distinguish this from a real new format inode. 870 */ 871 if (ip->i_d.di_version == 1) { 872 ip->i_d.di_nlink = ip->i_d.di_onlink; 873 ip->i_d.di_onlink = 0; 874 ip->i_d.di_projid = 0; 875 } 876 877 ip->i_delayed_blks = 0; 878 ip->i_size = ip->i_d.di_size; 879 880 /* 881 * Mark the buffer containing the inode as something to keep 882 * around for a while. This helps to keep recently accessed 883 * meta-data in-core longer. 884 */ 885 XFS_BUF_SET_REF(bp, XFS_INO_REF); 886 887 /* 888 * Use xfs_trans_brelse() to release the buffer containing the 889 * on-disk inode, because it was acquired with xfs_trans_read_buf() 890 * in xfs_itobp() above. If tp is NULL, this is just a normal 891 * brelse(). If we're within a transaction, then xfs_trans_brelse() 892 * will only release the buffer if it is not dirty within the 893 * transaction. It will be OK to release the buffer in this case, 894 * because inodes on disk are never destroyed and we will be 895 * locking the new in-core inode before putting it in the hash 896 * table where other processes can find it. Thus we don't have 897 * to worry about the inode being changed just because we released 898 * the buffer. 899 */ 900 out_brelse: 901 xfs_trans_brelse(tp, bp); 902 return error; 903 } 904 905 /* 906 * Read in extents from a btree-format inode. 907 * Allocate and fill in if_extents. Real work is done in xfs_bmap.c. 908 */ 909 int 910 xfs_iread_extents( 911 xfs_trans_t *tp, 912 xfs_inode_t *ip, 913 int whichfork) 914 { 915 int error; 916 xfs_ifork_t *ifp; 917 xfs_extnum_t nextents; 918 size_t size; 919 920 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 921 XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, 922 ip->i_mount); 923 return XFS_ERROR(EFSCORRUPTED); 924 } 925 nextents = XFS_IFORK_NEXTENTS(ip, whichfork); 926 size = nextents * sizeof(xfs_bmbt_rec_t); 927 ifp = XFS_IFORK_PTR(ip, whichfork); 928 929 /* 930 * We know that the size is valid (it's checked in iformat_btree) 931 */ 932 ifp->if_lastex = NULLEXTNUM; 933 ifp->if_bytes = ifp->if_real_bytes = 0; 934 ifp->if_flags |= XFS_IFEXTENTS; 935 xfs_iext_add(ifp, 0, nextents); 936 error = xfs_bmap_read_extents(tp, ip, whichfork); 937 if (error) { 938 xfs_iext_destroy(ifp); 939 ifp->if_flags &= ~XFS_IFEXTENTS; 940 return error; 941 } 942 xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip)); 943 return 0; 944 } 945 946 /* 947 * Allocate an inode on disk and return a copy of its in-core version. 948 * The in-core inode is locked exclusively. Set mode, nlink, and rdev 949 * appropriately within the inode. The uid and gid for the inode are 950 * set according to the contents of the given cred structure. 951 * 952 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc() 953 * has a free inode available, call xfs_iget() 954 * to obtain the in-core version of the allocated inode. Finally, 955 * fill in the inode and log its initial contents. In this case, 956 * ialloc_context would be set to NULL and call_again set to false. 957 * 958 * If xfs_dialloc() does not have an available inode, 959 * it will replenish its supply by doing an allocation. Since we can 960 * only do one allocation within a transaction without deadlocks, we 961 * must commit the current transaction before returning the inode itself. 962 * In this case, therefore, we will set call_again to true and return. 963 * The caller should then commit the current transaction, start a new 964 * transaction, and call xfs_ialloc() again to actually get the inode. 965 * 966 * To ensure that some other process does not grab the inode that 967 * was allocated during the first call to xfs_ialloc(), this routine 968 * also returns the [locked] bp pointing to the head of the freelist 969 * as ialloc_context. The caller should hold this buffer across 970 * the commit and pass it back into this routine on the second call. 971 * 972 * If we are allocating quota inodes, we do not have a parent inode 973 * to attach to or associate with (i.e. pip == NULL) because they 974 * are not linked into the directory structure - they are attached 975 * directly to the superblock - and so have no parent. 976 */ 977 int 978 xfs_ialloc( 979 xfs_trans_t *tp, 980 xfs_inode_t *pip, 981 mode_t mode, 982 xfs_nlink_t nlink, 983 xfs_dev_t rdev, 984 cred_t *cr, 985 xfs_prid_t prid, 986 int okalloc, 987 xfs_buf_t **ialloc_context, 988 boolean_t *call_again, 989 xfs_inode_t **ipp) 990 { 991 xfs_ino_t ino; 992 xfs_inode_t *ip; 993 uint flags; 994 int error; 995 timespec_t tv; 996 int filestreams = 0; 997 998 /* 999 * Call the space management code to pick 1000 * the on-disk inode to be allocated. 1001 */ 1002 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc, 1003 ialloc_context, call_again, &ino); 1004 if (error) 1005 return error; 1006 if (*call_again || ino == NULLFSINO) { 1007 *ipp = NULL; 1008 return 0; 1009 } 1010 ASSERT(*ialloc_context == NULL); 1011 1012 /* 1013 * Get the in-core inode with the lock held exclusively. 1014 * This is because we're setting fields here we need 1015 * to prevent others from looking at until we're done. 1016 */ 1017 error = xfs_trans_iget(tp->t_mountp, tp, ino, 1018 XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip); 1019 if (error) 1020 return error; 1021 ASSERT(ip != NULL); 1022 1023 ip->i_d.di_mode = (__uint16_t)mode; 1024 ip->i_d.di_onlink = 0; 1025 ip->i_d.di_nlink = nlink; 1026 ASSERT(ip->i_d.di_nlink == nlink); 1027 ip->i_d.di_uid = current_fsuid(); 1028 ip->i_d.di_gid = current_fsgid(); 1029 ip->i_d.di_projid = prid; 1030 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 1031 1032 /* 1033 * If the superblock version is up to where we support new format 1034 * inodes and this is currently an old format inode, then change 1035 * the inode version number now. This way we only do the conversion 1036 * here rather than here and in the flush/logging code. 1037 */ 1038 if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) && 1039 ip->i_d.di_version == 1) { 1040 ip->i_d.di_version = 2; 1041 /* 1042 * We've already zeroed the old link count, the projid field, 1043 * and the pad field. 1044 */ 1045 } 1046 1047 /* 1048 * Project ids won't be stored on disk if we are using a version 1 inode. 1049 */ 1050 if ((prid != 0) && (ip->i_d.di_version == 1)) 1051 xfs_bump_ino_vers2(tp, ip); 1052 1053 if (pip && XFS_INHERIT_GID(pip)) { 1054 ip->i_d.di_gid = pip->i_d.di_gid; 1055 if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) { 1056 ip->i_d.di_mode |= S_ISGID; 1057 } 1058 } 1059 1060 /* 1061 * If the group ID of the new file does not match the effective group 1062 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared 1063 * (and only if the irix_sgid_inherit compatibility variable is set). 1064 */ 1065 if ((irix_sgid_inherit) && 1066 (ip->i_d.di_mode & S_ISGID) && 1067 (!in_group_p((gid_t)ip->i_d.di_gid))) { 1068 ip->i_d.di_mode &= ~S_ISGID; 1069 } 1070 1071 ip->i_d.di_size = 0; 1072 ip->i_size = 0; 1073 ip->i_d.di_nextents = 0; 1074 ASSERT(ip->i_d.di_nblocks == 0); 1075 1076 nanotime(&tv); 1077 ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec; 1078 ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec; 1079 ip->i_d.di_atime = ip->i_d.di_mtime; 1080 ip->i_d.di_ctime = ip->i_d.di_mtime; 1081 1082 /* 1083 * di_gen will have been taken care of in xfs_iread. 1084 */ 1085 ip->i_d.di_extsize = 0; 1086 ip->i_d.di_dmevmask = 0; 1087 ip->i_d.di_dmstate = 0; 1088 ip->i_d.di_flags = 0; 1089 flags = XFS_ILOG_CORE; 1090 switch (mode & S_IFMT) { 1091 case S_IFIFO: 1092 case S_IFCHR: 1093 case S_IFBLK: 1094 case S_IFSOCK: 1095 ip->i_d.di_format = XFS_DINODE_FMT_DEV; 1096 ip->i_df.if_u2.if_rdev = rdev; 1097 ip->i_df.if_flags = 0; 1098 flags |= XFS_ILOG_DEV; 1099 break; 1100 case S_IFREG: 1101 /* 1102 * we can't set up filestreams until after the VFS inode 1103 * is set up properly. 1104 */ 1105 if (pip && xfs_inode_is_filestream(pip)) 1106 filestreams = 1; 1107 /* fall through */ 1108 case S_IFDIR: 1109 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { 1110 uint di_flags = 0; 1111 1112 if ((mode & S_IFMT) == S_IFDIR) { 1113 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 1114 di_flags |= XFS_DIFLAG_RTINHERIT; 1115 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 1116 di_flags |= XFS_DIFLAG_EXTSZINHERIT; 1117 ip->i_d.di_extsize = pip->i_d.di_extsize; 1118 } 1119 } else if ((mode & S_IFMT) == S_IFREG) { 1120 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 1121 di_flags |= XFS_DIFLAG_REALTIME; 1122 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 1123 di_flags |= XFS_DIFLAG_EXTSIZE; 1124 ip->i_d.di_extsize = pip->i_d.di_extsize; 1125 } 1126 } 1127 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) && 1128 xfs_inherit_noatime) 1129 di_flags |= XFS_DIFLAG_NOATIME; 1130 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) && 1131 xfs_inherit_nodump) 1132 di_flags |= XFS_DIFLAG_NODUMP; 1133 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) && 1134 xfs_inherit_sync) 1135 di_flags |= XFS_DIFLAG_SYNC; 1136 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) && 1137 xfs_inherit_nosymlinks) 1138 di_flags |= XFS_DIFLAG_NOSYMLINKS; 1139 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) 1140 di_flags |= XFS_DIFLAG_PROJINHERIT; 1141 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) && 1142 xfs_inherit_nodefrag) 1143 di_flags |= XFS_DIFLAG_NODEFRAG; 1144 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM) 1145 di_flags |= XFS_DIFLAG_FILESTREAM; 1146 ip->i_d.di_flags |= di_flags; 1147 } 1148 /* FALLTHROUGH */ 1149 case S_IFLNK: 1150 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; 1151 ip->i_df.if_flags = XFS_IFEXTENTS; 1152 ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0; 1153 ip->i_df.if_u1.if_extents = NULL; 1154 break; 1155 default: 1156 ASSERT(0); 1157 } 1158 /* 1159 * Attribute fork settings for new inode. 1160 */ 1161 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 1162 ip->i_d.di_anextents = 0; 1163 1164 /* 1165 * Log the new values stuffed into the inode. 1166 */ 1167 xfs_trans_log_inode(tp, ip, flags); 1168 1169 /* now that we have an i_mode we can setup inode ops and unlock */ 1170 xfs_setup_inode(ip); 1171 1172 /* now we have set up the vfs inode we can associate the filestream */ 1173 if (filestreams) { 1174 error = xfs_filestream_associate(pip, ip); 1175 if (error < 0) 1176 return -error; 1177 if (!error) 1178 xfs_iflags_set(ip, XFS_IFILESTREAM); 1179 } 1180 1181 *ipp = ip; 1182 return 0; 1183 } 1184 1185 /* 1186 * Check to make sure that there are no blocks allocated to the 1187 * file beyond the size of the file. We don't check this for 1188 * files with fixed size extents or real time extents, but we 1189 * at least do it for regular files. 1190 */ 1191 #ifdef DEBUG 1192 void 1193 xfs_isize_check( 1194 xfs_mount_t *mp, 1195 xfs_inode_t *ip, 1196 xfs_fsize_t isize) 1197 { 1198 xfs_fileoff_t map_first; 1199 int nimaps; 1200 xfs_bmbt_irec_t imaps[2]; 1201 1202 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) 1203 return; 1204 1205 if (XFS_IS_REALTIME_INODE(ip)) 1206 return; 1207 1208 if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) 1209 return; 1210 1211 nimaps = 2; 1212 map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); 1213 /* 1214 * The filesystem could be shutting down, so bmapi may return 1215 * an error. 1216 */ 1217 if (xfs_bmapi(NULL, ip, map_first, 1218 (XFS_B_TO_FSB(mp, 1219 (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) - 1220 map_first), 1221 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps, 1222 NULL, NULL)) 1223 return; 1224 ASSERT(nimaps == 1); 1225 ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK); 1226 } 1227 #endif /* DEBUG */ 1228 1229 /* 1230 * Calculate the last possible buffered byte in a file. This must 1231 * include data that was buffered beyond the EOF by the write code. 1232 * This also needs to deal with overflowing the xfs_fsize_t type 1233 * which can happen for sizes near the limit. 1234 * 1235 * We also need to take into account any blocks beyond the EOF. It 1236 * may be the case that they were buffered by a write which failed. 1237 * In that case the pages will still be in memory, but the inode size 1238 * will never have been updated. 1239 */ 1240 xfs_fsize_t 1241 xfs_file_last_byte( 1242 xfs_inode_t *ip) 1243 { 1244 xfs_mount_t *mp; 1245 xfs_fsize_t last_byte; 1246 xfs_fileoff_t last_block; 1247 xfs_fileoff_t size_last_block; 1248 int error; 1249 1250 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)); 1251 1252 mp = ip->i_mount; 1253 /* 1254 * Only check for blocks beyond the EOF if the extents have 1255 * been read in. This eliminates the need for the inode lock, 1256 * and it also saves us from looking when it really isn't 1257 * necessary. 1258 */ 1259 if (ip->i_df.if_flags & XFS_IFEXTENTS) { 1260 xfs_ilock(ip, XFS_ILOCK_SHARED); 1261 error = xfs_bmap_last_offset(NULL, ip, &last_block, 1262 XFS_DATA_FORK); 1263 xfs_iunlock(ip, XFS_ILOCK_SHARED); 1264 if (error) { 1265 last_block = 0; 1266 } 1267 } else { 1268 last_block = 0; 1269 } 1270 size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size); 1271 last_block = XFS_FILEOFF_MAX(last_block, size_last_block); 1272 1273 last_byte = XFS_FSB_TO_B(mp, last_block); 1274 if (last_byte < 0) { 1275 return XFS_MAXIOFFSET(mp); 1276 } 1277 last_byte += (1 << mp->m_writeio_log); 1278 if (last_byte < 0) { 1279 return XFS_MAXIOFFSET(mp); 1280 } 1281 return last_byte; 1282 } 1283 1284 #if defined(XFS_RW_TRACE) 1285 STATIC void 1286 xfs_itrunc_trace( 1287 int tag, 1288 xfs_inode_t *ip, 1289 int flag, 1290 xfs_fsize_t new_size, 1291 xfs_off_t toss_start, 1292 xfs_off_t toss_finish) 1293 { 1294 if (ip->i_rwtrace == NULL) { 1295 return; 1296 } 1297 1298 ktrace_enter(ip->i_rwtrace, 1299 (void*)((long)tag), 1300 (void*)ip, 1301 (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff), 1302 (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff), 1303 (void*)((long)flag), 1304 (void*)(unsigned long)((new_size >> 32) & 0xffffffff), 1305 (void*)(unsigned long)(new_size & 0xffffffff), 1306 (void*)(unsigned long)((toss_start >> 32) & 0xffffffff), 1307 (void*)(unsigned long)(toss_start & 0xffffffff), 1308 (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff), 1309 (void*)(unsigned long)(toss_finish & 0xffffffff), 1310 (void*)(unsigned long)current_cpu(), 1311 (void*)(unsigned long)current_pid(), 1312 (void*)NULL, 1313 (void*)NULL, 1314 (void*)NULL); 1315 } 1316 #else 1317 #define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish) 1318 #endif 1319 1320 /* 1321 * Start the truncation of the file to new_size. The new size 1322 * must be smaller than the current size. This routine will 1323 * clear the buffer and page caches of file data in the removed 1324 * range, and xfs_itruncate_finish() will remove the underlying 1325 * disk blocks. 1326 * 1327 * The inode must have its I/O lock locked EXCLUSIVELY, and it 1328 * must NOT have the inode lock held at all. This is because we're 1329 * calling into the buffer/page cache code and we can't hold the 1330 * inode lock when we do so. 1331 * 1332 * We need to wait for any direct I/Os in flight to complete before we 1333 * proceed with the truncate. This is needed to prevent the extents 1334 * being read or written by the direct I/Os from being removed while the 1335 * I/O is in flight as there is no other method of synchronising 1336 * direct I/O with the truncate operation. Also, because we hold 1337 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being 1338 * started until the truncate completes and drops the lock. Essentially, 1339 * the xfs_ioend_wait() call forms an I/O barrier that provides strict 1340 * ordering between direct I/Os and the truncate operation. 1341 * 1342 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE 1343 * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used 1344 * in the case that the caller is locking things out of order and 1345 * may not be able to call xfs_itruncate_finish() with the inode lock 1346 * held without dropping the I/O lock. If the caller must drop the 1347 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start() 1348 * must be called again with all the same restrictions as the initial 1349 * call. 1350 */ 1351 int 1352 xfs_itruncate_start( 1353 xfs_inode_t *ip, 1354 uint flags, 1355 xfs_fsize_t new_size) 1356 { 1357 xfs_fsize_t last_byte; 1358 xfs_off_t toss_start; 1359 xfs_mount_t *mp; 1360 int error = 0; 1361 1362 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 1363 ASSERT((new_size == 0) || (new_size <= ip->i_size)); 1364 ASSERT((flags == XFS_ITRUNC_DEFINITE) || 1365 (flags == XFS_ITRUNC_MAYBE)); 1366 1367 mp = ip->i_mount; 1368 1369 /* wait for the completion of any pending DIOs */ 1370 if (new_size == 0 || new_size < ip->i_size) 1371 xfs_ioend_wait(ip); 1372 1373 /* 1374 * Call toss_pages or flushinval_pages to get rid of pages 1375 * overlapping the region being removed. We have to use 1376 * the less efficient flushinval_pages in the case that the 1377 * caller may not be able to finish the truncate without 1378 * dropping the inode's I/O lock. Make sure 1379 * to catch any pages brought in by buffers overlapping 1380 * the EOF by searching out beyond the isize by our 1381 * block size. We round new_size up to a block boundary 1382 * so that we don't toss things on the same block as 1383 * new_size but before it. 1384 * 1385 * Before calling toss_page or flushinval_pages, make sure to 1386 * call remapf() over the same region if the file is mapped. 1387 * This frees up mapped file references to the pages in the 1388 * given range and for the flushinval_pages case it ensures 1389 * that we get the latest mapped changes flushed out. 1390 */ 1391 toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); 1392 toss_start = XFS_FSB_TO_B(mp, toss_start); 1393 if (toss_start < 0) { 1394 /* 1395 * The place to start tossing is beyond our maximum 1396 * file size, so there is no way that the data extended 1397 * out there. 1398 */ 1399 return 0; 1400 } 1401 last_byte = xfs_file_last_byte(ip); 1402 xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start, 1403 last_byte); 1404 if (last_byte > toss_start) { 1405 if (flags & XFS_ITRUNC_DEFINITE) { 1406 xfs_tosspages(ip, toss_start, 1407 -1, FI_REMAPF_LOCKED); 1408 } else { 1409 error = xfs_flushinval_pages(ip, toss_start, 1410 -1, FI_REMAPF_LOCKED); 1411 } 1412 } 1413 1414 #ifdef DEBUG 1415 if (new_size == 0) { 1416 ASSERT(VN_CACHED(VFS_I(ip)) == 0); 1417 } 1418 #endif 1419 return error; 1420 } 1421 1422 /* 1423 * Shrink the file to the given new_size. The new size must be smaller than 1424 * the current size. This will free up the underlying blocks in the removed 1425 * range after a call to xfs_itruncate_start() or xfs_atruncate_start(). 1426 * 1427 * The transaction passed to this routine must have made a permanent log 1428 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the 1429 * given transaction and start new ones, so make sure everything involved in 1430 * the transaction is tidy before calling here. Some transaction will be 1431 * returned to the caller to be committed. The incoming transaction must 1432 * already include the inode, and both inode locks must be held exclusively. 1433 * The inode must also be "held" within the transaction. On return the inode 1434 * will be "held" within the returned transaction. This routine does NOT 1435 * require any disk space to be reserved for it within the transaction. 1436 * 1437 * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it 1438 * indicates the fork which is to be truncated. For the attribute fork we only 1439 * support truncation to size 0. 1440 * 1441 * We use the sync parameter to indicate whether or not the first transaction 1442 * we perform might have to be synchronous. For the attr fork, it needs to be 1443 * so if the unlink of the inode is not yet known to be permanent in the log. 1444 * This keeps us from freeing and reusing the blocks of the attribute fork 1445 * before the unlink of the inode becomes permanent. 1446 * 1447 * For the data fork, we normally have to run synchronously if we're being 1448 * called out of the inactive path or we're being called out of the create path 1449 * where we're truncating an existing file. Either way, the truncate needs to 1450 * be sync so blocks don't reappear in the file with altered data in case of a 1451 * crash. wsync filesystems can run the first case async because anything that 1452 * shrinks the inode has to run sync so by the time we're called here from 1453 * inactive, the inode size is permanently set to 0. 1454 * 1455 * Calls from the truncate path always need to be sync unless we're in a wsync 1456 * filesystem and the file has already been unlinked. 1457 * 1458 * The caller is responsible for correctly setting the sync parameter. It gets 1459 * too hard for us to guess here which path we're being called out of just 1460 * based on inode state. 1461 * 1462 * If we get an error, we must return with the inode locked and linked into the 1463 * current transaction. This keeps things simple for the higher level code, 1464 * because it always knows that the inode is locked and held in the transaction 1465 * that returns to it whether errors occur or not. We don't mark the inode 1466 * dirty on error so that transactions can be easily aborted if possible. 1467 */ 1468 int 1469 xfs_itruncate_finish( 1470 xfs_trans_t **tp, 1471 xfs_inode_t *ip, 1472 xfs_fsize_t new_size, 1473 int fork, 1474 int sync) 1475 { 1476 xfs_fsblock_t first_block; 1477 xfs_fileoff_t first_unmap_block; 1478 xfs_fileoff_t last_block; 1479 xfs_filblks_t unmap_len=0; 1480 xfs_mount_t *mp; 1481 xfs_trans_t *ntp; 1482 int done; 1483 int committed; 1484 xfs_bmap_free_t free_list; 1485 int error; 1486 1487 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); 1488 ASSERT((new_size == 0) || (new_size <= ip->i_size)); 1489 ASSERT(*tp != NULL); 1490 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); 1491 ASSERT(ip->i_transp == *tp); 1492 ASSERT(ip->i_itemp != NULL); 1493 ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD); 1494 1495 1496 ntp = *tp; 1497 mp = (ntp)->t_mountp; 1498 ASSERT(! XFS_NOT_DQATTACHED(mp, ip)); 1499 1500 /* 1501 * We only support truncating the entire attribute fork. 1502 */ 1503 if (fork == XFS_ATTR_FORK) { 1504 new_size = 0LL; 1505 } 1506 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); 1507 xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0); 1508 /* 1509 * The first thing we do is set the size to new_size permanently 1510 * on disk. This way we don't have to worry about anyone ever 1511 * being able to look at the data being freed even in the face 1512 * of a crash. What we're getting around here is the case where 1513 * we free a block, it is allocated to another file, it is written 1514 * to, and then we crash. If the new data gets written to the 1515 * file but the log buffers containing the free and reallocation 1516 * don't, then we'd end up with garbage in the blocks being freed. 1517 * As long as we make the new_size permanent before actually 1518 * freeing any blocks it doesn't matter if they get writtten to. 1519 * 1520 * The callers must signal into us whether or not the size 1521 * setting here must be synchronous. There are a few cases 1522 * where it doesn't have to be synchronous. Those cases 1523 * occur if the file is unlinked and we know the unlink is 1524 * permanent or if the blocks being truncated are guaranteed 1525 * to be beyond the inode eof (regardless of the link count) 1526 * and the eof value is permanent. Both of these cases occur 1527 * only on wsync-mounted filesystems. In those cases, we're 1528 * guaranteed that no user will ever see the data in the blocks 1529 * that are being truncated so the truncate can run async. 1530 * In the free beyond eof case, the file may wind up with 1531 * more blocks allocated to it than it needs if we crash 1532 * and that won't get fixed until the next time the file 1533 * is re-opened and closed but that's ok as that shouldn't 1534 * be too many blocks. 1535 * 1536 * However, we can't just make all wsync xactions run async 1537 * because there's one call out of the create path that needs 1538 * to run sync where it's truncating an existing file to size 1539 * 0 whose size is > 0. 1540 * 1541 * It's probably possible to come up with a test in this 1542 * routine that would correctly distinguish all the above 1543 * cases from the values of the function parameters and the 1544 * inode state but for sanity's sake, I've decided to let the 1545 * layers above just tell us. It's simpler to correctly figure 1546 * out in the layer above exactly under what conditions we 1547 * can run async and I think it's easier for others read and 1548 * follow the logic in case something has to be changed. 1549 * cscope is your friend -- rcc. 1550 * 1551 * The attribute fork is much simpler. 1552 * 1553 * For the attribute fork we allow the caller to tell us whether 1554 * the unlink of the inode that led to this call is yet permanent 1555 * in the on disk log. If it is not and we will be freeing extents 1556 * in this inode then we make the first transaction synchronous 1557 * to make sure that the unlink is permanent by the time we free 1558 * the blocks. 1559 */ 1560 if (fork == XFS_DATA_FORK) { 1561 if (ip->i_d.di_nextents > 0) { 1562 /* 1563 * If we are not changing the file size then do 1564 * not update the on-disk file size - we may be 1565 * called from xfs_inactive_free_eofblocks(). If we 1566 * update the on-disk file size and then the system 1567 * crashes before the contents of the file are 1568 * flushed to disk then the files may be full of 1569 * holes (ie NULL files bug). 1570 */ 1571 if (ip->i_size != new_size) { 1572 ip->i_d.di_size = new_size; 1573 ip->i_size = new_size; 1574 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); 1575 } 1576 } 1577 } else if (sync) { 1578 ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC)); 1579 if (ip->i_d.di_anextents > 0) 1580 xfs_trans_set_sync(ntp); 1581 } 1582 ASSERT(fork == XFS_DATA_FORK || 1583 (fork == XFS_ATTR_FORK && 1584 ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) || 1585 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC))))); 1586 1587 /* 1588 * Since it is possible for space to become allocated beyond 1589 * the end of the file (in a crash where the space is allocated 1590 * but the inode size is not yet updated), simply remove any 1591 * blocks which show up between the new EOF and the maximum 1592 * possible file size. If the first block to be removed is 1593 * beyond the maximum file size (ie it is the same as last_block), 1594 * then there is nothing to do. 1595 */ 1596 last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 1597 ASSERT(first_unmap_block <= last_block); 1598 done = 0; 1599 if (last_block == first_unmap_block) { 1600 done = 1; 1601 } else { 1602 unmap_len = last_block - first_unmap_block + 1; 1603 } 1604 while (!done) { 1605 /* 1606 * Free up up to XFS_ITRUNC_MAX_EXTENTS. xfs_bunmapi() 1607 * will tell us whether it freed the entire range or 1608 * not. If this is a synchronous mount (wsync), 1609 * then we can tell bunmapi to keep all the 1610 * transactions asynchronous since the unlink 1611 * transaction that made this inode inactive has 1612 * already hit the disk. There's no danger of 1613 * the freed blocks being reused, there being a 1614 * crash, and the reused blocks suddenly reappearing 1615 * in this file with garbage in them once recovery 1616 * runs. 1617 */ 1618 xfs_bmap_init(&free_list, &first_block); 1619 error = xfs_bunmapi(ntp, ip, 1620 first_unmap_block, unmap_len, 1621 xfs_bmapi_aflag(fork) | 1622 (sync ? 0 : XFS_BMAPI_ASYNC), 1623 XFS_ITRUNC_MAX_EXTENTS, 1624 &first_block, &free_list, 1625 NULL, &done); 1626 if (error) { 1627 /* 1628 * If the bunmapi call encounters an error, 1629 * return to the caller where the transaction 1630 * can be properly aborted. We just need to 1631 * make sure we're not holding any resources 1632 * that we were not when we came in. 1633 */ 1634 xfs_bmap_cancel(&free_list); 1635 return error; 1636 } 1637 1638 /* 1639 * Duplicate the transaction that has the permanent 1640 * reservation and commit the old transaction. 1641 */ 1642 error = xfs_bmap_finish(tp, &free_list, &committed); 1643 ntp = *tp; 1644 if (committed) { 1645 /* link the inode into the next xact in the chain */ 1646 xfs_trans_ijoin(ntp, ip, 1647 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1648 xfs_trans_ihold(ntp, ip); 1649 } 1650 1651 if (error) { 1652 /* 1653 * If the bmap finish call encounters an error, return 1654 * to the caller where the transaction can be properly 1655 * aborted. We just need to make sure we're not 1656 * holding any resources that we were not when we came 1657 * in. 1658 * 1659 * Aborting from this point might lose some blocks in 1660 * the file system, but oh well. 1661 */ 1662 xfs_bmap_cancel(&free_list); 1663 return error; 1664 } 1665 1666 if (committed) { 1667 /* 1668 * Mark the inode dirty so it will be logged and 1669 * moved forward in the log as part of every commit. 1670 */ 1671 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); 1672 } 1673 1674 ntp = xfs_trans_dup(ntp); 1675 error = xfs_trans_commit(*tp, 0); 1676 *tp = ntp; 1677 1678 /* link the inode into the next transaction in the chain */ 1679 xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1680 xfs_trans_ihold(ntp, ip); 1681 1682 if (error) 1683 return error; 1684 /* 1685 * transaction commit worked ok so we can drop the extra ticket 1686 * reference that we gained in xfs_trans_dup() 1687 */ 1688 xfs_log_ticket_put(ntp->t_ticket); 1689 error = xfs_trans_reserve(ntp, 0, 1690 XFS_ITRUNCATE_LOG_RES(mp), 0, 1691 XFS_TRANS_PERM_LOG_RES, 1692 XFS_ITRUNCATE_LOG_COUNT); 1693 if (error) 1694 return error; 1695 } 1696 /* 1697 * Only update the size in the case of the data fork, but 1698 * always re-log the inode so that our permanent transaction 1699 * can keep on rolling it forward in the log. 1700 */ 1701 if (fork == XFS_DATA_FORK) { 1702 xfs_isize_check(mp, ip, new_size); 1703 /* 1704 * If we are not changing the file size then do 1705 * not update the on-disk file size - we may be 1706 * called from xfs_inactive_free_eofblocks(). If we 1707 * update the on-disk file size and then the system 1708 * crashes before the contents of the file are 1709 * flushed to disk then the files may be full of 1710 * holes (ie NULL files bug). 1711 */ 1712 if (ip->i_size != new_size) { 1713 ip->i_d.di_size = new_size; 1714 ip->i_size = new_size; 1715 } 1716 } 1717 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); 1718 ASSERT((new_size != 0) || 1719 (fork == XFS_ATTR_FORK) || 1720 (ip->i_delayed_blks == 0)); 1721 ASSERT((new_size != 0) || 1722 (fork == XFS_ATTR_FORK) || 1723 (ip->i_d.di_nextents == 0)); 1724 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0); 1725 return 0; 1726 } 1727 1728 /* 1729 * This is called when the inode's link count goes to 0. 1730 * We place the on-disk inode on a list in the AGI. It 1731 * will be pulled from this list when the inode is freed. 1732 */ 1733 int 1734 xfs_iunlink( 1735 xfs_trans_t *tp, 1736 xfs_inode_t *ip) 1737 { 1738 xfs_mount_t *mp; 1739 xfs_agi_t *agi; 1740 xfs_dinode_t *dip; 1741 xfs_buf_t *agibp; 1742 xfs_buf_t *ibp; 1743 xfs_agino_t agino; 1744 short bucket_index; 1745 int offset; 1746 int error; 1747 1748 ASSERT(ip->i_d.di_nlink == 0); 1749 ASSERT(ip->i_d.di_mode != 0); 1750 ASSERT(ip->i_transp == tp); 1751 1752 mp = tp->t_mountp; 1753 1754 /* 1755 * Get the agi buffer first. It ensures lock ordering 1756 * on the list. 1757 */ 1758 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp); 1759 if (error) 1760 return error; 1761 agi = XFS_BUF_TO_AGI(agibp); 1762 1763 /* 1764 * Get the index into the agi hash table for the 1765 * list this inode will go on. 1766 */ 1767 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1768 ASSERT(agino != 0); 1769 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1770 ASSERT(agi->agi_unlinked[bucket_index]); 1771 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); 1772 1773 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { 1774 /* 1775 * There is already another inode in the bucket we need 1776 * to add ourselves to. Add us at the front of the list. 1777 * Here we put the head pointer into our next pointer, 1778 * and then we fall through to point the head at us. 1779 */ 1780 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); 1781 if (error) 1782 return error; 1783 1784 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); 1785 /* both on-disk, don't endian flip twice */ 1786 dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; 1787 offset = ip->i_imap.im_boffset + 1788 offsetof(xfs_dinode_t, di_next_unlinked); 1789 xfs_trans_inode_buf(tp, ibp); 1790 xfs_trans_log_buf(tp, ibp, offset, 1791 (offset + sizeof(xfs_agino_t) - 1)); 1792 xfs_inobp_check(mp, ibp); 1793 } 1794 1795 /* 1796 * Point the bucket head pointer at the inode being inserted. 1797 */ 1798 ASSERT(agino != 0); 1799 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); 1800 offset = offsetof(xfs_agi_t, agi_unlinked) + 1801 (sizeof(xfs_agino_t) * bucket_index); 1802 xfs_trans_log_buf(tp, agibp, offset, 1803 (offset + sizeof(xfs_agino_t) - 1)); 1804 return 0; 1805 } 1806 1807 /* 1808 * Pull the on-disk inode from the AGI unlinked list. 1809 */ 1810 STATIC int 1811 xfs_iunlink_remove( 1812 xfs_trans_t *tp, 1813 xfs_inode_t *ip) 1814 { 1815 xfs_ino_t next_ino; 1816 xfs_mount_t *mp; 1817 xfs_agi_t *agi; 1818 xfs_dinode_t *dip; 1819 xfs_buf_t *agibp; 1820 xfs_buf_t *ibp; 1821 xfs_agnumber_t agno; 1822 xfs_agino_t agino; 1823 xfs_agino_t next_agino; 1824 xfs_buf_t *last_ibp; 1825 xfs_dinode_t *last_dip = NULL; 1826 short bucket_index; 1827 int offset, last_offset = 0; 1828 int error; 1829 1830 mp = tp->t_mountp; 1831 agno = XFS_INO_TO_AGNO(mp, ip->i_ino); 1832 1833 /* 1834 * Get the agi buffer first. It ensures lock ordering 1835 * on the list. 1836 */ 1837 error = xfs_read_agi(mp, tp, agno, &agibp); 1838 if (error) 1839 return error; 1840 1841 agi = XFS_BUF_TO_AGI(agibp); 1842 1843 /* 1844 * Get the index into the agi hash table for the 1845 * list this inode will go on. 1846 */ 1847 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1848 ASSERT(agino != 0); 1849 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1850 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO); 1851 ASSERT(agi->agi_unlinked[bucket_index]); 1852 1853 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { 1854 /* 1855 * We're at the head of the list. Get the inode's 1856 * on-disk buffer to see if there is anyone after us 1857 * on the list. Only modify our next pointer if it 1858 * is not already NULLAGINO. This saves us the overhead 1859 * of dealing with the buffer when there is no need to 1860 * change it. 1861 */ 1862 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); 1863 if (error) { 1864 cmn_err(CE_WARN, 1865 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", 1866 error, mp->m_fsname); 1867 return error; 1868 } 1869 next_agino = be32_to_cpu(dip->di_next_unlinked); 1870 ASSERT(next_agino != 0); 1871 if (next_agino != NULLAGINO) { 1872 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 1873 offset = ip->i_imap.im_boffset + 1874 offsetof(xfs_dinode_t, di_next_unlinked); 1875 xfs_trans_inode_buf(tp, ibp); 1876 xfs_trans_log_buf(tp, ibp, offset, 1877 (offset + sizeof(xfs_agino_t) - 1)); 1878 xfs_inobp_check(mp, ibp); 1879 } else { 1880 xfs_trans_brelse(tp, ibp); 1881 } 1882 /* 1883 * Point the bucket head pointer at the next inode. 1884 */ 1885 ASSERT(next_agino != 0); 1886 ASSERT(next_agino != agino); 1887 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); 1888 offset = offsetof(xfs_agi_t, agi_unlinked) + 1889 (sizeof(xfs_agino_t) * bucket_index); 1890 xfs_trans_log_buf(tp, agibp, offset, 1891 (offset + sizeof(xfs_agino_t) - 1)); 1892 } else { 1893 /* 1894 * We need to search the list for the inode being freed. 1895 */ 1896 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 1897 last_ibp = NULL; 1898 while (next_agino != agino) { 1899 /* 1900 * If the last inode wasn't the one pointing to 1901 * us, then release its buffer since we're not 1902 * going to do anything with it. 1903 */ 1904 if (last_ibp != NULL) { 1905 xfs_trans_brelse(tp, last_ibp); 1906 } 1907 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); 1908 error = xfs_inotobp(mp, tp, next_ino, &last_dip, 1909 &last_ibp, &last_offset, 0); 1910 if (error) { 1911 cmn_err(CE_WARN, 1912 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.", 1913 error, mp->m_fsname); 1914 return error; 1915 } 1916 next_agino = be32_to_cpu(last_dip->di_next_unlinked); 1917 ASSERT(next_agino != NULLAGINO); 1918 ASSERT(next_agino != 0); 1919 } 1920 /* 1921 * Now last_ibp points to the buffer previous to us on 1922 * the unlinked list. Pull us from the list. 1923 */ 1924 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); 1925 if (error) { 1926 cmn_err(CE_WARN, 1927 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", 1928 error, mp->m_fsname); 1929 return error; 1930 } 1931 next_agino = be32_to_cpu(dip->di_next_unlinked); 1932 ASSERT(next_agino != 0); 1933 ASSERT(next_agino != agino); 1934 if (next_agino != NULLAGINO) { 1935 dip->di_next_unlinked = cpu_to_be32(NULLAGINO); 1936 offset = ip->i_imap.im_boffset + 1937 offsetof(xfs_dinode_t, di_next_unlinked); 1938 xfs_trans_inode_buf(tp, ibp); 1939 xfs_trans_log_buf(tp, ibp, offset, 1940 (offset + sizeof(xfs_agino_t) - 1)); 1941 xfs_inobp_check(mp, ibp); 1942 } else { 1943 xfs_trans_brelse(tp, ibp); 1944 } 1945 /* 1946 * Point the previous inode on the list to the next inode. 1947 */ 1948 last_dip->di_next_unlinked = cpu_to_be32(next_agino); 1949 ASSERT(next_agino != 0); 1950 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked); 1951 xfs_trans_inode_buf(tp, last_ibp); 1952 xfs_trans_log_buf(tp, last_ibp, offset, 1953 (offset + sizeof(xfs_agino_t) - 1)); 1954 xfs_inobp_check(mp, last_ibp); 1955 } 1956 return 0; 1957 } 1958 1959 STATIC void 1960 xfs_ifree_cluster( 1961 xfs_inode_t *free_ip, 1962 xfs_trans_t *tp, 1963 xfs_ino_t inum) 1964 { 1965 xfs_mount_t *mp = free_ip->i_mount; 1966 int blks_per_cluster; 1967 int nbufs; 1968 int ninodes; 1969 int i, j, found, pre_flushed; 1970 xfs_daddr_t blkno; 1971 xfs_buf_t *bp; 1972 xfs_inode_t *ip, **ip_found; 1973 xfs_inode_log_item_t *iip; 1974 xfs_log_item_t *lip; 1975 xfs_perag_t *pag = xfs_get_perag(mp, inum); 1976 1977 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { 1978 blks_per_cluster = 1; 1979 ninodes = mp->m_sb.sb_inopblock; 1980 nbufs = XFS_IALLOC_BLOCKS(mp); 1981 } else { 1982 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) / 1983 mp->m_sb.sb_blocksize; 1984 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock; 1985 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster; 1986 } 1987 1988 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS); 1989 1990 for (j = 0; j < nbufs; j++, inum += ninodes) { 1991 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 1992 XFS_INO_TO_AGBNO(mp, inum)); 1993 1994 1995 /* 1996 * Look for each inode in memory and attempt to lock it, 1997 * we can be racing with flush and tail pushing here. 1998 * any inode we get the locks on, add to an array of 1999 * inode items to process later. 2000 * 2001 * The get the buffer lock, we could beat a flush 2002 * or tail pushing thread to the lock here, in which 2003 * case they will go looking for the inode buffer 2004 * and fail, we need some other form of interlock 2005 * here. 2006 */ 2007 found = 0; 2008 for (i = 0; i < ninodes; i++) { 2009 read_lock(&pag->pag_ici_lock); 2010 ip = radix_tree_lookup(&pag->pag_ici_root, 2011 XFS_INO_TO_AGINO(mp, (inum + i))); 2012 2013 /* Inode not in memory or we found it already, 2014 * nothing to do 2015 */ 2016 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) { 2017 read_unlock(&pag->pag_ici_lock); 2018 continue; 2019 } 2020 2021 if (xfs_inode_clean(ip)) { 2022 read_unlock(&pag->pag_ici_lock); 2023 continue; 2024 } 2025 2026 /* If we can get the locks then add it to the 2027 * list, otherwise by the time we get the bp lock 2028 * below it will already be attached to the 2029 * inode buffer. 2030 */ 2031 2032 /* This inode will already be locked - by us, lets 2033 * keep it that way. 2034 */ 2035 2036 if (ip == free_ip) { 2037 if (xfs_iflock_nowait(ip)) { 2038 xfs_iflags_set(ip, XFS_ISTALE); 2039 if (xfs_inode_clean(ip)) { 2040 xfs_ifunlock(ip); 2041 } else { 2042 ip_found[found++] = ip; 2043 } 2044 } 2045 read_unlock(&pag->pag_ici_lock); 2046 continue; 2047 } 2048 2049 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2050 if (xfs_iflock_nowait(ip)) { 2051 xfs_iflags_set(ip, XFS_ISTALE); 2052 2053 if (xfs_inode_clean(ip)) { 2054 xfs_ifunlock(ip); 2055 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2056 } else { 2057 ip_found[found++] = ip; 2058 } 2059 } else { 2060 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2061 } 2062 } 2063 read_unlock(&pag->pag_ici_lock); 2064 } 2065 2066 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2067 mp->m_bsize * blks_per_cluster, 2068 XFS_BUF_LOCK); 2069 2070 pre_flushed = 0; 2071 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); 2072 while (lip) { 2073 if (lip->li_type == XFS_LI_INODE) { 2074 iip = (xfs_inode_log_item_t *)lip; 2075 ASSERT(iip->ili_logged == 1); 2076 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; 2077 xfs_trans_ail_copy_lsn(mp->m_ail, 2078 &iip->ili_flush_lsn, 2079 &iip->ili_item.li_lsn); 2080 xfs_iflags_set(iip->ili_inode, XFS_ISTALE); 2081 pre_flushed++; 2082 } 2083 lip = lip->li_bio_list; 2084 } 2085 2086 for (i = 0; i < found; i++) { 2087 ip = ip_found[i]; 2088 iip = ip->i_itemp; 2089 2090 if (!iip) { 2091 ip->i_update_core = 0; 2092 xfs_ifunlock(ip); 2093 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2094 continue; 2095 } 2096 2097 iip->ili_last_fields = iip->ili_format.ilf_fields; 2098 iip->ili_format.ilf_fields = 0; 2099 iip->ili_logged = 1; 2100 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 2101 &iip->ili_item.li_lsn); 2102 2103 xfs_buf_attach_iodone(bp, 2104 (void(*)(xfs_buf_t*,xfs_log_item_t*)) 2105 xfs_istale_done, (xfs_log_item_t *)iip); 2106 if (ip != free_ip) { 2107 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2108 } 2109 } 2110 2111 if (found || pre_flushed) 2112 xfs_trans_stale_inode_buf(tp, bp); 2113 xfs_trans_binval(tp, bp); 2114 } 2115 2116 kmem_free(ip_found); 2117 xfs_put_perag(mp, pag); 2118 } 2119 2120 /* 2121 * This is called to return an inode to the inode free list. 2122 * The inode should already be truncated to 0 length and have 2123 * no pages associated with it. This routine also assumes that 2124 * the inode is already a part of the transaction. 2125 * 2126 * The on-disk copy of the inode will have been added to the list 2127 * of unlinked inodes in the AGI. We need to remove the inode from 2128 * that list atomically with respect to freeing it here. 2129 */ 2130 int 2131 xfs_ifree( 2132 xfs_trans_t *tp, 2133 xfs_inode_t *ip, 2134 xfs_bmap_free_t *flist) 2135 { 2136 int error; 2137 int delete; 2138 xfs_ino_t first_ino; 2139 xfs_dinode_t *dip; 2140 xfs_buf_t *ibp; 2141 2142 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2143 ASSERT(ip->i_transp == tp); 2144 ASSERT(ip->i_d.di_nlink == 0); 2145 ASSERT(ip->i_d.di_nextents == 0); 2146 ASSERT(ip->i_d.di_anextents == 0); 2147 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) || 2148 ((ip->i_d.di_mode & S_IFMT) != S_IFREG)); 2149 ASSERT(ip->i_d.di_nblocks == 0); 2150 2151 /* 2152 * Pull the on-disk inode from the AGI unlinked list. 2153 */ 2154 error = xfs_iunlink_remove(tp, ip); 2155 if (error != 0) { 2156 return error; 2157 } 2158 2159 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino); 2160 if (error != 0) { 2161 return error; 2162 } 2163 ip->i_d.di_mode = 0; /* mark incore inode as free */ 2164 ip->i_d.di_flags = 0; 2165 ip->i_d.di_dmevmask = 0; 2166 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ 2167 ip->i_df.if_ext_max = 2168 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 2169 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; 2170 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 2171 /* 2172 * Bump the generation count so no one will be confused 2173 * by reincarnations of this inode. 2174 */ 2175 ip->i_d.di_gen++; 2176 2177 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2178 2179 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK); 2180 if (error) 2181 return error; 2182 2183 /* 2184 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat 2185 * from picking up this inode when it is reclaimed (its incore state 2186 * initialzed but not flushed to disk yet). The in-core di_mode is 2187 * already cleared and a corresponding transaction logged. 2188 * The hack here just synchronizes the in-core to on-disk 2189 * di_mode value in advance before the actual inode sync to disk. 2190 * This is OK because the inode is already unlinked and would never 2191 * change its di_mode again for this inode generation. 2192 * This is a temporary hack that would require a proper fix 2193 * in the future. 2194 */ 2195 dip->di_mode = 0; 2196 2197 if (delete) { 2198 xfs_ifree_cluster(ip, tp, first_ino); 2199 } 2200 2201 return 0; 2202 } 2203 2204 /* 2205 * Reallocate the space for if_broot based on the number of records 2206 * being added or deleted as indicated in rec_diff. Move the records 2207 * and pointers in if_broot to fit the new size. When shrinking this 2208 * will eliminate holes between the records and pointers created by 2209 * the caller. When growing this will create holes to be filled in 2210 * by the caller. 2211 * 2212 * The caller must not request to add more records than would fit in 2213 * the on-disk inode root. If the if_broot is currently NULL, then 2214 * if we adding records one will be allocated. The caller must also 2215 * not request that the number of records go below zero, although 2216 * it can go to zero. 2217 * 2218 * ip -- the inode whose if_broot area is changing 2219 * ext_diff -- the change in the number of records, positive or negative, 2220 * requested for the if_broot array. 2221 */ 2222 void 2223 xfs_iroot_realloc( 2224 xfs_inode_t *ip, 2225 int rec_diff, 2226 int whichfork) 2227 { 2228 struct xfs_mount *mp = ip->i_mount; 2229 int cur_max; 2230 xfs_ifork_t *ifp; 2231 struct xfs_btree_block *new_broot; 2232 int new_max; 2233 size_t new_size; 2234 char *np; 2235 char *op; 2236 2237 /* 2238 * Handle the degenerate case quietly. 2239 */ 2240 if (rec_diff == 0) { 2241 return; 2242 } 2243 2244 ifp = XFS_IFORK_PTR(ip, whichfork); 2245 if (rec_diff > 0) { 2246 /* 2247 * If there wasn't any memory allocated before, just 2248 * allocate it now and get out. 2249 */ 2250 if (ifp->if_broot_bytes == 0) { 2251 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); 2252 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP); 2253 ifp->if_broot_bytes = (int)new_size; 2254 return; 2255 } 2256 2257 /* 2258 * If there is already an existing if_broot, then we need 2259 * to realloc() it and shift the pointers to their new 2260 * location. The records don't change location because 2261 * they are kept butted up against the btree block header. 2262 */ 2263 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); 2264 new_max = cur_max + rec_diff; 2265 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2266 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size, 2267 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ 2268 KM_SLEEP); 2269 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2270 ifp->if_broot_bytes); 2271 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2272 (int)new_size); 2273 ifp->if_broot_bytes = (int)new_size; 2274 ASSERT(ifp->if_broot_bytes <= 2275 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2276 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t)); 2277 return; 2278 } 2279 2280 /* 2281 * rec_diff is less than 0. In this case, we are shrinking the 2282 * if_broot buffer. It must already exist. If we go to zero 2283 * records, just get rid of the root and clear the status bit. 2284 */ 2285 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); 2286 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); 2287 new_max = cur_max + rec_diff; 2288 ASSERT(new_max >= 0); 2289 if (new_max > 0) 2290 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2291 else 2292 new_size = 0; 2293 if (new_size > 0) { 2294 new_broot = kmem_alloc(new_size, KM_SLEEP); 2295 /* 2296 * First copy over the btree block header. 2297 */ 2298 memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN); 2299 } else { 2300 new_broot = NULL; 2301 ifp->if_flags &= ~XFS_IFBROOT; 2302 } 2303 2304 /* 2305 * Only copy the records and pointers if there are any. 2306 */ 2307 if (new_max > 0) { 2308 /* 2309 * First copy the records. 2310 */ 2311 op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1); 2312 np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1); 2313 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t)); 2314 2315 /* 2316 * Then copy the pointers. 2317 */ 2318 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2319 ifp->if_broot_bytes); 2320 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1, 2321 (int)new_size); 2322 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); 2323 } 2324 kmem_free(ifp->if_broot); 2325 ifp->if_broot = new_broot; 2326 ifp->if_broot_bytes = (int)new_size; 2327 ASSERT(ifp->if_broot_bytes <= 2328 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); 2329 return; 2330 } 2331 2332 2333 /* 2334 * This is called when the amount of space needed for if_data 2335 * is increased or decreased. The change in size is indicated by 2336 * the number of bytes that need to be added or deleted in the 2337 * byte_diff parameter. 2338 * 2339 * If the amount of space needed has decreased below the size of the 2340 * inline buffer, then switch to using the inline buffer. Otherwise, 2341 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer 2342 * to what is needed. 2343 * 2344 * ip -- the inode whose if_data area is changing 2345 * byte_diff -- the change in the number of bytes, positive or negative, 2346 * requested for the if_data array. 2347 */ 2348 void 2349 xfs_idata_realloc( 2350 xfs_inode_t *ip, 2351 int byte_diff, 2352 int whichfork) 2353 { 2354 xfs_ifork_t *ifp; 2355 int new_size; 2356 int real_size; 2357 2358 if (byte_diff == 0) { 2359 return; 2360 } 2361 2362 ifp = XFS_IFORK_PTR(ip, whichfork); 2363 new_size = (int)ifp->if_bytes + byte_diff; 2364 ASSERT(new_size >= 0); 2365 2366 if (new_size == 0) { 2367 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2368 kmem_free(ifp->if_u1.if_data); 2369 } 2370 ifp->if_u1.if_data = NULL; 2371 real_size = 0; 2372 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) { 2373 /* 2374 * If the valid extents/data can fit in if_inline_ext/data, 2375 * copy them from the malloc'd vector and free it. 2376 */ 2377 if (ifp->if_u1.if_data == NULL) { 2378 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2379 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2380 ASSERT(ifp->if_real_bytes != 0); 2381 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data, 2382 new_size); 2383 kmem_free(ifp->if_u1.if_data); 2384 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 2385 } 2386 real_size = 0; 2387 } else { 2388 /* 2389 * Stuck with malloc/realloc. 2390 * For inline data, the underlying buffer must be 2391 * a multiple of 4 bytes in size so that it can be 2392 * logged and stay on word boundaries. We enforce 2393 * that here. 2394 */ 2395 real_size = roundup(new_size, 4); 2396 if (ifp->if_u1.if_data == NULL) { 2397 ASSERT(ifp->if_real_bytes == 0); 2398 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 2399 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2400 /* 2401 * Only do the realloc if the underlying size 2402 * is really changing. 2403 */ 2404 if (ifp->if_real_bytes != real_size) { 2405 ifp->if_u1.if_data = 2406 kmem_realloc(ifp->if_u1.if_data, 2407 real_size, 2408 ifp->if_real_bytes, 2409 KM_SLEEP); 2410 } 2411 } else { 2412 ASSERT(ifp->if_real_bytes == 0); 2413 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 2414 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, 2415 ifp->if_bytes); 2416 } 2417 } 2418 ifp->if_real_bytes = real_size; 2419 ifp->if_bytes = new_size; 2420 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2421 } 2422 2423 void 2424 xfs_idestroy_fork( 2425 xfs_inode_t *ip, 2426 int whichfork) 2427 { 2428 xfs_ifork_t *ifp; 2429 2430 ifp = XFS_IFORK_PTR(ip, whichfork); 2431 if (ifp->if_broot != NULL) { 2432 kmem_free(ifp->if_broot); 2433 ifp->if_broot = NULL; 2434 } 2435 2436 /* 2437 * If the format is local, then we can't have an extents 2438 * array so just look for an inline data array. If we're 2439 * not local then we may or may not have an extents list, 2440 * so check and free it up if we do. 2441 */ 2442 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 2443 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) && 2444 (ifp->if_u1.if_data != NULL)) { 2445 ASSERT(ifp->if_real_bytes != 0); 2446 kmem_free(ifp->if_u1.if_data); 2447 ifp->if_u1.if_data = NULL; 2448 ifp->if_real_bytes = 0; 2449 } 2450 } else if ((ifp->if_flags & XFS_IFEXTENTS) && 2451 ((ifp->if_flags & XFS_IFEXTIREC) || 2452 ((ifp->if_u1.if_extents != NULL) && 2453 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) { 2454 ASSERT(ifp->if_real_bytes != 0); 2455 xfs_iext_destroy(ifp); 2456 } 2457 ASSERT(ifp->if_u1.if_extents == NULL || 2458 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); 2459 ASSERT(ifp->if_real_bytes == 0); 2460 if (whichfork == XFS_ATTR_FORK) { 2461 kmem_zone_free(xfs_ifork_zone, ip->i_afp); 2462 ip->i_afp = NULL; 2463 } 2464 } 2465 2466 /* 2467 * Increment the pin count of the given buffer. 2468 * This value is protected by ipinlock spinlock in the mount structure. 2469 */ 2470 void 2471 xfs_ipin( 2472 xfs_inode_t *ip) 2473 { 2474 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2475 2476 atomic_inc(&ip->i_pincount); 2477 } 2478 2479 /* 2480 * Decrement the pin count of the given inode, and wake up 2481 * anyone in xfs_iwait_unpin() if the count goes to 0. The 2482 * inode must have been previously pinned with a call to xfs_ipin(). 2483 */ 2484 void 2485 xfs_iunpin( 2486 xfs_inode_t *ip) 2487 { 2488 ASSERT(atomic_read(&ip->i_pincount) > 0); 2489 2490 if (atomic_dec_and_test(&ip->i_pincount)) 2491 wake_up(&ip->i_ipin_wait); 2492 } 2493 2494 /* 2495 * This is called to unpin an inode. It can be directed to wait or to return 2496 * immediately without waiting for the inode to be unpinned. The caller must 2497 * have the inode locked in at least shared mode so that the buffer cannot be 2498 * subsequently pinned once someone is waiting for it to be unpinned. 2499 */ 2500 STATIC void 2501 __xfs_iunpin_wait( 2502 xfs_inode_t *ip, 2503 int wait) 2504 { 2505 xfs_inode_log_item_t *iip = ip->i_itemp; 2506 2507 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2508 if (atomic_read(&ip->i_pincount) == 0) 2509 return; 2510 2511 /* Give the log a push to start the unpinning I/O */ 2512 xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ? 2513 iip->ili_last_lsn : 0, XFS_LOG_FORCE); 2514 if (wait) 2515 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0)); 2516 } 2517 2518 static inline void 2519 xfs_iunpin_wait( 2520 xfs_inode_t *ip) 2521 { 2522 __xfs_iunpin_wait(ip, 1); 2523 } 2524 2525 static inline void 2526 xfs_iunpin_nowait( 2527 xfs_inode_t *ip) 2528 { 2529 __xfs_iunpin_wait(ip, 0); 2530 } 2531 2532 2533 /* 2534 * xfs_iextents_copy() 2535 * 2536 * This is called to copy the REAL extents (as opposed to the delayed 2537 * allocation extents) from the inode into the given buffer. It 2538 * returns the number of bytes copied into the buffer. 2539 * 2540 * If there are no delayed allocation extents, then we can just 2541 * memcpy() the extents into the buffer. Otherwise, we need to 2542 * examine each extent in turn and skip those which are delayed. 2543 */ 2544 int 2545 xfs_iextents_copy( 2546 xfs_inode_t *ip, 2547 xfs_bmbt_rec_t *dp, 2548 int whichfork) 2549 { 2550 int copied; 2551 int i; 2552 xfs_ifork_t *ifp; 2553 int nrecs; 2554 xfs_fsblock_t start_block; 2555 2556 ifp = XFS_IFORK_PTR(ip, whichfork); 2557 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2558 ASSERT(ifp->if_bytes > 0); 2559 2560 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2561 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork); 2562 ASSERT(nrecs > 0); 2563 2564 /* 2565 * There are some delayed allocation extents in the 2566 * inode, so copy the extents one at a time and skip 2567 * the delayed ones. There must be at least one 2568 * non-delayed extent. 2569 */ 2570 copied = 0; 2571 for (i = 0; i < nrecs; i++) { 2572 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); 2573 start_block = xfs_bmbt_get_startblock(ep); 2574 if (isnullstartblock(start_block)) { 2575 /* 2576 * It's a delayed allocation extent, so skip it. 2577 */ 2578 continue; 2579 } 2580 2581 /* Translate to on disk format */ 2582 put_unaligned(cpu_to_be64(ep->l0), &dp->l0); 2583 put_unaligned(cpu_to_be64(ep->l1), &dp->l1); 2584 dp++; 2585 copied++; 2586 } 2587 ASSERT(copied != 0); 2588 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip)); 2589 2590 return (copied * (uint)sizeof(xfs_bmbt_rec_t)); 2591 } 2592 2593 /* 2594 * Each of the following cases stores data into the same region 2595 * of the on-disk inode, so only one of them can be valid at 2596 * any given time. While it is possible to have conflicting formats 2597 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is 2598 * in EXTENTS format, this can only happen when the fork has 2599 * changed formats after being modified but before being flushed. 2600 * In these cases, the format always takes precedence, because the 2601 * format indicates the current state of the fork. 2602 */ 2603 /*ARGSUSED*/ 2604 STATIC void 2605 xfs_iflush_fork( 2606 xfs_inode_t *ip, 2607 xfs_dinode_t *dip, 2608 xfs_inode_log_item_t *iip, 2609 int whichfork, 2610 xfs_buf_t *bp) 2611 { 2612 char *cp; 2613 xfs_ifork_t *ifp; 2614 xfs_mount_t *mp; 2615 #ifdef XFS_TRANS_DEBUG 2616 int first; 2617 #endif 2618 static const short brootflag[2] = 2619 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 2620 static const short dataflag[2] = 2621 { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; 2622 static const short extflag[2] = 2623 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 2624 2625 if (!iip) 2626 return; 2627 ifp = XFS_IFORK_PTR(ip, whichfork); 2628 /* 2629 * This can happen if we gave up in iformat in an error path, 2630 * for the attribute fork. 2631 */ 2632 if (!ifp) { 2633 ASSERT(whichfork == XFS_ATTR_FORK); 2634 return; 2635 } 2636 cp = XFS_DFORK_PTR(dip, whichfork); 2637 mp = ip->i_mount; 2638 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 2639 case XFS_DINODE_FMT_LOCAL: 2640 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) && 2641 (ifp->if_bytes > 0)) { 2642 ASSERT(ifp->if_u1.if_data != NULL); 2643 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); 2644 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes); 2645 } 2646 break; 2647 2648 case XFS_DINODE_FMT_EXTENTS: 2649 ASSERT((ifp->if_flags & XFS_IFEXTENTS) || 2650 !(iip->ili_format.ilf_fields & extflag[whichfork])); 2651 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) || 2652 (ifp->if_bytes == 0)); 2653 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) || 2654 (ifp->if_bytes > 0)); 2655 if ((iip->ili_format.ilf_fields & extflag[whichfork]) && 2656 (ifp->if_bytes > 0)) { 2657 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); 2658 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, 2659 whichfork); 2660 } 2661 break; 2662 2663 case XFS_DINODE_FMT_BTREE: 2664 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) && 2665 (ifp->if_broot_bytes > 0)) { 2666 ASSERT(ifp->if_broot != NULL); 2667 ASSERT(ifp->if_broot_bytes <= 2668 (XFS_IFORK_SIZE(ip, whichfork) + 2669 XFS_BROOT_SIZE_ADJ)); 2670 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes, 2671 (xfs_bmdr_block_t *)cp, 2672 XFS_DFORK_SIZE(dip, mp, whichfork)); 2673 } 2674 break; 2675 2676 case XFS_DINODE_FMT_DEV: 2677 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { 2678 ASSERT(whichfork == XFS_DATA_FORK); 2679 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev); 2680 } 2681 break; 2682 2683 case XFS_DINODE_FMT_UUID: 2684 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { 2685 ASSERT(whichfork == XFS_DATA_FORK); 2686 memcpy(XFS_DFORK_DPTR(dip), 2687 &ip->i_df.if_u2.if_uuid, 2688 sizeof(uuid_t)); 2689 } 2690 break; 2691 2692 default: 2693 ASSERT(0); 2694 break; 2695 } 2696 } 2697 2698 STATIC int 2699 xfs_iflush_cluster( 2700 xfs_inode_t *ip, 2701 xfs_buf_t *bp) 2702 { 2703 xfs_mount_t *mp = ip->i_mount; 2704 xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); 2705 unsigned long first_index, mask; 2706 unsigned long inodes_per_cluster; 2707 int ilist_size; 2708 xfs_inode_t **ilist; 2709 xfs_inode_t *iq; 2710 int nr_found; 2711 int clcount = 0; 2712 int bufwasdelwri; 2713 int i; 2714 2715 ASSERT(pag->pagi_inodeok); 2716 ASSERT(pag->pag_ici_init); 2717 2718 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog; 2719 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); 2720 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); 2721 if (!ilist) 2722 return 0; 2723 2724 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); 2725 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; 2726 read_lock(&pag->pag_ici_lock); 2727 /* really need a gang lookup range call here */ 2728 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, 2729 first_index, inodes_per_cluster); 2730 if (nr_found == 0) 2731 goto out_free; 2732 2733 for (i = 0; i < nr_found; i++) { 2734 iq = ilist[i]; 2735 if (iq == ip) 2736 continue; 2737 /* if the inode lies outside this cluster, we're done. */ 2738 if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) 2739 break; 2740 /* 2741 * Do an un-protected check to see if the inode is dirty and 2742 * is a candidate for flushing. These checks will be repeated 2743 * later after the appropriate locks are acquired. 2744 */ 2745 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0) 2746 continue; 2747 2748 /* 2749 * Try to get locks. If any are unavailable or it is pinned, 2750 * then this inode cannot be flushed and is skipped. 2751 */ 2752 2753 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) 2754 continue; 2755 if (!xfs_iflock_nowait(iq)) { 2756 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2757 continue; 2758 } 2759 if (xfs_ipincount(iq)) { 2760 xfs_ifunlock(iq); 2761 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2762 continue; 2763 } 2764 2765 /* 2766 * arriving here means that this inode can be flushed. First 2767 * re-check that it's dirty before flushing. 2768 */ 2769 if (!xfs_inode_clean(iq)) { 2770 int error; 2771 error = xfs_iflush_int(iq, bp); 2772 if (error) { 2773 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2774 goto cluster_corrupt_out; 2775 } 2776 clcount++; 2777 } else { 2778 xfs_ifunlock(iq); 2779 } 2780 xfs_iunlock(iq, XFS_ILOCK_SHARED); 2781 } 2782 2783 if (clcount) { 2784 XFS_STATS_INC(xs_icluster_flushcnt); 2785 XFS_STATS_ADD(xs_icluster_flushinode, clcount); 2786 } 2787 2788 out_free: 2789 read_unlock(&pag->pag_ici_lock); 2790 kmem_free(ilist); 2791 return 0; 2792 2793 2794 cluster_corrupt_out: 2795 /* 2796 * Corruption detected in the clustering loop. Invalidate the 2797 * inode buffer and shut down the filesystem. 2798 */ 2799 read_unlock(&pag->pag_ici_lock); 2800 /* 2801 * Clean up the buffer. If it was B_DELWRI, just release it -- 2802 * brelse can handle it with no problems. If not, shut down the 2803 * filesystem before releasing the buffer. 2804 */ 2805 bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp); 2806 if (bufwasdelwri) 2807 xfs_buf_relse(bp); 2808 2809 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 2810 2811 if (!bufwasdelwri) { 2812 /* 2813 * Just like incore_relse: if we have b_iodone functions, 2814 * mark the buffer as an error and call them. Otherwise 2815 * mark it as stale and brelse. 2816 */ 2817 if (XFS_BUF_IODONE_FUNC(bp)) { 2818 XFS_BUF_CLR_BDSTRAT_FUNC(bp); 2819 XFS_BUF_UNDONE(bp); 2820 XFS_BUF_STALE(bp); 2821 XFS_BUF_ERROR(bp,EIO); 2822 xfs_biodone(bp); 2823 } else { 2824 XFS_BUF_STALE(bp); 2825 xfs_buf_relse(bp); 2826 } 2827 } 2828 2829 /* 2830 * Unlocks the flush lock 2831 */ 2832 xfs_iflush_abort(iq); 2833 kmem_free(ilist); 2834 return XFS_ERROR(EFSCORRUPTED); 2835 } 2836 2837 /* 2838 * xfs_iflush() will write a modified inode's changes out to the 2839 * inode's on disk home. The caller must have the inode lock held 2840 * in at least shared mode and the inode flush completion must be 2841 * active as well. The inode lock will still be held upon return from 2842 * the call and the caller is free to unlock it. 2843 * The inode flush will be completed when the inode reaches the disk. 2844 * The flags indicate how the inode's buffer should be written out. 2845 */ 2846 int 2847 xfs_iflush( 2848 xfs_inode_t *ip, 2849 uint flags) 2850 { 2851 xfs_inode_log_item_t *iip; 2852 xfs_buf_t *bp; 2853 xfs_dinode_t *dip; 2854 xfs_mount_t *mp; 2855 int error; 2856 int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK); 2857 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) }; 2858 2859 XFS_STATS_INC(xs_iflush_count); 2860 2861 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2862 ASSERT(!completion_done(&ip->i_flush)); 2863 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 2864 ip->i_d.di_nextents > ip->i_df.if_ext_max); 2865 2866 iip = ip->i_itemp; 2867 mp = ip->i_mount; 2868 2869 /* 2870 * If the inode isn't dirty, then just release the inode 2871 * flush lock and do nothing. 2872 */ 2873 if (xfs_inode_clean(ip)) { 2874 xfs_ifunlock(ip); 2875 return 0; 2876 } 2877 2878 /* 2879 * We can't flush the inode until it is unpinned, so wait for it if we 2880 * are allowed to block. We know noone new can pin it, because we are 2881 * holding the inode lock shared and you need to hold it exclusively to 2882 * pin the inode. 2883 * 2884 * If we are not allowed to block, force the log out asynchronously so 2885 * that when we come back the inode will be unpinned. If other inodes 2886 * in the same cluster are dirty, they will probably write the inode 2887 * out for us if they occur after the log force completes. 2888 */ 2889 if (noblock && xfs_ipincount(ip)) { 2890 xfs_iunpin_nowait(ip); 2891 xfs_ifunlock(ip); 2892 return EAGAIN; 2893 } 2894 xfs_iunpin_wait(ip); 2895 2896 /* 2897 * This may have been unpinned because the filesystem is shutting 2898 * down forcibly. If that's the case we must not write this inode 2899 * to disk, because the log record didn't make it to disk! 2900 */ 2901 if (XFS_FORCED_SHUTDOWN(mp)) { 2902 ip->i_update_core = 0; 2903 if (iip) 2904 iip->ili_format.ilf_fields = 0; 2905 xfs_ifunlock(ip); 2906 return XFS_ERROR(EIO); 2907 } 2908 2909 /* 2910 * Decide how buffer will be flushed out. This is done before 2911 * the call to xfs_iflush_int because this field is zeroed by it. 2912 */ 2913 if (iip != NULL && iip->ili_format.ilf_fields != 0) { 2914 /* 2915 * Flush out the inode buffer according to the directions 2916 * of the caller. In the cases where the caller has given 2917 * us a choice choose the non-delwri case. This is because 2918 * the inode is in the AIL and we need to get it out soon. 2919 */ 2920 switch (flags) { 2921 case XFS_IFLUSH_SYNC: 2922 case XFS_IFLUSH_DELWRI_ELSE_SYNC: 2923 flags = 0; 2924 break; 2925 case XFS_IFLUSH_ASYNC_NOBLOCK: 2926 case XFS_IFLUSH_ASYNC: 2927 case XFS_IFLUSH_DELWRI_ELSE_ASYNC: 2928 flags = INT_ASYNC; 2929 break; 2930 case XFS_IFLUSH_DELWRI: 2931 flags = INT_DELWRI; 2932 break; 2933 default: 2934 ASSERT(0); 2935 flags = 0; 2936 break; 2937 } 2938 } else { 2939 switch (flags) { 2940 case XFS_IFLUSH_DELWRI_ELSE_SYNC: 2941 case XFS_IFLUSH_DELWRI_ELSE_ASYNC: 2942 case XFS_IFLUSH_DELWRI: 2943 flags = INT_DELWRI; 2944 break; 2945 case XFS_IFLUSH_ASYNC_NOBLOCK: 2946 case XFS_IFLUSH_ASYNC: 2947 flags = INT_ASYNC; 2948 break; 2949 case XFS_IFLUSH_SYNC: 2950 flags = 0; 2951 break; 2952 default: 2953 ASSERT(0); 2954 flags = 0; 2955 break; 2956 } 2957 } 2958 2959 /* 2960 * Get the buffer containing the on-disk inode. 2961 */ 2962 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 2963 noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK); 2964 if (error || !bp) { 2965 xfs_ifunlock(ip); 2966 return error; 2967 } 2968 2969 /* 2970 * First flush out the inode that xfs_iflush was called with. 2971 */ 2972 error = xfs_iflush_int(ip, bp); 2973 if (error) 2974 goto corrupt_out; 2975 2976 /* 2977 * If the buffer is pinned then push on the log now so we won't 2978 * get stuck waiting in the write for too long. 2979 */ 2980 if (XFS_BUF_ISPINNED(bp)) 2981 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); 2982 2983 /* 2984 * inode clustering: 2985 * see if other inodes can be gathered into this write 2986 */ 2987 error = xfs_iflush_cluster(ip, bp); 2988 if (error) 2989 goto cluster_corrupt_out; 2990 2991 if (flags & INT_DELWRI) { 2992 xfs_bdwrite(mp, bp); 2993 } else if (flags & INT_ASYNC) { 2994 error = xfs_bawrite(mp, bp); 2995 } else { 2996 error = xfs_bwrite(mp, bp); 2997 } 2998 return error; 2999 3000 corrupt_out: 3001 xfs_buf_relse(bp); 3002 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3003 cluster_corrupt_out: 3004 /* 3005 * Unlocks the flush lock 3006 */ 3007 xfs_iflush_abort(ip); 3008 return XFS_ERROR(EFSCORRUPTED); 3009 } 3010 3011 3012 STATIC int 3013 xfs_iflush_int( 3014 xfs_inode_t *ip, 3015 xfs_buf_t *bp) 3016 { 3017 xfs_inode_log_item_t *iip; 3018 xfs_dinode_t *dip; 3019 xfs_mount_t *mp; 3020 #ifdef XFS_TRANS_DEBUG 3021 int first; 3022 #endif 3023 3024 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3025 ASSERT(!completion_done(&ip->i_flush)); 3026 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 3027 ip->i_d.di_nextents > ip->i_df.if_ext_max); 3028 3029 iip = ip->i_itemp; 3030 mp = ip->i_mount; 3031 3032 3033 /* 3034 * If the inode isn't dirty, then just release the inode 3035 * flush lock and do nothing. 3036 */ 3037 if (xfs_inode_clean(ip)) { 3038 xfs_ifunlock(ip); 3039 return 0; 3040 } 3041 3042 /* set *dip = inode's place in the buffer */ 3043 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); 3044 3045 /* 3046 * Clear i_update_core before copying out the data. 3047 * This is for coordination with our timestamp updates 3048 * that don't hold the inode lock. They will always 3049 * update the timestamps BEFORE setting i_update_core, 3050 * so if we clear i_update_core after they set it we 3051 * are guaranteed to see their updates to the timestamps. 3052 * I believe that this depends on strongly ordered memory 3053 * semantics, but we have that. We use the SYNCHRONIZE 3054 * macro to make sure that the compiler does not reorder 3055 * the i_update_core access below the data copy below. 3056 */ 3057 ip->i_update_core = 0; 3058 SYNCHRONIZE(); 3059 3060 /* 3061 * Make sure to get the latest atime from the Linux inode. 3062 */ 3063 xfs_synchronize_atime(ip); 3064 3065 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC, 3066 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 3067 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3068 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p", 3069 ip->i_ino, be16_to_cpu(dip->di_magic), dip); 3070 goto corrupt_out; 3071 } 3072 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, 3073 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { 3074 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3075 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x", 3076 ip->i_ino, ip, ip->i_d.di_magic); 3077 goto corrupt_out; 3078 } 3079 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 3080 if (XFS_TEST_ERROR( 3081 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 3082 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 3083 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 3084 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3085 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p", 3086 ip->i_ino, ip); 3087 goto corrupt_out; 3088 } 3089 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 3090 if (XFS_TEST_ERROR( 3091 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 3092 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 3093 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 3094 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 3095 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3096 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p", 3097 ip->i_ino, ip); 3098 goto corrupt_out; 3099 } 3100 } 3101 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 3102 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 3103 XFS_RANDOM_IFLUSH_5)) { 3104 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3105 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p", 3106 ip->i_ino, 3107 ip->i_d.di_nextents + ip->i_d.di_anextents, 3108 ip->i_d.di_nblocks, 3109 ip); 3110 goto corrupt_out; 3111 } 3112 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 3113 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 3114 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3115 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 3116 ip->i_ino, ip->i_d.di_forkoff, ip); 3117 goto corrupt_out; 3118 } 3119 /* 3120 * bump the flush iteration count, used to detect flushes which 3121 * postdate a log record during recovery. 3122 */ 3123 3124 ip->i_d.di_flushiter++; 3125 3126 /* 3127 * Copy the dirty parts of the inode into the on-disk 3128 * inode. We always copy out the core of the inode, 3129 * because if the inode is dirty at all the core must 3130 * be. 3131 */ 3132 xfs_dinode_to_disk(dip, &ip->i_d); 3133 3134 /* Wrap, we never let the log put out DI_MAX_FLUSH */ 3135 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 3136 ip->i_d.di_flushiter = 0; 3137 3138 /* 3139 * If this is really an old format inode and the superblock version 3140 * has not been updated to support only new format inodes, then 3141 * convert back to the old inode format. If the superblock version 3142 * has been updated, then make the conversion permanent. 3143 */ 3144 ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb)); 3145 if (ip->i_d.di_version == 1) { 3146 if (!xfs_sb_version_hasnlink(&mp->m_sb)) { 3147 /* 3148 * Convert it back. 3149 */ 3150 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); 3151 dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink); 3152 } else { 3153 /* 3154 * The superblock version has already been bumped, 3155 * so just make the conversion to the new inode 3156 * format permanent. 3157 */ 3158 ip->i_d.di_version = 2; 3159 dip->di_version = 2; 3160 ip->i_d.di_onlink = 0; 3161 dip->di_onlink = 0; 3162 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 3163 memset(&(dip->di_pad[0]), 0, 3164 sizeof(dip->di_pad)); 3165 ASSERT(ip->i_d.di_projid == 0); 3166 } 3167 } 3168 3169 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp); 3170 if (XFS_IFORK_Q(ip)) 3171 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); 3172 xfs_inobp_check(mp, bp); 3173 3174 /* 3175 * We've recorded everything logged in the inode, so we'd 3176 * like to clear the ilf_fields bits so we don't log and 3177 * flush things unnecessarily. However, we can't stop 3178 * logging all this information until the data we've copied 3179 * into the disk buffer is written to disk. If we did we might 3180 * overwrite the copy of the inode in the log with all the 3181 * data after re-logging only part of it, and in the face of 3182 * a crash we wouldn't have all the data we need to recover. 3183 * 3184 * What we do is move the bits to the ili_last_fields field. 3185 * When logging the inode, these bits are moved back to the 3186 * ilf_fields field. In the xfs_iflush_done() routine we 3187 * clear ili_last_fields, since we know that the information 3188 * those bits represent is permanently on disk. As long as 3189 * the flush completes before the inode is logged again, then 3190 * both ilf_fields and ili_last_fields will be cleared. 3191 * 3192 * We can play with the ilf_fields bits here, because the inode 3193 * lock must be held exclusively in order to set bits there 3194 * and the flush lock protects the ili_last_fields bits. 3195 * Set ili_logged so the flush done 3196 * routine can tell whether or not to look in the AIL. 3197 * Also, store the current LSN of the inode so that we can tell 3198 * whether the item has moved in the AIL from xfs_iflush_done(). 3199 * In order to read the lsn we need the AIL lock, because 3200 * it is a 64 bit value that cannot be read atomically. 3201 */ 3202 if (iip != NULL && iip->ili_format.ilf_fields != 0) { 3203 iip->ili_last_fields = iip->ili_format.ilf_fields; 3204 iip->ili_format.ilf_fields = 0; 3205 iip->ili_logged = 1; 3206 3207 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 3208 &iip->ili_item.li_lsn); 3209 3210 /* 3211 * Attach the function xfs_iflush_done to the inode's 3212 * buffer. This will remove the inode from the AIL 3213 * and unlock the inode's flush lock when the inode is 3214 * completely written to disk. 3215 */ 3216 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*)) 3217 xfs_iflush_done, (xfs_log_item_t *)iip); 3218 3219 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); 3220 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); 3221 } else { 3222 /* 3223 * We're flushing an inode which is not in the AIL and has 3224 * not been logged but has i_update_core set. For this 3225 * case we can use a B_DELWRI flush and immediately drop 3226 * the inode flush lock because we can avoid the whole 3227 * AIL state thing. It's OK to drop the flush lock now, 3228 * because we've already locked the buffer and to do anything 3229 * you really need both. 3230 */ 3231 if (iip != NULL) { 3232 ASSERT(iip->ili_logged == 0); 3233 ASSERT(iip->ili_last_fields == 0); 3234 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0); 3235 } 3236 xfs_ifunlock(ip); 3237 } 3238 3239 return 0; 3240 3241 corrupt_out: 3242 return XFS_ERROR(EFSCORRUPTED); 3243 } 3244 3245 3246 3247 #ifdef XFS_ILOCK_TRACE 3248 void 3249 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) 3250 { 3251 ktrace_enter(ip->i_lock_trace, 3252 (void *)ip, 3253 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */ 3254 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */ 3255 (void *)ra, /* caller of ilock */ 3256 (void *)(unsigned long)current_cpu(), 3257 (void *)(unsigned long)current_pid(), 3258 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); 3259 } 3260 #endif 3261 3262 /* 3263 * Return a pointer to the extent record at file index idx. 3264 */ 3265 xfs_bmbt_rec_host_t * 3266 xfs_iext_get_ext( 3267 xfs_ifork_t *ifp, /* inode fork pointer */ 3268 xfs_extnum_t idx) /* index of target extent */ 3269 { 3270 ASSERT(idx >= 0); 3271 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { 3272 return ifp->if_u1.if_ext_irec->er_extbuf; 3273 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3274 xfs_ext_irec_t *erp; /* irec pointer */ 3275 int erp_idx = 0; /* irec index */ 3276 xfs_extnum_t page_idx = idx; /* ext index in target list */ 3277 3278 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3279 return &erp->er_extbuf[page_idx]; 3280 } else if (ifp->if_bytes) { 3281 return &ifp->if_u1.if_extents[idx]; 3282 } else { 3283 return NULL; 3284 } 3285 } 3286 3287 /* 3288 * Insert new item(s) into the extent records for incore inode 3289 * fork 'ifp'. 'count' new items are inserted at index 'idx'. 3290 */ 3291 void 3292 xfs_iext_insert( 3293 xfs_ifork_t *ifp, /* inode fork pointer */ 3294 xfs_extnum_t idx, /* starting index of new items */ 3295 xfs_extnum_t count, /* number of inserted items */ 3296 xfs_bmbt_irec_t *new) /* items to insert */ 3297 { 3298 xfs_extnum_t i; /* extent record index */ 3299 3300 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3301 xfs_iext_add(ifp, idx, count); 3302 for (i = idx; i < idx + count; i++, new++) 3303 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new); 3304 } 3305 3306 /* 3307 * This is called when the amount of space required for incore file 3308 * extents needs to be increased. The ext_diff parameter stores the 3309 * number of new extents being added and the idx parameter contains 3310 * the extent index where the new extents will be added. If the new 3311 * extents are being appended, then we just need to (re)allocate and 3312 * initialize the space. Otherwise, if the new extents are being 3313 * inserted into the middle of the existing entries, a bit more work 3314 * is required to make room for the new extents to be inserted. The 3315 * caller is responsible for filling in the new extent entries upon 3316 * return. 3317 */ 3318 void 3319 xfs_iext_add( 3320 xfs_ifork_t *ifp, /* inode fork pointer */ 3321 xfs_extnum_t idx, /* index to begin adding exts */ 3322 int ext_diff) /* number of extents to add */ 3323 { 3324 int byte_diff; /* new bytes being added */ 3325 int new_size; /* size of extents after adding */ 3326 xfs_extnum_t nextents; /* number of extents in file */ 3327 3328 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3329 ASSERT((idx >= 0) && (idx <= nextents)); 3330 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t); 3331 new_size = ifp->if_bytes + byte_diff; 3332 /* 3333 * If the new number of extents (nextents + ext_diff) 3334 * fits inside the inode, then continue to use the inline 3335 * extent buffer. 3336 */ 3337 if (nextents + ext_diff <= XFS_INLINE_EXTS) { 3338 if (idx < nextents) { 3339 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff], 3340 &ifp->if_u2.if_inline_ext[idx], 3341 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 3342 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff); 3343 } 3344 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 3345 ifp->if_real_bytes = 0; 3346 ifp->if_lastex = nextents + ext_diff; 3347 } 3348 /* 3349 * Otherwise use a linear (direct) extent list. 3350 * If the extents are currently inside the inode, 3351 * xfs_iext_realloc_direct will switch us from 3352 * inline to direct extent allocation mode. 3353 */ 3354 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) { 3355 xfs_iext_realloc_direct(ifp, new_size); 3356 if (idx < nextents) { 3357 memmove(&ifp->if_u1.if_extents[idx + ext_diff], 3358 &ifp->if_u1.if_extents[idx], 3359 (nextents - idx) * sizeof(xfs_bmbt_rec_t)); 3360 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff); 3361 } 3362 } 3363 /* Indirection array */ 3364 else { 3365 xfs_ext_irec_t *erp; 3366 int erp_idx = 0; 3367 int page_idx = idx; 3368 3369 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS); 3370 if (ifp->if_flags & XFS_IFEXTIREC) { 3371 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1); 3372 } else { 3373 xfs_iext_irec_init(ifp); 3374 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3375 erp = ifp->if_u1.if_ext_irec; 3376 } 3377 /* Extents fit in target extent page */ 3378 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) { 3379 if (page_idx < erp->er_extcount) { 3380 memmove(&erp->er_extbuf[page_idx + ext_diff], 3381 &erp->er_extbuf[page_idx], 3382 (erp->er_extcount - page_idx) * 3383 sizeof(xfs_bmbt_rec_t)); 3384 memset(&erp->er_extbuf[page_idx], 0, byte_diff); 3385 } 3386 erp->er_extcount += ext_diff; 3387 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3388 } 3389 /* Insert a new extent page */ 3390 else if (erp) { 3391 xfs_iext_add_indirect_multi(ifp, 3392 erp_idx, page_idx, ext_diff); 3393 } 3394 /* 3395 * If extent(s) are being appended to the last page in 3396 * the indirection array and the new extent(s) don't fit 3397 * in the page, then erp is NULL and erp_idx is set to 3398 * the next index needed in the indirection array. 3399 */ 3400 else { 3401 int count = ext_diff; 3402 3403 while (count) { 3404 erp = xfs_iext_irec_new(ifp, erp_idx); 3405 erp->er_extcount = count; 3406 count -= MIN(count, (int)XFS_LINEAR_EXTS); 3407 if (count) { 3408 erp_idx++; 3409 } 3410 } 3411 } 3412 } 3413 ifp->if_bytes = new_size; 3414 } 3415 3416 /* 3417 * This is called when incore extents are being added to the indirection 3418 * array and the new extents do not fit in the target extent list. The 3419 * erp_idx parameter contains the irec index for the target extent list 3420 * in the indirection array, and the idx parameter contains the extent 3421 * index within the list. The number of extents being added is stored 3422 * in the count parameter. 3423 * 3424 * |-------| |-------| 3425 * | | | | idx - number of extents before idx 3426 * | idx | | count | 3427 * | | | | count - number of extents being inserted at idx 3428 * |-------| |-------| 3429 * | count | | nex2 | nex2 - number of extents after idx + count 3430 * |-------| |-------| 3431 */ 3432 void 3433 xfs_iext_add_indirect_multi( 3434 xfs_ifork_t *ifp, /* inode fork pointer */ 3435 int erp_idx, /* target extent irec index */ 3436 xfs_extnum_t idx, /* index within target list */ 3437 int count) /* new extents being added */ 3438 { 3439 int byte_diff; /* new bytes being added */ 3440 xfs_ext_irec_t *erp; /* pointer to irec entry */ 3441 xfs_extnum_t ext_diff; /* number of extents to add */ 3442 xfs_extnum_t ext_cnt; /* new extents still needed */ 3443 xfs_extnum_t nex2; /* extents after idx + count */ 3444 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */ 3445 int nlists; /* number of irec's (lists) */ 3446 3447 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3448 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3449 nex2 = erp->er_extcount - idx; 3450 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3451 3452 /* 3453 * Save second part of target extent list 3454 * (all extents past */ 3455 if (nex2) { 3456 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3457 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS); 3458 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); 3459 erp->er_extcount -= nex2; 3460 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); 3461 memset(&erp->er_extbuf[idx], 0, byte_diff); 3462 } 3463 3464 /* 3465 * Add the new extents to the end of the target 3466 * list, then allocate new irec record(s) and 3467 * extent buffer(s) as needed to store the rest 3468 * of the new extents. 3469 */ 3470 ext_cnt = count; 3471 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount); 3472 if (ext_diff) { 3473 erp->er_extcount += ext_diff; 3474 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3475 ext_cnt -= ext_diff; 3476 } 3477 while (ext_cnt) { 3478 erp_idx++; 3479 erp = xfs_iext_irec_new(ifp, erp_idx); 3480 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS); 3481 erp->er_extcount = ext_diff; 3482 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); 3483 ext_cnt -= ext_diff; 3484 } 3485 3486 /* Add nex2 extents back to indirection array */ 3487 if (nex2) { 3488 xfs_extnum_t ext_avail; 3489 int i; 3490 3491 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); 3492 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; 3493 i = 0; 3494 /* 3495 * If nex2 extents fit in the current page, append 3496 * nex2_ep after the new extents. 3497 */ 3498 if (nex2 <= ext_avail) { 3499 i = erp->er_extcount; 3500 } 3501 /* 3502 * Otherwise, check if space is available in the 3503 * next page. 3504 */ 3505 else if ((erp_idx < nlists - 1) && 3506 (nex2 <= (ext_avail = XFS_LINEAR_EXTS - 3507 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) { 3508 erp_idx++; 3509 erp++; 3510 /* Create a hole for nex2 extents */ 3511 memmove(&erp->er_extbuf[nex2], erp->er_extbuf, 3512 erp->er_extcount * sizeof(xfs_bmbt_rec_t)); 3513 } 3514 /* 3515 * Final choice, create a new extent page for 3516 * nex2 extents. 3517 */ 3518 else { 3519 erp_idx++; 3520 erp = xfs_iext_irec_new(ifp, erp_idx); 3521 } 3522 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); 3523 kmem_free(nex2_ep); 3524 erp->er_extcount += nex2; 3525 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); 3526 } 3527 } 3528 3529 /* 3530 * This is called when the amount of space required for incore file 3531 * extents needs to be decreased. The ext_diff parameter stores the 3532 * number of extents to be removed and the idx parameter contains 3533 * the extent index where the extents will be removed from. 3534 * 3535 * If the amount of space needed has decreased below the linear 3536 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous 3537 * extent array. Otherwise, use kmem_realloc() to adjust the 3538 * size to what is needed. 3539 */ 3540 void 3541 xfs_iext_remove( 3542 xfs_ifork_t *ifp, /* inode fork pointer */ 3543 xfs_extnum_t idx, /* index to begin removing exts */ 3544 int ext_diff) /* number of extents to remove */ 3545 { 3546 xfs_extnum_t nextents; /* number of extents in file */ 3547 int new_size; /* size of extents after removal */ 3548 3549 ASSERT(ext_diff > 0); 3550 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3551 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); 3552 3553 if (new_size == 0) { 3554 xfs_iext_destroy(ifp); 3555 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3556 xfs_iext_remove_indirect(ifp, idx, ext_diff); 3557 } else if (ifp->if_real_bytes) { 3558 xfs_iext_remove_direct(ifp, idx, ext_diff); 3559 } else { 3560 xfs_iext_remove_inline(ifp, idx, ext_diff); 3561 } 3562 ifp->if_bytes = new_size; 3563 } 3564 3565 /* 3566 * This removes ext_diff extents from the inline buffer, beginning 3567 * at extent index idx. 3568 */ 3569 void 3570 xfs_iext_remove_inline( 3571 xfs_ifork_t *ifp, /* inode fork pointer */ 3572 xfs_extnum_t idx, /* index to begin removing exts */ 3573 int ext_diff) /* number of extents to remove */ 3574 { 3575 int nextents; /* number of extents in file */ 3576 3577 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 3578 ASSERT(idx < XFS_INLINE_EXTS); 3579 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3580 ASSERT(((nextents - ext_diff) > 0) && 3581 (nextents - ext_diff) < XFS_INLINE_EXTS); 3582 3583 if (idx + ext_diff < nextents) { 3584 memmove(&ifp->if_u2.if_inline_ext[idx], 3585 &ifp->if_u2.if_inline_ext[idx + ext_diff], 3586 (nextents - (idx + ext_diff)) * 3587 sizeof(xfs_bmbt_rec_t)); 3588 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff], 3589 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 3590 } else { 3591 memset(&ifp->if_u2.if_inline_ext[idx], 0, 3592 ext_diff * sizeof(xfs_bmbt_rec_t)); 3593 } 3594 } 3595 3596 /* 3597 * This removes ext_diff extents from a linear (direct) extent list, 3598 * beginning at extent index idx. If the extents are being removed 3599 * from the end of the list (ie. truncate) then we just need to re- 3600 * allocate the list to remove the extra space. Otherwise, if the 3601 * extents are being removed from the middle of the existing extent 3602 * entries, then we first need to move the extent records beginning 3603 * at idx + ext_diff up in the list to overwrite the records being 3604 * removed, then remove the extra space via kmem_realloc. 3605 */ 3606 void 3607 xfs_iext_remove_direct( 3608 xfs_ifork_t *ifp, /* inode fork pointer */ 3609 xfs_extnum_t idx, /* index to begin removing exts */ 3610 int ext_diff) /* number of extents to remove */ 3611 { 3612 xfs_extnum_t nextents; /* number of extents in file */ 3613 int new_size; /* size of extents after removal */ 3614 3615 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 3616 new_size = ifp->if_bytes - 3617 (ext_diff * sizeof(xfs_bmbt_rec_t)); 3618 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3619 3620 if (new_size == 0) { 3621 xfs_iext_destroy(ifp); 3622 return; 3623 } 3624 /* Move extents up in the list (if needed) */ 3625 if (idx + ext_diff < nextents) { 3626 memmove(&ifp->if_u1.if_extents[idx], 3627 &ifp->if_u1.if_extents[idx + ext_diff], 3628 (nextents - (idx + ext_diff)) * 3629 sizeof(xfs_bmbt_rec_t)); 3630 } 3631 memset(&ifp->if_u1.if_extents[nextents - ext_diff], 3632 0, ext_diff * sizeof(xfs_bmbt_rec_t)); 3633 /* 3634 * Reallocate the direct extent list. If the extents 3635 * will fit inside the inode then xfs_iext_realloc_direct 3636 * will switch from direct to inline extent allocation 3637 * mode for us. 3638 */ 3639 xfs_iext_realloc_direct(ifp, new_size); 3640 ifp->if_bytes = new_size; 3641 } 3642 3643 /* 3644 * This is called when incore extents are being removed from the 3645 * indirection array and the extents being removed span multiple extent 3646 * buffers. The idx parameter contains the file extent index where we 3647 * want to begin removing extents, and the count parameter contains 3648 * how many extents need to be removed. 3649 * 3650 * |-------| |-------| 3651 * | nex1 | | | nex1 - number of extents before idx 3652 * |-------| | count | 3653 * | | | | count - number of extents being removed at idx 3654 * | count | |-------| 3655 * | | | nex2 | nex2 - number of extents after idx + count 3656 * |-------| |-------| 3657 */ 3658 void 3659 xfs_iext_remove_indirect( 3660 xfs_ifork_t *ifp, /* inode fork pointer */ 3661 xfs_extnum_t idx, /* index to begin removing extents */ 3662 int count) /* number of extents to remove */ 3663 { 3664 xfs_ext_irec_t *erp; /* indirection array pointer */ 3665 int erp_idx = 0; /* indirection array index */ 3666 xfs_extnum_t ext_cnt; /* extents left to remove */ 3667 xfs_extnum_t ext_diff; /* extents to remove in current list */ 3668 xfs_extnum_t nex1; /* number of extents before idx */ 3669 xfs_extnum_t nex2; /* extents after idx + count */ 3670 int nlists; /* entries in indirection array */ 3671 int page_idx = idx; /* index in target extent list */ 3672 3673 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3674 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3675 ASSERT(erp != NULL); 3676 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3677 nex1 = page_idx; 3678 ext_cnt = count; 3679 while (ext_cnt) { 3680 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0); 3681 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1)); 3682 /* 3683 * Check for deletion of entire list; 3684 * xfs_iext_irec_remove() updates extent offsets. 3685 */ 3686 if (ext_diff == erp->er_extcount) { 3687 xfs_iext_irec_remove(ifp, erp_idx); 3688 ext_cnt -= ext_diff; 3689 nex1 = 0; 3690 if (ext_cnt) { 3691 ASSERT(erp_idx < ifp->if_real_bytes / 3692 XFS_IEXT_BUFSZ); 3693 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 3694 nex1 = 0; 3695 continue; 3696 } else { 3697 break; 3698 } 3699 } 3700 /* Move extents up (if needed) */ 3701 if (nex2) { 3702 memmove(&erp->er_extbuf[nex1], 3703 &erp->er_extbuf[nex1 + ext_diff], 3704 nex2 * sizeof(xfs_bmbt_rec_t)); 3705 } 3706 /* Zero out rest of page */ 3707 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ - 3708 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t)))); 3709 /* Update remaining counters */ 3710 erp->er_extcount -= ext_diff; 3711 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff); 3712 ext_cnt -= ext_diff; 3713 nex1 = 0; 3714 erp_idx++; 3715 erp++; 3716 } 3717 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t); 3718 xfs_iext_irec_compact(ifp); 3719 } 3720 3721 /* 3722 * Create, destroy, or resize a linear (direct) block of extents. 3723 */ 3724 void 3725 xfs_iext_realloc_direct( 3726 xfs_ifork_t *ifp, /* inode fork pointer */ 3727 int new_size) /* new size of extents */ 3728 { 3729 int rnew_size; /* real new size of extents */ 3730 3731 rnew_size = new_size; 3732 3733 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) || 3734 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) && 3735 (new_size != ifp->if_real_bytes))); 3736 3737 /* Free extent records */ 3738 if (new_size == 0) { 3739 xfs_iext_destroy(ifp); 3740 } 3741 /* Resize direct extent list and zero any new bytes */ 3742 else if (ifp->if_real_bytes) { 3743 /* Check if extents will fit inside the inode */ 3744 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) { 3745 xfs_iext_direct_to_inline(ifp, new_size / 3746 (uint)sizeof(xfs_bmbt_rec_t)); 3747 ifp->if_bytes = new_size; 3748 return; 3749 } 3750 if (!is_power_of_2(new_size)){ 3751 rnew_size = roundup_pow_of_two(new_size); 3752 } 3753 if (rnew_size != ifp->if_real_bytes) { 3754 ifp->if_u1.if_extents = 3755 kmem_realloc(ifp->if_u1.if_extents, 3756 rnew_size, 3757 ifp->if_real_bytes, KM_NOFS); 3758 } 3759 if (rnew_size > ifp->if_real_bytes) { 3760 memset(&ifp->if_u1.if_extents[ifp->if_bytes / 3761 (uint)sizeof(xfs_bmbt_rec_t)], 0, 3762 rnew_size - ifp->if_real_bytes); 3763 } 3764 } 3765 /* 3766 * Switch from the inline extent buffer to a direct 3767 * extent list. Be sure to include the inline extent 3768 * bytes in new_size. 3769 */ 3770 else { 3771 new_size += ifp->if_bytes; 3772 if (!is_power_of_2(new_size)) { 3773 rnew_size = roundup_pow_of_two(new_size); 3774 } 3775 xfs_iext_inline_to_direct(ifp, rnew_size); 3776 } 3777 ifp->if_real_bytes = rnew_size; 3778 ifp->if_bytes = new_size; 3779 } 3780 3781 /* 3782 * Switch from linear (direct) extent records to inline buffer. 3783 */ 3784 void 3785 xfs_iext_direct_to_inline( 3786 xfs_ifork_t *ifp, /* inode fork pointer */ 3787 xfs_extnum_t nextents) /* number of extents in file */ 3788 { 3789 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3790 ASSERT(nextents <= XFS_INLINE_EXTS); 3791 /* 3792 * The inline buffer was zeroed when we switched 3793 * from inline to direct extent allocation mode, 3794 * so we don't need to clear it here. 3795 */ 3796 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents, 3797 nextents * sizeof(xfs_bmbt_rec_t)); 3798 kmem_free(ifp->if_u1.if_extents); 3799 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 3800 ifp->if_real_bytes = 0; 3801 } 3802 3803 /* 3804 * Switch from inline buffer to linear (direct) extent records. 3805 * new_size should already be rounded up to the next power of 2 3806 * by the caller (when appropriate), so use new_size as it is. 3807 * However, since new_size may be rounded up, we can't update 3808 * if_bytes here. It is the caller's responsibility to update 3809 * if_bytes upon return. 3810 */ 3811 void 3812 xfs_iext_inline_to_direct( 3813 xfs_ifork_t *ifp, /* inode fork pointer */ 3814 int new_size) /* number of extents in file */ 3815 { 3816 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS); 3817 memset(ifp->if_u1.if_extents, 0, new_size); 3818 if (ifp->if_bytes) { 3819 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, 3820 ifp->if_bytes); 3821 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 3822 sizeof(xfs_bmbt_rec_t)); 3823 } 3824 ifp->if_real_bytes = new_size; 3825 } 3826 3827 /* 3828 * Resize an extent indirection array to new_size bytes. 3829 */ 3830 void 3831 xfs_iext_realloc_indirect( 3832 xfs_ifork_t *ifp, /* inode fork pointer */ 3833 int new_size) /* new indirection array size */ 3834 { 3835 int nlists; /* number of irec's (ex lists) */ 3836 int size; /* current indirection array size */ 3837 3838 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3839 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3840 size = nlists * sizeof(xfs_ext_irec_t); 3841 ASSERT(ifp->if_real_bytes); 3842 ASSERT((new_size >= 0) && (new_size != size)); 3843 if (new_size == 0) { 3844 xfs_iext_destroy(ifp); 3845 } else { 3846 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) 3847 kmem_realloc(ifp->if_u1.if_ext_irec, 3848 new_size, size, KM_NOFS); 3849 } 3850 } 3851 3852 /* 3853 * Switch from indirection array to linear (direct) extent allocations. 3854 */ 3855 void 3856 xfs_iext_indirect_to_direct( 3857 xfs_ifork_t *ifp) /* inode fork pointer */ 3858 { 3859 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ 3860 xfs_extnum_t nextents; /* number of extents in file */ 3861 int size; /* size of file extents */ 3862 3863 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3864 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3865 ASSERT(nextents <= XFS_LINEAR_EXTS); 3866 size = nextents * sizeof(xfs_bmbt_rec_t); 3867 3868 xfs_iext_irec_compact_pages(ifp); 3869 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); 3870 3871 ep = ifp->if_u1.if_ext_irec->er_extbuf; 3872 kmem_free(ifp->if_u1.if_ext_irec); 3873 ifp->if_flags &= ~XFS_IFEXTIREC; 3874 ifp->if_u1.if_extents = ep; 3875 ifp->if_bytes = size; 3876 if (nextents < XFS_LINEAR_EXTS) { 3877 xfs_iext_realloc_direct(ifp, size); 3878 } 3879 } 3880 3881 /* 3882 * Free incore file extents. 3883 */ 3884 void 3885 xfs_iext_destroy( 3886 xfs_ifork_t *ifp) /* inode fork pointer */ 3887 { 3888 if (ifp->if_flags & XFS_IFEXTIREC) { 3889 int erp_idx; 3890 int nlists; 3891 3892 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3893 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) { 3894 xfs_iext_irec_remove(ifp, erp_idx); 3895 } 3896 ifp->if_flags &= ~XFS_IFEXTIREC; 3897 } else if (ifp->if_real_bytes) { 3898 kmem_free(ifp->if_u1.if_extents); 3899 } else if (ifp->if_bytes) { 3900 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * 3901 sizeof(xfs_bmbt_rec_t)); 3902 } 3903 ifp->if_u1.if_extents = NULL; 3904 ifp->if_real_bytes = 0; 3905 ifp->if_bytes = 0; 3906 } 3907 3908 /* 3909 * Return a pointer to the extent record for file system block bno. 3910 */ 3911 xfs_bmbt_rec_host_t * /* pointer to found extent record */ 3912 xfs_iext_bno_to_ext( 3913 xfs_ifork_t *ifp, /* inode fork pointer */ 3914 xfs_fileoff_t bno, /* block number to search for */ 3915 xfs_extnum_t *idxp) /* index of target extent */ 3916 { 3917 xfs_bmbt_rec_host_t *base; /* pointer to first extent */ 3918 xfs_filblks_t blockcount = 0; /* number of blocks in extent */ 3919 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */ 3920 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 3921 int high; /* upper boundary in search */ 3922 xfs_extnum_t idx = 0; /* index of target extent */ 3923 int low; /* lower boundary in search */ 3924 xfs_extnum_t nextents; /* number of file extents */ 3925 xfs_fileoff_t startoff = 0; /* start offset of extent */ 3926 3927 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3928 if (nextents == 0) { 3929 *idxp = 0; 3930 return NULL; 3931 } 3932 low = 0; 3933 if (ifp->if_flags & XFS_IFEXTIREC) { 3934 /* Find target extent list */ 3935 int erp_idx = 0; 3936 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx); 3937 base = erp->er_extbuf; 3938 high = erp->er_extcount - 1; 3939 } else { 3940 base = ifp->if_u1.if_extents; 3941 high = nextents - 1; 3942 } 3943 /* Binary search extent records */ 3944 while (low <= high) { 3945 idx = (low + high) >> 1; 3946 ep = base + idx; 3947 startoff = xfs_bmbt_get_startoff(ep); 3948 blockcount = xfs_bmbt_get_blockcount(ep); 3949 if (bno < startoff) { 3950 high = idx - 1; 3951 } else if (bno >= startoff + blockcount) { 3952 low = idx + 1; 3953 } else { 3954 /* Convert back to file-based extent index */ 3955 if (ifp->if_flags & XFS_IFEXTIREC) { 3956 idx += erp->er_extoff; 3957 } 3958 *idxp = idx; 3959 return ep; 3960 } 3961 } 3962 /* Convert back to file-based extent index */ 3963 if (ifp->if_flags & XFS_IFEXTIREC) { 3964 idx += erp->er_extoff; 3965 } 3966 if (bno >= startoff + blockcount) { 3967 if (++idx == nextents) { 3968 ep = NULL; 3969 } else { 3970 ep = xfs_iext_get_ext(ifp, idx); 3971 } 3972 } 3973 *idxp = idx; 3974 return ep; 3975 } 3976 3977 /* 3978 * Return a pointer to the indirection array entry containing the 3979 * extent record for filesystem block bno. Store the index of the 3980 * target irec in *erp_idxp. 3981 */ 3982 xfs_ext_irec_t * /* pointer to found extent record */ 3983 xfs_iext_bno_to_irec( 3984 xfs_ifork_t *ifp, /* inode fork pointer */ 3985 xfs_fileoff_t bno, /* block number to search for */ 3986 int *erp_idxp) /* irec index of target ext list */ 3987 { 3988 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 3989 xfs_ext_irec_t *erp_next; /* next indirection array entry */ 3990 int erp_idx; /* indirection array index */ 3991 int nlists; /* number of extent irec's (lists) */ 3992 int high; /* binary search upper limit */ 3993 int low; /* binary search lower limit */ 3994 3995 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3996 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3997 erp_idx = 0; 3998 low = 0; 3999 high = nlists - 1; 4000 while (low <= high) { 4001 erp_idx = (low + high) >> 1; 4002 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4003 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL; 4004 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) { 4005 high = erp_idx - 1; 4006 } else if (erp_next && bno >= 4007 xfs_bmbt_get_startoff(erp_next->er_extbuf)) { 4008 low = erp_idx + 1; 4009 } else { 4010 break; 4011 } 4012 } 4013 *erp_idxp = erp_idx; 4014 return erp; 4015 } 4016 4017 /* 4018 * Return a pointer to the indirection array entry containing the 4019 * extent record at file extent index *idxp. Store the index of the 4020 * target irec in *erp_idxp and store the page index of the target 4021 * extent record in *idxp. 4022 */ 4023 xfs_ext_irec_t * 4024 xfs_iext_idx_to_irec( 4025 xfs_ifork_t *ifp, /* inode fork pointer */ 4026 xfs_extnum_t *idxp, /* extent index (file -> page) */ 4027 int *erp_idxp, /* pointer to target irec */ 4028 int realloc) /* new bytes were just added */ 4029 { 4030 xfs_ext_irec_t *prev; /* pointer to previous irec */ 4031 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */ 4032 int erp_idx; /* indirection array index */ 4033 int nlists; /* number of irec's (ex lists) */ 4034 int high; /* binary search upper limit */ 4035 int low; /* binary search lower limit */ 4036 xfs_extnum_t page_idx = *idxp; /* extent index in target list */ 4037 4038 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4039 ASSERT(page_idx >= 0 && page_idx <= 4040 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); 4041 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4042 erp_idx = 0; 4043 low = 0; 4044 high = nlists - 1; 4045 4046 /* Binary search extent irec's */ 4047 while (low <= high) { 4048 erp_idx = (low + high) >> 1; 4049 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4050 prev = erp_idx > 0 ? erp - 1 : NULL; 4051 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff && 4052 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) { 4053 high = erp_idx - 1; 4054 } else if (page_idx > erp->er_extoff + erp->er_extcount || 4055 (page_idx == erp->er_extoff + erp->er_extcount && 4056 !realloc)) { 4057 low = erp_idx + 1; 4058 } else if (page_idx == erp->er_extoff + erp->er_extcount && 4059 erp->er_extcount == XFS_LINEAR_EXTS) { 4060 ASSERT(realloc); 4061 page_idx = 0; 4062 erp_idx++; 4063 erp = erp_idx < nlists ? erp + 1 : NULL; 4064 break; 4065 } else { 4066 page_idx -= erp->er_extoff; 4067 break; 4068 } 4069 } 4070 *idxp = page_idx; 4071 *erp_idxp = erp_idx; 4072 return(erp); 4073 } 4074 4075 /* 4076 * Allocate and initialize an indirection array once the space needed 4077 * for incore extents increases above XFS_IEXT_BUFSZ. 4078 */ 4079 void 4080 xfs_iext_irec_init( 4081 xfs_ifork_t *ifp) /* inode fork pointer */ 4082 { 4083 xfs_ext_irec_t *erp; /* indirection array pointer */ 4084 xfs_extnum_t nextents; /* number of extents in file */ 4085 4086 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); 4087 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4088 ASSERT(nextents <= XFS_LINEAR_EXTS); 4089 4090 erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS); 4091 4092 if (nextents == 0) { 4093 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); 4094 } else if (!ifp->if_real_bytes) { 4095 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); 4096 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { 4097 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ); 4098 } 4099 erp->er_extbuf = ifp->if_u1.if_extents; 4100 erp->er_extcount = nextents; 4101 erp->er_extoff = 0; 4102 4103 ifp->if_flags |= XFS_IFEXTIREC; 4104 ifp->if_real_bytes = XFS_IEXT_BUFSZ; 4105 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t); 4106 ifp->if_u1.if_ext_irec = erp; 4107 4108 return; 4109 } 4110 4111 /* 4112 * Allocate and initialize a new entry in the indirection array. 4113 */ 4114 xfs_ext_irec_t * 4115 xfs_iext_irec_new( 4116 xfs_ifork_t *ifp, /* inode fork pointer */ 4117 int erp_idx) /* index for new irec */ 4118 { 4119 xfs_ext_irec_t *erp; /* indirection array pointer */ 4120 int i; /* loop counter */ 4121 int nlists; /* number of irec's (ex lists) */ 4122 4123 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4124 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4125 4126 /* Resize indirection array */ 4127 xfs_iext_realloc_indirect(ifp, ++nlists * 4128 sizeof(xfs_ext_irec_t)); 4129 /* 4130 * Move records down in the array so the 4131 * new page can use erp_idx. 4132 */ 4133 erp = ifp->if_u1.if_ext_irec; 4134 for (i = nlists - 1; i > erp_idx; i--) { 4135 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t)); 4136 } 4137 ASSERT(i == erp_idx); 4138 4139 /* Initialize new extent record */ 4140 erp = ifp->if_u1.if_ext_irec; 4141 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); 4142 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 4143 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); 4144 erp[erp_idx].er_extcount = 0; 4145 erp[erp_idx].er_extoff = erp_idx > 0 ? 4146 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0; 4147 return (&erp[erp_idx]); 4148 } 4149 4150 /* 4151 * Remove a record from the indirection array. 4152 */ 4153 void 4154 xfs_iext_irec_remove( 4155 xfs_ifork_t *ifp, /* inode fork pointer */ 4156 int erp_idx) /* irec index to remove */ 4157 { 4158 xfs_ext_irec_t *erp; /* indirection array pointer */ 4159 int i; /* loop counter */ 4160 int nlists; /* number of irec's (ex lists) */ 4161 4162 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4163 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4164 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4165 if (erp->er_extbuf) { 4166 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, 4167 -erp->er_extcount); 4168 kmem_free(erp->er_extbuf); 4169 } 4170 /* Compact extent records */ 4171 erp = ifp->if_u1.if_ext_irec; 4172 for (i = erp_idx; i < nlists - 1; i++) { 4173 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t)); 4174 } 4175 /* 4176 * Manually free the last extent record from the indirection 4177 * array. A call to xfs_iext_realloc_indirect() with a size 4178 * of zero would result in a call to xfs_iext_destroy() which 4179 * would in turn call this function again, creating a nasty 4180 * infinite loop. 4181 */ 4182 if (--nlists) { 4183 xfs_iext_realloc_indirect(ifp, 4184 nlists * sizeof(xfs_ext_irec_t)); 4185 } else { 4186 kmem_free(ifp->if_u1.if_ext_irec); 4187 } 4188 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; 4189 } 4190 4191 /* 4192 * This is called to clean up large amounts of unused memory allocated 4193 * by the indirection array. Before compacting anything though, verify 4194 * that the indirection array is still needed and switch back to the 4195 * linear extent list (or even the inline buffer) if possible. The 4196 * compaction policy is as follows: 4197 * 4198 * Full Compaction: Extents fit into a single page (or inline buffer) 4199 * Partial Compaction: Extents occupy less than 50% of allocated space 4200 * No Compaction: Extents occupy at least 50% of allocated space 4201 */ 4202 void 4203 xfs_iext_irec_compact( 4204 xfs_ifork_t *ifp) /* inode fork pointer */ 4205 { 4206 xfs_extnum_t nextents; /* number of extents in file */ 4207 int nlists; /* number of irec's (ex lists) */ 4208 4209 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4210 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4211 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4212 4213 if (nextents == 0) { 4214 xfs_iext_destroy(ifp); 4215 } else if (nextents <= XFS_INLINE_EXTS) { 4216 xfs_iext_indirect_to_direct(ifp); 4217 xfs_iext_direct_to_inline(ifp, nextents); 4218 } else if (nextents <= XFS_LINEAR_EXTS) { 4219 xfs_iext_indirect_to_direct(ifp); 4220 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) { 4221 xfs_iext_irec_compact_pages(ifp); 4222 } 4223 } 4224 4225 /* 4226 * Combine extents from neighboring extent pages. 4227 */ 4228 void 4229 xfs_iext_irec_compact_pages( 4230 xfs_ifork_t *ifp) /* inode fork pointer */ 4231 { 4232 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */ 4233 int erp_idx = 0; /* indirection array index */ 4234 int nlists; /* number of irec's (ex lists) */ 4235 4236 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4237 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4238 while (erp_idx < nlists - 1) { 4239 erp = &ifp->if_u1.if_ext_irec[erp_idx]; 4240 erp_next = erp + 1; 4241 if (erp_next->er_extcount <= 4242 (XFS_LINEAR_EXTS - erp->er_extcount)) { 4243 memcpy(&erp->er_extbuf[erp->er_extcount], 4244 erp_next->er_extbuf, erp_next->er_extcount * 4245 sizeof(xfs_bmbt_rec_t)); 4246 erp->er_extcount += erp_next->er_extcount; 4247 /* 4248 * Free page before removing extent record 4249 * so er_extoffs don't get modified in 4250 * xfs_iext_irec_remove. 4251 */ 4252 kmem_free(erp_next->er_extbuf); 4253 erp_next->er_extbuf = NULL; 4254 xfs_iext_irec_remove(ifp, erp_idx + 1); 4255 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4256 } else { 4257 erp_idx++; 4258 } 4259 } 4260 } 4261 4262 /* 4263 * This is called to update the er_extoff field in the indirection 4264 * array when extents have been added or removed from one of the 4265 * extent lists. erp_idx contains the irec index to begin updating 4266 * at and ext_diff contains the number of extents that were added 4267 * or removed. 4268 */ 4269 void 4270 xfs_iext_irec_update_extoffs( 4271 xfs_ifork_t *ifp, /* inode fork pointer */ 4272 int erp_idx, /* irec index to update */ 4273 int ext_diff) /* number of new extents */ 4274 { 4275 int i; /* loop counter */ 4276 int nlists; /* number of irec's (ex lists */ 4277 4278 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4279 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 4280 for (i = erp_idx; i < nlists; i++) { 4281 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff; 4282 } 4283 } 4284