// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_refcount.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ?
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}
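/*
 * Worked example of the unit conversion above (illustrative numbers only):
 * with 4096-byte filesystem blocks, s_blocksize_bits is 12, so the shift is
 * 12 - 9 = 3 and each filesystem block covers 8 512-byte sectors. A count of
 * one block becomes 8 sectors, and a linearised block offset of 100 becomes
 * sector offset 800.
 */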
#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	mod = 0;	/* alignment remainder */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
			&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary. All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}
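/*
 * Example of the check above (hypothetical extent): if the last extent is
 * { br_startoff = 10, br_blockcount = 5 } it covers file blocks 10..14, so
 * endoff >= 15 reports *eof = 1 and endoff <= 14 reports *eof = 0.
 */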
/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records. Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	xfs_filblks_t		*count)
{
	int			b;
	xfs_bmbt_rec_t		*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int
xfs_bmap_count_tree(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_ifork	*ifp,
	xfs_fsblock_t		blockno,
	int			levelin,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	int			error;
	struct xfs_buf		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
				count);
		if (error) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			(*nextents) += numrecs;
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}
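/*
 * Note on the totals accumulated above: *count includes both the bmbt
 * blocks themselves (the "*count += 1" after each buffer read) and the
 * data blocks described by the leaf records, while *nextents counts only
 * the leaf records.
 */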
/*
 * Count fsblocks of the given fork. Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	struct xfs_btree_block	*block;	/* current btree block */
	struct xfs_ifork	*ifp;	/* fork structure */
	xfs_fsblock_t		bno;	/* block # of "block" */
	int			level;	/* btree level, for checking */
	int			error;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	*nextents = 0;
	*count = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!ifp)
		return 0;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		return 0;
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		/*
		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
		 */
		block = ifp->if_broot;
		level = be16_to_cpu(block->bb_level);
		ASSERT(level > 0);
		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
		bno = be64_to_cpu(*pp);
		ASSERT(bno != NULLFSBLOCK);
		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
				nextents, count);
		if (error) {
			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	return 0;
}
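/*
 * Illustrative call pattern (hypothetical caller, not part of this file):
 *
 *	xfs_extnum_t	nextents;
 *	xfs_filblks_t	count;
 *
 *	error = xfs_bmap_count_blocks(tp, ip, XFS_DATA_FORK,
 *			&nextents, &count);
 *
 * On return, count holds the fsblocks used by the data fork (bmbt blocks
 * included, delalloc extents excluded) and nextents the extent count.
 */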
static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false, trimmed = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Delalloc extents that start beyond EOF can occur due to
		 * speculative EOF allocation when the delalloc extent is larger
		 * than the largest freespace extent at conversion time. These
		 * extents cannot be converted by data writeback, so can exist
		 * here even if we are not supposed to be finding delalloc
		 * extents.
		 */
		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}
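/*
 * Sketch of how the helpers above cooperate (hypothetical numbers): if a
 * single bmbt record covers file blocks 0..9 but only 0..3 are shared,
 * xfs_reflink_trim_around_shared() trims the first report to 0..3 inside
 * xfs_getbmap_report_one(), and xfs_getbmap_next_rec() then advances the
 * record to 4..9 so the unshared tail becomes a separate getbmapx entry.
 */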
/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (!XFS_IFORK_Q(ip))
			goto out_unlock_iolock;

		max_len = 1LL << 32;
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	case XFS_COW_FORK:
		/* No CoW fork? Just return */
		if (!ifp)
			goto out_unlock_iolock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation. These are not removed
			 * until the release function is called or the inode
			 * is inactivated. Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_d.di_flags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			goto out_unlock_ilock;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode. This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		end_fsb = start_fsb + length;
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, length);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
						  &got, &del);
		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
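/*
 * Sketch of the walk above (assumed extent layout): punching blocks
 * [10, 20) against delalloc extents {8..12} and {15..18} punches {15..18}
 * whole, trims {8..12} down to {10..12} before punching it, and leaves
 * {8..9} in place.
 */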
/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file. If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size. If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (i.e. the NULL files bug).
		 */
		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}
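/*
 * Illustrative call pattern (callers such as inode release/inactivation
 * live elsewhere in xfs):
 *
 *	if (xfs_can_free_eofblocks(ip, false)) {
 *		error = xfs_free_eofblocks(ip);
 *		...
 *	}
 *
 * with the locking rules described in the comment above.
 */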
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, resblks,
					imapp, &nimaps);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* unlock inode, unreserve quota blocks, cancel trans */
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

static int
xfs_adjust_extent_unmap_boundaries(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*startoffset_fsb,
	xfs_fileoff_t		*endoffset_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	int			nimap, error;
	xfs_extlen_t		mod = 0;

	nimap = 1;
	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
		if (mod)
			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
	}

	nimap = 1;
	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod++;
		if (mod && mod != mp->m_sb.sb_rextsize)
			*endoffset_fsb -= mod;
	}

	return 0;
}

static int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}
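/*
 * Example of the rounding above (illustrative): with 1k filesystem blocks
 * and 4k pages, rounding is PAGE_SIZE, so flushing bytes 5000..5999 writes
 * back and truncates the page cache over 4096..8191, covering every page
 * that could straddle the unmapped range.
 */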
int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk. If it's a RT file
	 * and we can't use unwritten extents then we actually need to ensure
	 * to zero the whole extent, otherwise we just need to take care of
	 * block boundaries, and xfs_bunmapi will handle the rest.
	 */
	if (XFS_IS_REALTIME_INODE(ip) &&
	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
				&endoffset_fsb);
		if (error)
			return error;
	}

	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end. iomap_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				(offset + len) & ~PAGE_MASK, LLONG_MAX);
	}

	return error;
}
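/*
 * Example of the split above (illustrative, 4k blocks): freeing bytes
 * [6144, 10240) yields startoffset_fsb = 2 (rounded up) and
 * endoffset_fsb = 2 (rounded down), so no whole block is unmapped and
 * iomap_zero_range() zeroes the two partial-block ranges instead.
 */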
/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;
}

static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					offset >> PAGE_SHIFT, -1);
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_inode_has_cow_data(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}

/*
 * xfs_collapse_file_space()
 * This routine frees disk space and shifts extents for the given file.
 * The first thing we do is free the data blocks in the specified range
 * by calling xfs_free_file_space(), which also syncs dirty data and
 * invalidates the page cache over the region on which the collapse range
 * is working. Then we shift the extent records to the left to cover the
 * hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	while (!error && !done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, resblks, 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done);
		if (error)
			goto out_trans_cancel;

		error = xfs_trans_commit(tp);
	}

	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
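/*
 * Example of a collapse (illustrative, 4k blocks): for offset = 8192 and
 * len = 4096, next_fsb = 3 and shift_fsb = 1, so file block 2 is freed and
 * every extent from block 3 onwards is shifted left by one block; trimming
 * i_size is left to the caller.
 */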
/*
 * xfs_insert_file_space()
 * This routine creates hole space by shifting extents for the given file.
 * The first thing we do is sync dirty data and invalidate the page cache
 * over the region on which the insert range is working, then split an
 * extent in two at the given offset by calling xfs_bmap_split_extent.
 * All extent records lying between [offset, last allocated extent] are
 * then shifted to the right to make room for the hole range.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_insert_file_space(ip);

	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	/*
	 * The extent shifting code works on extent granularity. So, if stop_fsb
	 * is not the starting block of extent, we need to split the extent at
	 * stop_fsb.
	 */
	error = xfs_bmap_split_extent(ip, stop_fsb);
	if (error)
		return error;

	while (!error && !done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb);
		if (error)
			goto out_trans_cancel;

		error = xfs_trans_commit(tp);
	}

	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
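/*
 * Example of an insert (illustrative, 4k blocks): for offset = 8192 and
 * len = 4096, stop_fsb = 2 and shift_fsb = 1; an extent crossing block 2
 * is split there and all extents from block 2 onwards move right by one
 * block, leaving a one-block hole at the insertion point.
 */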
/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6. If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * then why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
		return 0;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int			error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}
/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip)
{
	struct xfs_trans	*tp = *tpp;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	uirec;
	struct xfs_bmbt_irec	tirec;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	xfs_filblks_t		count_fsb;
	int			error;
	xfs_filblks_t		ilen;
	xfs_filblks_t		rlen;
	int			nimaps;
	uint64_t		tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps. The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_d.di_flags2;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			ASSERT(tp->t_firstblock == NULLFSBLOCK);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out_defer;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			/* Remove the mapping from the donor file. */
			error = xfs_bmap_unmap_extent(tp, tip, &uirec);
			if (error)
				goto out_defer;

			/* Remove the mapping from the source file. */
			error = xfs_bmap_unmap_extent(tp, ip, &irec);
			if (error)
				goto out_defer;

			/* Map the donor file's blocks into the source file. */
			error = xfs_bmap_map_extent(tp, ip, &uirec);
			if (error)
				goto out_defer;

			/* Map the source file's blocks into the donor file. */
			error = xfs_bmap_map_extent(tp, tip, &irec);
			if (error)
				goto out_defer;

			error = xfs_defer_finish(tpp);
			tp = *tpp;
			if (error)
				goto out;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_d.di_flags2 = tip_flags2;
	return 0;

out_defer:
	xfs_defer_cancel(tp);
out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_d.di_flags2 = tip_flags2;
	return error;
}
/*
 * Swap the extents of two files by swapping data forks.
 */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if (XFS_IFORK_Q(ip) && ip->i_d.di_anextents > 0 &&
	    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if (XFS_IFORK_Q(tip) && tip->i_d.di_anextents > 0 &&
	    tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Btree format (v3) inodes have the inode number stamped in the bmbt
	 * block headers. We can't start changing the bmbt blocks until the
	 * inode owner change is logged so recovery does the right thing in the
	 * event of a crash. Set the owner change log flags now and leave the
	 * bmbt scan as the last step.
	 */
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		(*target_log_flags) |= XFS_ILOG_DOWNER;
	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		(*src_log_flags) |= XFS_ILOG_DOWNER;

	/*
	 * Swap the data forks of the inodes
	 */
	swap(ip->i_df, tip->i_df);

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	swap(ip->i_d.di_nextents, tip->i_d.di_nextents);
	swap(ip->i_d.di_format, tip->i_d.di_format);

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}
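/*
 * Example of the di_nblocks fixup above (hypothetical numbers): if ip held
 * 100 blocks of which 10 were attr fork blocks (aforkblks) and tip held 40
 * blocks of which 5 were attr fork blocks (taforkblks), then after the
 * data fork swap ip holds 40 - 5 + 10 = 45 blocks and tip holds
 * 100 + 5 - 10 = 95 blocks: only the data fork contents change hands.
 */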
/*
 * Fix up the owners of the bmbt blocks to refer to the current inode. The
 * change owner scan attempts to order all modified buffers in the current
 * transaction. In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan. This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tmpip)
{
	int			error;
	struct xfs_trans	*tp = *tpp;

	do {
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
					      NULL);
		/* success or fatal error */
		if (error != -EAGAIN)
			break;

		error = xfs_trans_roll(tpp);
		if (error)
			break;
		tp = *tpp;

		/*
		 * Redirty both inodes so they can relog and keep the log tail
		 * moving forward.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_ijoin(tp, tmpip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
	} while (true);

	return error;
}

int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	int			lock_flags;
	uint64_t		f;
	int			resblks = 0;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with. Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	lock_flags = XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		int		w = XFS_DATA_FORK;
		uint32_t	ipnext = XFS_IFORK_NEXTENTS(ip, w);
		uint32_t	tipnext = XFS_IFORK_NEXTENTS(tip, w);

		/*
		 * Conceptually this shouldn't affect the shape of either bmbt,
		 * but since we atomically move extents one by one, we reserve
		 * enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

		/*
		 * Handle the corner case where either inode might straddle the
		 * btree format boundary. If so, the inode could bounce between
		 * btree <-> extent format on unmap -> remap cycles, freeing and
		 * allocating a bmapbt block each time.
		 */
		if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
			resblks += XFS_IFORK_MAXEXT(ip, w);
		if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
			resblks += XFS_IFORK_MAXEXT(tip, w);
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction
	 * commit or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in. If they differ, we abort this swap.
	 * This is the mechanism used to assure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
	}
	/* Swap the cow forks. */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);

		swap(ip->i_cnextents, tip->i_cnextents);
		swap(ip->i_cowfp, tip->i_cowfp);

		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(ip);
		else
			xfs_inode_clear_cowblocks_tag(ip);
		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(tip);
		else
			xfs_inode_clear_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
	 * have inode number owner values in the bmbt blocks that still refer to
	 * the old inode. Scan each bmbt to fix up the owner values with the
	 * inode number of the current inode.
	 */
	if (src_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, ip, tip);
		if (error)
			goto out_trans_cancel;
	}
	if (target_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, tip, ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}