// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_refcount.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}

#ifdef CONFIG_XFS_RT
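/*
 * Allocate an extent for a realtime file. The offset and length are first
 * aligned to the inode's extent size hint and then converted to units of
 * realtime extents for xfs_rtallocate_extent().
 */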
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	mod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary. All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records. Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	xfs_filblks_t		*count)
{
	int		b;
	xfs_bmbt_rec_t	*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int
xfs_bmap_count_tree(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_ifork	*ifp,
	xfs_fsblock_t		blockno,
	int			levelin,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	int			error;
	struct xfs_buf		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
				count);
		if (error) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			(*nextents) += numrecs;
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork. Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	struct xfs_btree_block	*block;	/* current btree block */
	struct xfs_ifork	*ifp;	/* fork structure */
	xfs_fsblock_t		bno;	/* block # of "block" */
	int			level;	/* btree level, for checking */
	int			error;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	*nextents = 0;
	*count = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!ifp)
		return 0;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		return 0;
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		/*
		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
		 */
		block = ifp->if_broot;
		level = be16_to_cpu(block->bb_level);
		ASSERT(level > 0);
		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
		bno = be64_to_cpu(*pp);
		ASSERT(bno != NULLFSBLOCK);
		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
				nextents, count);
		if (error) {
			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	return 0;
}

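/*
 * Format one getbmapx record from an incore extent mapping: flag delalloc,
 * unwritten and shared extents and advance the request past the range that
 * has just been reported.
 */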
static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false, trimmed = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Delalloc extents that start beyond EOF can occur due to
		 * speculative EOF allocation when the delalloc extent is larger
		 * than the largest freespace extent at conversion time. These
		 * extents cannot be converted by data writeback, so can exist
		 * here even if we are not supposed to be finding delalloc
		 * extents.
		 */
		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

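/*
 * Advance an incore mapping past the part that has already been reported so
 * that the remainder of a partially shared mapping can be emitted as its own
 * record. Returns false once the whole mapping has been consumed.
 */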
static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,	/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (!XFS_IFORK_Q(ip))
			goto out_unlock_iolock;

		max_len = 1LL << 32;
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	case XFS_COW_FORK:
		/* No CoW fork? Just return */
		if (!ifp)
			goto out_unlock_iolock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation. These are not removed
			 * until the release function is called or the inode
			 * is inactivated. Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_d.di_flags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			goto out_unlock_ilock;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

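	/*
	 * Walk the extent map, emitting one record per hole and one per
	 * shared/unshared piece of each mapping, until we run out of extents,
	 * fill the caller's buffer, or pass the end of the requested range.
	 */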
	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode. This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		end_fsb = start_fsb + length;
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		return 0;

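	/*
	 * Walk backwards from the end of the range, punching out each
	 * delalloc extent that overlaps it.
	 */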
	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, length);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
						  &got, &del);
		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file. If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size. If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}

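/*
 * Allocate space for the byte range [offset, offset + len) of the file, one
 * transaction at a time. The extent size hint and realtime extent size
 * determine the per-iteration block and quota reservations.
 */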
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	struct xfs_defer_ops	dfops;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_defer_init(&dfops, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					resblks, imapp, &nimaps, &dfops);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

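/*
 * Unmap part of a file range in a single transaction. xfs_bunmapi() sets
 * *done once the whole range has been unmapped, so the caller loops until
 * that happens.
 */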
static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &firstfsb);
	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
			&dfops, done);
	if (error)
		goto out_bmap_cancel;

	xfs_defer_ijoin(&dfops, ip);
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

static int
xfs_adjust_extent_unmap_boundaries(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*startoffset_fsb,
	xfs_fileoff_t		*endoffset_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	int			nimap, error;
	xfs_extlen_t		mod = 0;

	nimap = 1;
	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
		if (mod)
			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
	}

	nimap = 1;
	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod++;
		if (mod && mod != mp->m_sb.sb_rextsize)
			*endoffset_fsb -= mod;
	}

	return 0;
}

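/*
 * Wait for pending direct I/O, then write back and invalidate the page cache
 * over the range that is about to be unmapped, rounded out to the larger of
 * the page and filesystem block size.
 */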
static int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk. If it's a RT file
	 * and we can't use unwritten extents then we actually need to ensure
	 * to zero the whole extent, otherwise we just need to take care of
	 * block boundaries, and xfs_bunmapi will handle the rest.
	 */
	if (XFS_IS_REALTIME_INODE(ip) &&
	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
				&endoffset_fsb);
		if (error)
			return error;
	}

	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end. iomap_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				(offset + len) & ~PAGE_MASK, LLONG_MAX);
	}

	return error;
}

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;
}

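/*
 * Prepare a file for an extent shift: trim post-EOF preallocations, write back
 * and invalidate the page cache from @offset to EOF, and cancel any pending
 * CoW reservations in that range so nothing gets remapped to a stale offset.
 */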
static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					offset >> PAGE_SHIFT, -1);
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_is_reflink_inode(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}

/*
 * xfs_collapse_file_space()
 * This routine frees disk space and shifts extents for the given file.
 * The first thing we do is free the data blocks in the specified range
 * by calling xfs_free_file_space(), which also syncs dirty data and
 * invalidates the page cache over the region the collapse range is
 * working on. Then we shift the extent records to the left to cover the hole.
 * RETURNS:
 * 0 on success
 * errno on error
 *
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	while (!error && !done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, resblks, 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		xfs_defer_init(&dfops, &first_block);
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done, &first_block, &dfops);
		if (error)
			goto out_bmap_cancel;

		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto out_bmap_cancel;
		error = xfs_trans_commit(tp);
	}

	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * xfs_insert_file_space()
 * This routine creates hole space by shifting extents for the given file.
 * The first thing we do is sync dirty data and invalidate the page cache
 * over the region the insert range is working on. Then we split an extent
 * into two extents at the given offset by calling xfs_bmap_split_extent,
 * and shift all extent records lying between [offset, last allocated
 * extent] to the right to make room for the hole.
 * RETURNS:
 * 0 on success
 * errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_insert_file_space(ip);

	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	/*
	 * The extent shifting code works on extent granularity. So, if stop_fsb
	 * is not the starting block of extent, we need to split the extent at
	 * stop_fsb.
	 */
	error = xfs_bmap_split_extent(ip, stop_fsb);
	if (error)
		return error;

	while (!error && !done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_defer_init(&dfops, &first_block);
		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, &first_block, &dfops);
		if (error)
			goto out_bmap_cancel;

		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto out_bmap_cancel;
		error = xfs_trans_commit(tp);
	}

	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6. If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * if the target inode has fewer extents than the temporary inode then
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
		return 0;

	/*
	 * if the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}

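/*
 * Flush all dirty data and drop the page cache for one of the files taking
 * part in the swap; the swap requires that neither file has any pages left
 * in the cache after the flush.
 */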
static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}

/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip)
{
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	uirec;
	struct xfs_bmbt_irec	tirec;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	xfs_filblks_t		count_fsb;
	xfs_fsblock_t		firstfsb;
	struct xfs_defer_ops	dfops;
	int			error;
	xfs_filblks_t		ilen;
	xfs_filblks_t		rlen;
	int			nimaps;
	uint64_t		tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps. The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_d.di_flags2;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			xfs_defer_init(&dfops, &firstfsb);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out_defer;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			/* Remove the mapping from the donor file. */
			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
					tip, &uirec);
			if (error)
				goto out_defer;

			/* Remove the mapping from the source file. */
			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
					ip, &irec);
			if (error)
				goto out_defer;

			/* Map the donor file's blocks into the source file. */
			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
					ip, &uirec);
			if (error)
				goto out_defer;

			/* Map the source file's blocks into the donor file. */
			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
					tip, &irec);
			if (error)
				goto out_defer;

			xfs_defer_ijoin(&dfops, ip);
			error = xfs_defer_finish(tpp, &dfops);
			if (error)
				goto out_defer;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_d.di_flags2 = tip_flags2;
	return 0;

out_defer:
	xfs_defer_cancel(&dfops);
out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_d.di_flags2 = tip_flags2;
	return error;
}

/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	struct xfs_ifork	tempifp, *ifp, *tifp;
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Btree format (v3) inodes have the inode number stamped in the bmbt
	 * block headers. We can't start changing the bmbt blocks until the
	 * inode owner change is logged so recovery does the right thing in the
	 * event of a crash. Set the owner change log flags now and leave the
	 * bmbt scan as the last step.
	 */
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		(*target_log_flags) |= XFS_ILOG_DOWNER;
	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		(*src_log_flags) |= XFS_ILOG_DOWNER;

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	tempifp = *ifp;		/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

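	/*
	 * Now set the data fork log flags that match the post-swap fork
	 * format of each inode.
	 */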
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}

/*
 * Fix up the owners of the bmbt blocks to refer to the current inode. The
 * change owner scan attempts to order all modified buffers in the current
 * transaction. In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan. This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tmpip)
{
	int			error;
	struct xfs_trans	*tp = *tpp;

	do {
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
					      NULL);
		/* success or fatal error */
		if (error != -EAGAIN)
			break;

		error = xfs_trans_roll(tpp);
		if (error)
			break;
		tp = *tpp;

		/*
		 * Redirty both inodes so they can relog and keep the log tail
		 * moving forward.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_ijoin(tp, tmpip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
	} while (true);

	return error;
}

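/*
 * Swap the data forks (and CoW forks and reflink flags, if present) of two
 * inodes. This backs the XFS_IOC_SWAPEXT ioctl that xfs_fsr uses for online
 * defragmentation.
 */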
int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	int			lock_flags;
	struct xfs_ifork	*cowfp;
	uint64_t		f;
	int			resblks = 0;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with. Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	lock_flags = XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		int		w	= XFS_DATA_FORK;
		uint32_t	ipnext	= XFS_IFORK_NEXTENTS(ip, w);
		uint32_t	tipnext	= XFS_IFORK_NEXTENTS(tip, w);

		/*
		 * Conceptually this shouldn't affect the shape of either bmbt,
		 * but since we atomically move extents one by one, we reserve
		 * enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

		/*
		 * Handle the corner case where either inode might straddle the
		 * btree format boundary. If so, the inode could bounce between
		 * btree <-> extent format on unmap -> remap cycles, freeing and
		 * allocating a bmapbt block each time.
		 */
		if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
			resblks += XFS_IFORK_MAXEXT(ip, w);
		if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
			resblks += XFS_IFORK_MAXEXT(tip, w);
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction commit
	 * or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);


	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in. If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
	}

	/* Swap the cow forks. */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		xfs_extnum_t	extnum;

		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);

		extnum = ip->i_cnextents;
		ip->i_cnextents = tip->i_cnextents;
		tip->i_cnextents = extnum;

		cowfp = ip->i_cowfp;
		ip->i_cowfp = tip->i_cowfp;
		tip->i_cowfp = cowfp;

		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(ip);
		else
			xfs_inode_clear_cowblocks_tag(ip);
		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(tip);
		else
			xfs_inode_clear_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
	 * have inode number owner values in the bmbt blocks that still refer to
	 * the old inode. Scan each bmbt to fix up the owner values with the
	 * inode number of the current inode.
	 */
	if (src_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, ip, tip);
		if (error)
			goto out_trans_cancel;
	}
	if (target_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, tip, ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}