/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
 * caller.  Frees all the extents that need freeing, which must be done
 * last due to locking considerations.  We never free any extents in
 * the first transaction.
 *
 * Return 1 if the given transaction was committed and a new one
 * started, and 0 otherwise in the committed parameter.
 */
int						/* error */
xfs_bmap_finish(
	xfs_trans_t	**tp,			/* transaction pointer addr */
	xfs_bmap_free_t	*flist,			/* i/o: list extents to free */
	int		*committed)		/* xact committed or not */
{
	xfs_efd_log_item_t	*efd;		/* extent free data */
	xfs_efi_log_item_t	*efi;		/* extent free intention */
	int			error;		/* error return value */
	xfs_bmap_free_item_t	*free;		/* free extent item */
	struct xfs_trans_res	tres;		/* new log reservation */
	xfs_mount_t		*mp;		/* filesystem mount structure */
	xfs_bmap_free_item_t	*next;		/* next item on free list */
	xfs_trans_t		*ntp;		/* new transaction pointer */

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	if (flist->xbf_count == 0) {
		*committed = 0;
		return 0;
	}
	ntp = *tp;
	efi = xfs_trans_get_efi(ntp, flist->xbf_count);
	for (free = flist->xbf_first; free; free = free->xbfi_next)
		xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
			free->xbfi_blockcount);

	tres.tr_logres = ntp->t_log_res;
	tres.tr_logcount = ntp->t_log_count;
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	ntp = xfs_trans_dup(*tp);
	error = xfs_trans_commit(*tp, 0);
	*tp = ntp;
	*committed = 1;
	/*
	 * We have a new transaction, so we should return committed=1,
	 * even though we're returning an error.
	 */
	if (error)
		return error;

	/*
	 * transaction commit worked ok so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup()
	 */
	xfs_log_ticket_put(ntp->t_ticket);

	error = xfs_trans_reserve(ntp, &tres, 0, 0);
	if (error)
		return error;
	efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
	for (free = flist->xbf_first; free != NULL; free = next) {
		next = free->xbfi_next;
		if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
				free->xbfi_blockcount))) {
			/*
			 * The bmap free list will be cleaned up at a
			 * higher level.  The EFI will be canceled when
			 * this transaction is aborted.
			 * Need to force shutdown here to make sure it
			 * happens, since this transaction may not be
			 * dirty yet.
			 */
			mp = ntp->t_mountp;
			if (!XFS_FORCED_SHUTDOWN(mp))
				xfs_force_shutdown(mp,
						   (error == EFSCORRUPTED) ?
						   SHUTDOWN_CORRUPT_INCORE :
						   SHUTDOWN_META_IO_ERROR);
			return error;
		}
		xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
			free->xbfi_blockcount);
		xfs_bmap_del_free(flist, NULL, free);
	}
	return 0;
}

int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
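	 *
	 * Illustrative example (not in the original comment): with
	 * sb_rextsize = 4 fsblocks and an extent size hint (align) of 8,
	 * prod is 2; a 13-block request is rounded up to 16 blocks by the
	 * alignment above, giving ralen = 16 / 4 = 4 realtime extents.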
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out other modifications to the RT bitmap inode.
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->length = 0;
	}
	return 0;
}

/*
 * Stack switching interfaces for allocation
 */
static void
xfs_bmapi_allocate_worker(
	struct work_struct	*work)
{
	struct xfs_bmalloca	*args = container_of(work,
						struct xfs_bmalloca, work);
	unsigned long		pflags;

	/* we are in a transaction context here */
	current_set_flags_nested(&pflags, PF_FSTRANS);

	args->result = __xfs_bmapi_allocate(args);
	complete(args->done);

	current_restore_flags_nested(&pflags, PF_FSTRANS);
}

/*
 * Some allocation requests often come in with little stack to work on. Push
 * them off to a worker thread so there is lots of stack to use. Otherwise just
 * call directly to avoid the context switch overhead here.
 */
int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*args)
{
	DECLARE_COMPLETION_ONSTACK(done);

	if (!args->stack_switch)
		return __xfs_bmapi_allocate(args);

	args->done = &done;
	INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
	queue_work(xfs_alloc_wq, &args->work);
	wait_for_completion(&done);
	return args->result;
}

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.
 * All offsets are considered outside the end of file for an empty fork,
 * so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int		b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int		b;
	xfs_bmbt_rec_t	*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int					/* error */
xfs_bmap_count_tree(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fsblock_t	blockno,	/* file system block number */
	int		levelin,	/* level in btree */
	int		*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.
 */
int						/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLDFSBNO);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	return 0;
}

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
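 *
 * For example, a caller passing bmv_count == 17 can get back at most 16
 * extent records from this call: nex is set to bmv->bmv_count - 1 below,
 * with the remaining slot accounting for the caller's own header entry in
 * the getbmap interface.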
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			nexleft;	/* # of user extents left */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;
	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

	if (whichfork == XFS_ATTR_FORK) {
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return XFS_ERROR(EINVAL);
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}

		prealloced = 0;
		fixlen = 1LL << 32;
	} else {
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return XFS_ERROR(EINVAL);

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return XFS_ERROR(EINVAL);
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return XFS_ERROR(EINVAL);
	bmvend = bmv->bmv_offset + bmv->bmv_length;

	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return XFS_ERROR(ENOMEM);
	out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL);
	if (!out) {
		out = kmem_zalloc_large(bmv->bmv_count *
					sizeof(struct getbmapx));
		if (!out)
			return XFS_ERROR(ENOMEM);
	}

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
		if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
			error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;
		}
		/*
		 * even after flushing the inode, there can still be delalloc
		 * blocks on the inode beyond EOF due to speculative
		 * preallocation.  These are not removed until the release
		 * function is called or the inode is inactivated.  Hence we
		 * cannot assert here that ip->i_delayed_blks == 0.
		 */
	}

	lock = xfs_ilock_map_shared(ip);

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	nexleft = nex;

	do {
		nmap = (nexleft > subnex) ? subnex : nexleft;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time. These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
					prealloced, bmvend,
					map[i].br_startblock))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			nexleft--;
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock_map_shared(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error || full)
			break;
	}

	if (is_vmalloc_addr(out))
		kmem_free_large(out);
	else
		kmem_free(out);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		xfs_bmap_free_t flist;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/flist pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&flist, &done);
		if (error)
			break;

		ASSERT(!flist.xbf_count && !flist.xbf_first);
next_block:
		start_fsb++;
		remaining--;
	} while (remaining > 0);

	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VN_CACHED(VFS_I(ip)) == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	bool		need_iolock)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_fileoff_t	end_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/*
		 * There are blocks after the end of file.
		 * Free them up now by truncating the file to
		 * its current size.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

		if (need_iolock) {
			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
				xfs_trans_cancel(tp, 0);
				return EAGAIN;
			}
		}

		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			if (need_iolock)
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					      XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp,
					 (XFS_TRANS_RELEASE_LOG_RES |
					  XFS_TRANS_ABORT));
		} else {
			error = xfs_trans_commit(tp,
						XFS_TRANS_RELEASE_LOG_RES);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (need_iolock)
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}
	return error;
}

/*
 * xfs_alloc_file_space()
 *	This routine allocates disk space for the given file.
 *
 *	If alloc_type == 0, this request is for an ALLOCSP type
 *	request which will change the file size.  In this case, no
 *	DMAPI event will be generated by the call.  A TRUNCATE event
 *	will be generated later by xfs_setattr.
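 *
 *	(In this file alloc_type is passed straight through to
 *	xfs_bmapi_write() as XFS_BMAPI_* flags: xfs_change_file_space()
 *	passes XFS_BMAPI_PREALLOC for RESVSP and 0 for ALLOCSP, and
 *	xfs_zero_file_space() adds XFS_BMAPI_CONVERT.)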
 *
 *	If alloc_type != 0, this request is for a RESVSP type
 *	request, and a DMAPI DM_EVENT_WRITE will be generated if the
 *	lower block boundary byte address is less than the file's
 *	length.
 *
 * RETURNS:
 *	 0 on success
 *	errno on error
 *
 */
STATIC int
xfs_alloc_file_space(
	xfs_inode_t		*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type,
	int			attr_flags)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	xfs_bmap_free_t		free_list;
	uint			qblocks, resblks, resrtextents;
	int			committed;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return XFS_ERROR(EINVAL);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, resrtextents);
		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					0, imapp, &nimaps, &free_list);
		if (error) {
			goto error0;
		}

		/*
		 * Complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error) {
			goto error0;
		}

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error) {
			break;
		}

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = XFS_ERROR(ENOSPC);
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
	xfs_inode_t		*ip,
	xfs_off_t		startoff,
	xfs_off_t		endoff)
{
	xfs_bmbt_irec_t		imap;
	xfs_fileoff_t		offset_fsb;
	xfs_off_t		lastoffset;
	xfs_off_t		offset;
	xfs_buf_t		*bp;
	xfs_mount_t		*mp = ip->i_mount;
	int			nimap;
	int			error = 0;

	/*
	 * Avoid doing I/O beyond eof - it's not necessary
	 * since nothing can read beyond eof.  The space will
	 * be zeroed when the file is extended anyway.
	 */
	if (startoff >= XFS_ISIZE(ip))
		return 0;

	if (endoff > XFS_ISIZE(ip))
		endoff = XFS_ISIZE(ip);

	bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp,
				  BTOBB(mp->m_sb.sb_blocksize), 0);
	if (!bp)
		return XFS_ERROR(ENOMEM);

	xfs_buf_unlock(bp);

	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
		offset_fsb = XFS_B_TO_FSBT(mp, offset);
		nimap = 1;
		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
		if (error || nimap < 1)
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;
		if (imap.br_startblock == HOLESTARTBLOCK)
			continue;
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		if (imap.br_state == XFS_EXT_UNWRITTEN)
			continue;
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNWRITE(bp);
		XFS_BUF_READ(bp);
		XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
		xfsbdstrat(mp, bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(read)");
			break;
		}
		memset(bp->b_addr +
			(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
		      0, lastoffset - offset + 1);
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNREAD(bp);
		XFS_BUF_WRITE(bp);
		xfsbdstrat(mp, bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(write)");
			break;
		}
	}
	xfs_buf_free(bp);
	return error;
}

/*
 * xfs_free_file_space()
 *	This routine frees disk space for the given file.
 *
 *	This routine is only called by xfs_change_file_space
 *	for an UNRESVSP type call.
 *
 * RETURNS:
 *	 0 on success
 *	errno on error
 *
 */
STATIC int
xfs_free_file_space(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	len,
	int		attr_flags)
{
	int		committed;
	int		done;
	xfs_fileoff_t	endoffset_fsb;
	int		error;
	xfs_fsblock_t	firstfsb;
	xfs_bmap_free_t	free_list;
	xfs_bmbt_irec_t	imap;
	xfs_off_t	ioffset;
	xfs_extlen_t	mod = 0;
	xfs_mount_t	*mp;
	int		nimap;
	uint		resblks;
	xfs_off_t	rounding;
	int		rt;
	xfs_fileoff_t	startoffset_fsb;
	xfs_trans_t	*tp;
	int		need_iolock = 1;

	mp = ip->i_mount;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	error = 0;
	if (len <= 0)	/* if nothing being freed */
		return error;
	rt = XFS_IS_REALTIME_INODE(ip);
	startoffset_fsb	= XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	if (attr_flags & XFS_ATTR_NOLOCK)
		need_iolock = 0;
	if (need_iolock) {
		xfs_ilock(ip, XFS_IOLOCK_EXCL);
		/* wait for the completion of any pending DIOs */
		inode_dio_wait(VFS_I(ip));
	}

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
	ioffset = offset & ~(rounding - 1);
	error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					      ioffset, -1);
	if (error)
		goto out_unlock_iolock;
	truncate_pagecache_range(VFS_I(ip), ioffset, -1);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 * If it's a realtime file & can't use unwritten extents then we
	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
	 * will take care of it for us.
	 */
	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		nimap = 1;
		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
					&imap, &nimap, 0);
		if (error)
			goto out_unlock_iolock;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			xfs_daddr_t	block;

			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			block = imap.br_startblock;
			mod = do_div(block, mp->m_sb.sb_rextsize);
			if (mod)
				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
		}
		nimap = 1;
		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
					&imap, &nimap, 0);
		if (error)
			goto out_unlock_iolock;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			mod++;
			if (mod && (mod != mp->m_sb.sb_rextsize))
				endoffset_fsb -= mod;
		}
	}
	if ((done = (endoffset_fsb <= startoffset_fsb)))
		/*
		 * One contiguous piece to clear
		 */
		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
	else {
		/*
		 * Some full blocks, possibly two pieces to clear
		 */
		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
			error = xfs_zero_remaining_bytes(ip, offset,
				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
		if (!error &&
		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
			error = xfs_zero_remaining_bytes(ip,
				XFS_FSB_TO_B(mp, endoffset_fsb),
				offset + len - 1);
	}

	/*
	 * free file space until done or until there is an error
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	while (!error && !done) {

		/*
		 * allocate and setup the transaction. Allow this
		 * transaction to dip into the reserve blocks to ensure
		 * the freeing of the space succeeds at ENOSPC.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		tp->t_flags |= XFS_TRANS_RESERVE;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);

		/*
		 * check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp, 0);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp,
				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
				resblks, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * issue the bunmapi() call to free the blocks
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bunmapi(tp, ip, startoffset_fsb,
				  endoffset_fsb - startoffset_fsb,
				  0, 2, &firstfsb, &free_list, &done);
		if (error) {
			goto error0;
		}

		/*
		 * complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error) {
			goto error0;
		}

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

 out_unlock_iolock:
	if (need_iolock)
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;

 error0:
	xfs_bmap_cancel(&free_list);
 error1:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, need_iolock ?
			(XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
			XFS_ILOCK_EXCL);
	return error;
}


STATIC int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			attr_flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			granularity;
	xfs_off_t		start_boundary;
	xfs_off_t		end_boundary;
	int			error;

	granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);

	/*
	 * Round the range of extents we are going to convert inwards.  If the
	 * offset is aligned, then it doesn't get changed so we zero from the
	 * start of the block offset points to.
	 */
	start_boundary = round_up(offset, granularity);
	end_boundary = round_down(offset + len, granularity);

	ASSERT(start_boundary >= offset);
	ASSERT(end_boundary <= offset + len);

	if (!(attr_flags & XFS_ATTR_NOLOCK))
		xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (start_boundary < end_boundary - 1) {
		/* punch out the page cache over the conversion range */
		truncate_pagecache_range(VFS_I(ip), start_boundary,
					 end_boundary - 1);
		/* convert the blocks */
		error = xfs_alloc_file_space(ip, start_boundary,
					end_boundary - start_boundary - 1,
					XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT,
					attr_flags);
		if (error)
			goto out_unlock;

		/* We've handled the interior of the range, now for the edges */
		if (start_boundary != offset)
			error = xfs_iozero(ip, offset,
					   start_boundary - offset);
		if (error)
			goto out_unlock;

		if (end_boundary != offset + len)
			error = xfs_iozero(ip, end_boundary,
					   offset + len - end_boundary);

	} else {
		/*
		 * It's either a sub-granularity range or the range spanned
		 * lies partially across two adjacent blocks.
		 */
		error = xfs_iozero(ip, offset, len);
	}

out_unlock:
	if (!(attr_flags & XFS_ATTR_NOLOCK))
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;

}

/*
 * xfs_change_file_space()
 *	This routine allocates or frees disk space for the given file.
 *	The user specified parameters are checked for alignment and size
 *	limitations.
 *
 * RETURNS:
 *	 0 on success
 *	errno on error
 *
 */
int
xfs_change_file_space(
	xfs_inode_t	*ip,
	int		cmd,
	xfs_flock64_t	*bf,
	xfs_off_t	offset,
	int		attr_flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		clrprealloc;
	int		error;
	xfs_fsize_t	fsize;
	int		setprealloc;
	xfs_off_t	startoffset;
	xfs_trans_t	*tp;
	struct iattr	iattr;

	if (!S_ISREG(ip->i_d.di_mode))
		return XFS_ERROR(EINVAL);

	switch (bf->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		bf->l_start += offset;
		break;
	case 2: /*SEEK_END*/
		bf->l_start += XFS_ISIZE(ip);
		break;
	default:
		return XFS_ERROR(EINVAL);
	}

	/*
	 * length of <= 0 for resv/unresv/zero is invalid.  length for
	 * alloc/free is ignored completely and we have no idea what userspace
	 * might have set it to, so set it to zero to allow range
	 * checks to pass.
	 */
	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		if (bf->l_len <= 0)
			return XFS_ERROR(EINVAL);
		break;
	default:
		bf->l_len = 0;
		break;
	}

	if (bf->l_start < 0 ||
	    bf->l_start > mp->m_super->s_maxbytes ||
	    bf->l_start + bf->l_len < 0 ||
	    bf->l_start + bf->l_len >= mp->m_super->s_maxbytes)
		return XFS_ERROR(EINVAL);

	bf->l_whence = 0;

	startoffset = bf->l_start;
	fsize = XFS_ISIZE(ip);

	setprealloc = clrprealloc = 0;
	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
		error = xfs_zero_file_space(ip, startoffset, bf->l_len,
						attr_flags);
		if (error)
			return error;
		setprealloc = 1;
		break;

	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
		error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
						XFS_BMAPI_PREALLOC, attr_flags);
		if (error)
			return error;
		setprealloc = 1;
		break;

	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
								attr_flags)))
			return error;
		break;

	case XFS_IOC_ALLOCSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP:
	case XFS_IOC_FREESP64:
		/*
		 * These operations actually do IO when extending the file, but
		 * the allocation is done separately from the zeroing that is
		 * done. This set of operations needs to be serialised against
		 * other IO operations, such as truncate and buffered IO. We
		 * need to take the IOLOCK here to serialise the allocation and
		 * zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
		 * truncate, direct IO) from racing against the transient
		 * allocated but not written state we can have here.
		 */
		xfs_ilock(ip, XFS_IOLOCK_EXCL);
		if (startoffset > fsize) {
			error = xfs_alloc_file_space(ip, fsize,
					startoffset - fsize, 0,
					attr_flags | XFS_ATTR_NOLOCK);
			if (error) {
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
				break;
			}
		}

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = startoffset;

		error = xfs_setattr_size(ip, &iattr,
					 attr_flags | XFS_ATTR_NOLOCK);
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);

		if (error)
			return error;

		clrprealloc = 1;
		break;

	default:
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}

	/*
	 * update the inode timestamp, mode, and prealloc flag bits
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if ((attr_flags & XFS_ATTR_DMI) == 0) {
		ip->i_d.di_mode &= ~S_ISUID;

		/*
		 * Note that we don't have to worry about mandatory
		 * file locking being disabled here because we only
		 * clear the S_ISGID bit if the Group execute bit is
		 * on, but if it was on then mandatory locking wouldn't
		 * have been enabled.
		 */
		if (ip->i_d.di_mode & S_IXGRP)
			ip->i_d.di_mode &= ~S_ISGID;

		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}
	if (setprealloc)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	else if (clrprealloc)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (attr_flags & XFS_ATTR_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * then why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return EINVAL;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return EINVAL;
	}

	return 0;
}

int
xfs_swap_extents(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip,	/* tmp inode */
	xfs_swapext_t	*sxp)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	xfs_bstat_t	*sbp = &sxp->sx_stat;
	xfs_ifork_t	*tempifp, *ifp, *tifp;
	int		src_log_flags, target_log_flags;
	int		error = 0;
	int		aforkblks = 0;
	int		taforkblks = 0;
	__uint64_t	tmp;

	/*
	 * We have no way of updating owner information in the BMBT blocks for
	 * each inode on CRC enabled filesystems, so to avoid corrupting this
	 * metadata we simply don't allow extent swaps to occur.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		return XFS_ERROR(EINVAL);

	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
	if (!tempifp) {
		error = XFS_ERROR(ENOMEM);
		goto out;
	}

	/*
	 * we have to do two separate lock calls here to keep lockdep
	 * happy. If we try to get all the locks in one call, lockdep will
	 * report false positives when we drop the ILOCK and regain them
	 * below.
	 */
	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);

	/* Verify that both files have the same format */
	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
		error = XFS_ERROR(EINVAL);
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = XFS_ERROR(EINVAL);
		goto out_unlock;
	}

	error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
	if (error)
		goto out_unlock;
	truncate_pagecache_range(VFS_I(tip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VN_CACHED(VFS_I(tip)) != 0) {
		error = XFS_ERROR(EINVAL);
		goto out_unlock;
	}

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = XFS_ERROR(EFAULT);
		goto out_unlock;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_unlock;
	}

	/*
	 * Compare the current change & modify times with those
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to assure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = XFS_ERROR(EBUSY);
		goto out_unlock;
	}

	/* We need to fail if the file is memory mapped.  Once we have tossed
	 * all existing pages, the page fault will have no option
	 * but to go to the filesystem for pages. By making the page fault call
	 * vop_read (or write in the case of autogrow) they block on the iolock
	 * until we have switched the extents.
	 */
	if (VN_MAPPED(VFS_I(ip))) {
		error = XFS_ERROR(EBUSY);
		goto out_unlock;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL);

	/*
	 * There is a race condition here since we gave up the
	 * ilock.  However, the data fork will not change since
	 * we have the iolock (locked for truncation too) so we
	 * are safe.  We don't really care if non-io related
	 * fields change.
	 */
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
	if (error) {
		xfs_iunlock(ip,  XFS_IOLOCK_EXCL);
		xfs_iunlock(tip, XFS_IOLOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		goto out;
	}
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);

	/*
	 * Count the number of extended attribute blocks
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
		if (error)
			goto out_trans_cancel;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
			&taforkblks);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	*tempifp = *ifp;	/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = *tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	src_log_flags = XFS_ILOG_CORE;
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.
		 * Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			ifp->if_u1.if_extents =
				ifp->if_u2.if_inline_ext;
		}
		src_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		src_log_flags |= XFS_ILOG_DBROOT;
		break;
	}

	target_log_flags = XFS_ILOG_CORE;
	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			tifp->if_u1.if_extents =
				tifp->if_u2.if_inline_ext;
		}
		target_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		target_log_flags |= XFS_ILOG_DBROOT;
		break;
	}

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_trans_log_inode(tp, ip,  src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp, 0);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);
out:
	kmem_free(tempifp);
	return error;

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	goto out;

out_trans_cancel:
	xfs_trans_cancel(tp, 0);
	goto out_unlock;
}