// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"


kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
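	 *
	 * From those limits the loop below derives the worst-case tree
	 * height: ceil(maxleafents / minleafrecs) leaf blocks are needed to
	 * hold every possible record, each further level needs
	 * ceil(blocks / minnoderecs) node blocks to point at the level
	 * below, and the loop stops once the remaining blocks fit in the
	 * inode root, which holds at most maxrootrecs pointers.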
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}

/*
 * Calculate the default attribute fork offset for newly created inodes.
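 * The value returned is in bytes; callers that store it in di_forkoff
 * convert it to that field's 8-byte units by shifting right by 3 (see
 * xfs_bmap_forkoff_reset() and xfs_bmap_set_attrforkoff() below).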
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	struct xfs_ifork	*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					xfs_verify_fsbno(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be freed at transaction end.
 * The list is maintained sorted (by block number).
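 *
 * The extent is not freed immediately: xfs_defer_add() queues it as a
 * deferred free that is processed when the deferred operations for this
 * transaction are finished.  Most callers are expected to go through the
 * xfs_bmap_add_free() wrapper (as xfs_bmap_btree_to_extents() below does)
 * rather than call this directly with an explicit skip_discard argument.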
 */
void
__xfs_bmap_add_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}

/*
 * Inode fork format manipulation functions
 */

/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	struct xfs_ifork	*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */
	struct xfs_owner_info	oinfo;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
			xfs_btree_check_lptr(cur, cbno, 1));
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
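	/*
	 * The single leaf block has been freed and invalidated; now give the
	 * broot space back to the inode fork and flip it to extents format.
	 */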
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	if (!abp) {
		error = -EFSCORRUPTED;
		goto out_unreserve_dquot;
	}

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}


STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
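	 *
	 * xfs_symlink_local_to_remote() is one such callout; see its use in
	 * xfs_bmap_add_attrfork_local() below.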
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/* Set an inode attr fork off based on the format */
int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extnum_t		nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	struct xfs_btree_block	*block = ifp->if_broot;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	new;
	xfs_fsblock_t		bno;
	struct xfs_buf		*bp;
	xfs_extnum_t		i, j;
	int			level;
	__be64			*pp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			xfs_verify_fsbno(mp, bno), out_brelse);
		xfs_trans_brelse(tp, bp);
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;
	xfs_iext_first(ifp, &icur);

	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > nextents)) {
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					__func__, block, sizeof(*block),
					__this_address);
			error = -EFSCORRUPTED;
			goto out_brelse;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		for (j = 0; j < num_recs; j++, frp++, i++) {
			xfs_failaddr_t	fa;

			xfs_bmbt_disk_get_all(frp, &new);
			fa = xfs_bmap_validate_extent(ip, whichfork, &new);
			if (fa) {
				error = -EFSCORRUPTED;
				xfs_inode_verifier_error(ip, error,
						"xfs_iread_extents(2)",
						frp, sizeof(*frp), fa);
				goto out_brelse;
			}
			xfs_iext_insert(ip, &icur, &new, state);
			trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
			xfs_iext_next(ifp, &icur);
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
	}

	if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(i == xfs_iext_count(ifp));

	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;

out_brelse:
	xfs_trans_brelse(tp, bp);
out:
	xfs_iext_destroy(ifp);
	return error;
}

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free.  This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork.  Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
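		 * A hole is usable when the gap between the end of the
		 * previous extent (tracked in "max") and the start of this
		 * one is at least "len" blocks, and it lies at or beyond the
		 * caller's starting offset ("lowest").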
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	struct xfs_ifork *ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */
	struct xfs_iext_cursor icur;

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_first(ifp, &icur);
	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
	struct xfs_bmbt_irec	old;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
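	 *
	 * PREV is the existing delayed allocation record that the new real
	 * allocation replaces (in whole or in part); LEFT and RIGHT are the
	 * in-core extents on either side of it, used to decide which of the
	 * merge cases below applies.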
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		(*nextents)--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
1865 */ 1866 xfs_iext_update_extent(bma->ip, state, &bma->icur, new); 1867 (*nextents)++; 1868 if (bma->cur == NULL) 1869 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1870 else { 1871 rval = XFS_ILOG_CORE; 1872 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1873 if (error) 1874 goto done; 1875 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1876 error = xfs_btree_insert(bma->cur, &i); 1877 if (error) 1878 goto done; 1879 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1880 } 1881 1882 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1883 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1884 &bma->cur, 1, &tmp_rval, whichfork); 1885 rval |= tmp_rval; 1886 if (error) 1887 goto done; 1888 } 1889 1890 temp = PREV.br_blockcount - new->br_blockcount; 1891 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), 1892 startblockval(PREV.br_startblock) - 1893 (bma->cur ? bma->cur->bc_private.b.allocated : 0)); 1894 1895 PREV.br_startblock = nullstartblock(da_new); 1896 PREV.br_blockcount = temp; 1897 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state); 1898 xfs_iext_next(ifp, &bma->icur); 1899 break; 1900 1901 case 0: 1902 /* 1903 * Filling in the middle part of a previous delayed allocation. 1904 * Contiguity is impossible here. 1905 * This case is avoided almost all the time. 1906 * 1907 * We start with a delayed allocation: 1908 * 1909 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ 1910 * PREV @ idx 1911 * 1912 * and we are allocating: 1913 * +rrrrrrrrrrrrrrrrr+ 1914 * new 1915 * 1916 * and we set it up for insertion as: 1917 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ 1918 * new 1919 * PREV @ idx LEFT RIGHT 1920 * inserted at idx + 1 1921 */ 1922 old = PREV; 1923 1924 /* LEFT is the new middle */ 1925 LEFT = *new; 1926 1927 /* RIGHT is the new right */ 1928 RIGHT.br_state = PREV.br_state; 1929 RIGHT.br_startoff = new_endoff; 1930 RIGHT.br_blockcount = 1931 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1932 RIGHT.br_startblock = 1933 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1934 RIGHT.br_blockcount)); 1935 1936 /* truncate PREV */ 1937 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 1938 PREV.br_startblock = 1939 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1940 PREV.br_blockcount)); 1941 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); 1942 1943 xfs_iext_next(ifp, &bma->icur); 1944 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state); 1945 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state); 1946 (*nextents)++; 1947 1948 if (bma->cur == NULL) 1949 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1950 else { 1951 rval = XFS_ILOG_CORE; 1952 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1953 if (error) 1954 goto done; 1955 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1956 error = xfs_btree_insert(bma->cur, &i); 1957 if (error) 1958 goto done; 1959 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1960 } 1961 1962 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1963 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1964 &bma->cur, 1, &tmp_rval, whichfork); 1965 rval |= tmp_rval; 1966 if (error) 1967 goto done; 1968 } 1969 1970 da_new = startblockval(PREV.br_startblock) + 1971 startblockval(RIGHT.br_startblock); 1972 break; 1973 1974 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1975 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1976 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 1977 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1978 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1979 case BMAP_LEFT_CONTIG: 1980 case BMAP_RIGHT_CONTIG: 1981 
/* 1982 * These cases are all impossible. 1983 */ 1984 ASSERT(0); 1985 } 1986 1987 /* add reverse mapping unless caller opted out */ 1988 if (!(bma->flags & XFS_BMAPI_NORMAP)) { 1989 error = xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new); 1990 if (error) 1991 goto done; 1992 } 1993 1994 /* convert to a btree if necessary */ 1995 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1996 int tmp_logflags; /* partial log flag return val */ 1997 1998 ASSERT(bma->cur == NULL); 1999 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2000 &bma->cur, da_old > 0, &tmp_logflags, 2001 whichfork); 2002 bma->logflags |= tmp_logflags; 2003 if (error) 2004 goto done; 2005 } 2006 2007 if (bma->cur) { 2008 da_new += bma->cur->bc_private.b.allocated; 2009 bma->cur->bc_private.b.allocated = 0; 2010 } 2011 2012 /* adjust for changes in reserved delayed indirect blocks */ 2013 if (da_new != da_old) { 2014 ASSERT(state == 0 || da_new < da_old); 2015 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), 2016 false); 2017 } 2018 2019 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 2020 done: 2021 if (whichfork != XFS_COW_FORK) 2022 bma->logflags |= rval; 2023 return error; 2024 #undef LEFT 2025 #undef RIGHT 2026 #undef PREV 2027 } 2028 2029 /* 2030 * Convert an unwritten allocation to a real allocation or vice versa. 2031 */ 2032 STATIC int /* error */ 2033 xfs_bmap_add_extent_unwritten_real( 2034 struct xfs_trans *tp, 2035 xfs_inode_t *ip, /* incore inode pointer */ 2036 int whichfork, 2037 struct xfs_iext_cursor *icur, 2038 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 2039 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 2040 int *logflagsp) /* inode logging flags */ 2041 { 2042 xfs_btree_cur_t *cur; /* btree cursor */ 2043 int error; /* error return value */ 2044 int i; /* temp state */ 2045 struct xfs_ifork *ifp; /* inode fork pointer */ 2046 xfs_fileoff_t new_endoff; /* end offset of new entry */ 2047 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 2048 /* left is 0, right is 1, prev is 2 */ 2049 int rval=0; /* return value (logging flags) */ 2050 int state = xfs_bmap_fork_to_state(whichfork); 2051 struct xfs_mount *mp = ip->i_mount; 2052 struct xfs_bmbt_irec old; 2053 2054 *logflagsp = 0; 2055 2056 cur = *curp; 2057 ifp = XFS_IFORK_PTR(ip, whichfork); 2058 2059 ASSERT(!isnullstartblock(new->br_startblock)); 2060 2061 XFS_STATS_INC(mp, xs_add_exlist); 2062 2063 #define LEFT r[0] 2064 #define RIGHT r[1] 2065 #define PREV r[2] 2066 2067 /* 2068 * Set up a bunch of variables to make the tests simpler. 2069 */ 2070 error = 0; 2071 xfs_iext_get_extent(ifp, icur, &PREV); 2072 ASSERT(new->br_state != PREV.br_state); 2073 new_endoff = new->br_startoff + new->br_blockcount; 2074 ASSERT(PREV.br_startoff <= new->br_startoff); 2075 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2076 2077 /* 2078 * Set flags determining what part of the previous oldext allocation 2079 * extent is being replaced by a newext allocation. 2080 */ 2081 if (PREV.br_startoff == new->br_startoff) 2082 state |= BMAP_LEFT_FILLING; 2083 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2084 state |= BMAP_RIGHT_FILLING; 2085 2086 /* 2087 * Check and set flags if this segment has a left neighbor. 2088 * Don't set contiguous if the combined extent would be too large. 
2089 */ 2090 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) { 2091 state |= BMAP_LEFT_VALID; 2092 if (isnullstartblock(LEFT.br_startblock)) 2093 state |= BMAP_LEFT_DELAY; 2094 } 2095 2096 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2097 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2098 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2099 LEFT.br_state == new->br_state && 2100 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2101 state |= BMAP_LEFT_CONTIG; 2102 2103 /* 2104 * Check and set flags if this segment has a right neighbor. 2105 * Don't set contiguous if the combined extent would be too large. 2106 * Also check for all-three-contiguous being too large. 2107 */ 2108 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) { 2109 state |= BMAP_RIGHT_VALID; 2110 if (isnullstartblock(RIGHT.br_startblock)) 2111 state |= BMAP_RIGHT_DELAY; 2112 } 2113 2114 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2115 new_endoff == RIGHT.br_startoff && 2116 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2117 new->br_state == RIGHT.br_state && 2118 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 2119 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2120 BMAP_RIGHT_FILLING)) != 2121 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2122 BMAP_RIGHT_FILLING) || 2123 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2124 <= MAXEXTLEN)) 2125 state |= BMAP_RIGHT_CONTIG; 2126 2127 /* 2128 * Switch out based on the FILLING and CONTIG state bits. 2129 */ 2130 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2131 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2132 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2133 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2134 /* 2135 * Setting all of a previous oldext extent to newext. 2136 * The left and right neighbors are both contiguous with new. 2137 */ 2138 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; 2139 2140 xfs_iext_remove(ip, icur, state); 2141 xfs_iext_remove(ip, icur, state); 2142 xfs_iext_prev(ifp, icur); 2143 xfs_iext_update_extent(ip, state, icur, &LEFT); 2144 XFS_IFORK_NEXT_SET(ip, whichfork, 2145 XFS_IFORK_NEXTENTS(ip, whichfork) - 2); 2146 if (cur == NULL) 2147 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2148 else { 2149 rval = XFS_ILOG_CORE; 2150 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2151 if (error) 2152 goto done; 2153 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2154 if ((error = xfs_btree_delete(cur, &i))) 2155 goto done; 2156 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2157 if ((error = xfs_btree_decrement(cur, 0, &i))) 2158 goto done; 2159 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2160 if ((error = xfs_btree_delete(cur, &i))) 2161 goto done; 2162 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2163 if ((error = xfs_btree_decrement(cur, 0, &i))) 2164 goto done; 2165 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2166 error = xfs_bmbt_update(cur, &LEFT); 2167 if (error) 2168 goto done; 2169 } 2170 break; 2171 2172 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2173 /* 2174 * Setting all of a previous oldext extent to newext. 2175 * The left neighbor is contiguous, the right is not. 
2176 */ 2177 LEFT.br_blockcount += PREV.br_blockcount; 2178 2179 xfs_iext_remove(ip, icur, state); 2180 xfs_iext_prev(ifp, icur); 2181 xfs_iext_update_extent(ip, state, icur, &LEFT); 2182 XFS_IFORK_NEXT_SET(ip, whichfork, 2183 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2184 if (cur == NULL) 2185 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2186 else { 2187 rval = XFS_ILOG_CORE; 2188 error = xfs_bmbt_lookup_eq(cur, &PREV, &i); 2189 if (error) 2190 goto done; 2191 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2192 if ((error = xfs_btree_delete(cur, &i))) 2193 goto done; 2194 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2195 if ((error = xfs_btree_decrement(cur, 0, &i))) 2196 goto done; 2197 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2198 error = xfs_bmbt_update(cur, &LEFT); 2199 if (error) 2200 goto done; 2201 } 2202 break; 2203 2204 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2205 /* 2206 * Setting all of a previous oldext extent to newext. 2207 * The right neighbor is contiguous, the left is not. 2208 */ 2209 PREV.br_blockcount += RIGHT.br_blockcount; 2210 PREV.br_state = new->br_state; 2211 2212 xfs_iext_next(ifp, icur); 2213 xfs_iext_remove(ip, icur, state); 2214 xfs_iext_prev(ifp, icur); 2215 xfs_iext_update_extent(ip, state, icur, &PREV); 2216 2217 XFS_IFORK_NEXT_SET(ip, whichfork, 2218 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2219 if (cur == NULL) 2220 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2221 else { 2222 rval = XFS_ILOG_CORE; 2223 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2224 if (error) 2225 goto done; 2226 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2227 if ((error = xfs_btree_delete(cur, &i))) 2228 goto done; 2229 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2230 if ((error = xfs_btree_decrement(cur, 0, &i))) 2231 goto done; 2232 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2233 error = xfs_bmbt_update(cur, &PREV); 2234 if (error) 2235 goto done; 2236 } 2237 break; 2238 2239 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2240 /* 2241 * Setting all of a previous oldext extent to newext. 2242 * Neither the left nor right neighbors are contiguous with 2243 * the new one. 2244 */ 2245 PREV.br_state = new->br_state; 2246 xfs_iext_update_extent(ip, state, icur, &PREV); 2247 2248 if (cur == NULL) 2249 rval = XFS_ILOG_DEXT; 2250 else { 2251 rval = 0; 2252 error = xfs_bmbt_lookup_eq(cur, new, &i); 2253 if (error) 2254 goto done; 2255 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2256 error = xfs_bmbt_update(cur, &PREV); 2257 if (error) 2258 goto done; 2259 } 2260 break; 2261 2262 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2263 /* 2264 * Setting the first part of a previous oldext extent to newext. 2265 * The left neighbor is contiguous. 
2266 */ 2267 LEFT.br_blockcount += new->br_blockcount; 2268 2269 old = PREV; 2270 PREV.br_startoff += new->br_blockcount; 2271 PREV.br_startblock += new->br_blockcount; 2272 PREV.br_blockcount -= new->br_blockcount; 2273 2274 xfs_iext_update_extent(ip, state, icur, &PREV); 2275 xfs_iext_prev(ifp, icur); 2276 xfs_iext_update_extent(ip, state, icur, &LEFT); 2277 2278 if (cur == NULL) 2279 rval = XFS_ILOG_DEXT; 2280 else { 2281 rval = 0; 2282 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2283 if (error) 2284 goto done; 2285 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2286 error = xfs_bmbt_update(cur, &PREV); 2287 if (error) 2288 goto done; 2289 error = xfs_btree_decrement(cur, 0, &i); 2290 if (error) 2291 goto done; 2292 error = xfs_bmbt_update(cur, &LEFT); 2293 if (error) 2294 goto done; 2295 } 2296 break; 2297 2298 case BMAP_LEFT_FILLING: 2299 /* 2300 * Setting the first part of a previous oldext extent to newext. 2301 * The left neighbor is not contiguous. 2302 */ 2303 old = PREV; 2304 PREV.br_startoff += new->br_blockcount; 2305 PREV.br_startblock += new->br_blockcount; 2306 PREV.br_blockcount -= new->br_blockcount; 2307 2308 xfs_iext_update_extent(ip, state, icur, &PREV); 2309 xfs_iext_insert(ip, icur, new, state); 2310 XFS_IFORK_NEXT_SET(ip, whichfork, 2311 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2312 if (cur == NULL) 2313 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2314 else { 2315 rval = XFS_ILOG_CORE; 2316 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2317 if (error) 2318 goto done; 2319 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2320 error = xfs_bmbt_update(cur, &PREV); 2321 if (error) 2322 goto done; 2323 cur->bc_rec.b = *new; 2324 if ((error = xfs_btree_insert(cur, &i))) 2325 goto done; 2326 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2327 } 2328 break; 2329 2330 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2331 /* 2332 * Setting the last part of a previous oldext extent to newext. 2333 * The right neighbor is contiguous with the new allocation. 2334 */ 2335 old = PREV; 2336 PREV.br_blockcount -= new->br_blockcount; 2337 2338 RIGHT.br_startoff = new->br_startoff; 2339 RIGHT.br_startblock = new->br_startblock; 2340 RIGHT.br_blockcount += new->br_blockcount; 2341 2342 xfs_iext_update_extent(ip, state, icur, &PREV); 2343 xfs_iext_next(ifp, icur); 2344 xfs_iext_update_extent(ip, state, icur, &RIGHT); 2345 2346 if (cur == NULL) 2347 rval = XFS_ILOG_DEXT; 2348 else { 2349 rval = 0; 2350 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2351 if (error) 2352 goto done; 2353 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2354 error = xfs_bmbt_update(cur, &PREV); 2355 if (error) 2356 goto done; 2357 error = xfs_btree_increment(cur, 0, &i); 2358 if (error) 2359 goto done; 2360 error = xfs_bmbt_update(cur, &RIGHT); 2361 if (error) 2362 goto done; 2363 } 2364 break; 2365 2366 case BMAP_RIGHT_FILLING: 2367 /* 2368 * Setting the last part of a previous oldext extent to newext. 2369 * The right neighbor is not contiguous. 
2370 */ 2371 old = PREV; 2372 PREV.br_blockcount -= new->br_blockcount; 2373 2374 xfs_iext_update_extent(ip, state, icur, &PREV); 2375 xfs_iext_next(ifp, icur); 2376 xfs_iext_insert(ip, icur, new, state); 2377 2378 XFS_IFORK_NEXT_SET(ip, whichfork, 2379 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2380 if (cur == NULL) 2381 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2382 else { 2383 rval = XFS_ILOG_CORE; 2384 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2385 if (error) 2386 goto done; 2387 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2388 error = xfs_bmbt_update(cur, &PREV); 2389 if (error) 2390 goto done; 2391 error = xfs_bmbt_lookup_eq(cur, new, &i); 2392 if (error) 2393 goto done; 2394 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2395 if ((error = xfs_btree_insert(cur, &i))) 2396 goto done; 2397 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2398 } 2399 break; 2400 2401 case 0: 2402 /* 2403 * Setting the middle part of a previous oldext extent to 2404 * newext. Contiguity is impossible here. 2405 * One extent becomes three extents. 2406 */ 2407 old = PREV; 2408 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 2409 2410 r[0] = *new; 2411 r[1].br_startoff = new_endoff; 2412 r[1].br_blockcount = 2413 old.br_startoff + old.br_blockcount - new_endoff; 2414 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2415 r[1].br_state = PREV.br_state; 2416 2417 xfs_iext_update_extent(ip, state, icur, &PREV); 2418 xfs_iext_next(ifp, icur); 2419 xfs_iext_insert(ip, icur, &r[1], state); 2420 xfs_iext_insert(ip, icur, &r[0], state); 2421 2422 XFS_IFORK_NEXT_SET(ip, whichfork, 2423 XFS_IFORK_NEXTENTS(ip, whichfork) + 2); 2424 if (cur == NULL) 2425 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2426 else { 2427 rval = XFS_ILOG_CORE; 2428 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2429 if (error) 2430 goto done; 2431 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2432 /* new right extent - oldext */ 2433 error = xfs_bmbt_update(cur, &r[1]); 2434 if (error) 2435 goto done; 2436 /* new left extent - oldext */ 2437 cur->bc_rec.b = PREV; 2438 if ((error = xfs_btree_insert(cur, &i))) 2439 goto done; 2440 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2441 /* 2442 * Reset the cursor to the position of the new extent 2443 * we are about to insert as we can't trust it after 2444 * the previous insert. 2445 */ 2446 error = xfs_bmbt_lookup_eq(cur, new, &i); 2447 if (error) 2448 goto done; 2449 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2450 /* new middle extent - newext */ 2451 if ((error = xfs_btree_insert(cur, &i))) 2452 goto done; 2453 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2454 } 2455 break; 2456 2457 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2458 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2459 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2460 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2461 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2462 case BMAP_LEFT_CONTIG: 2463 case BMAP_RIGHT_CONTIG: 2464 /* 2465 * These cases are all impossible. 
2466 */ 2467 ASSERT(0); 2468 } 2469 2470 /* update reverse mappings */ 2471 error = xfs_rmap_convert_extent(mp, tp, ip, whichfork, new); 2472 if (error) 2473 goto done; 2474 2475 /* convert to a btree if necessary */ 2476 if (xfs_bmap_needs_btree(ip, whichfork)) { 2477 int tmp_logflags; /* partial log flag return val */ 2478 2479 ASSERT(cur == NULL); 2480 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 2481 &tmp_logflags, whichfork); 2482 *logflagsp |= tmp_logflags; 2483 if (error) 2484 goto done; 2485 } 2486 2487 /* clear out the allocated field, done with it now in any case. */ 2488 if (cur) { 2489 cur->bc_private.b.allocated = 0; 2490 *curp = cur; 2491 } 2492 2493 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2494 done: 2495 *logflagsp |= rval; 2496 return error; 2497 #undef LEFT 2498 #undef RIGHT 2499 #undef PREV 2500 } 2501 2502 /* 2503 * Convert a hole to a delayed allocation. 2504 */ 2505 STATIC void 2506 xfs_bmap_add_extent_hole_delay( 2507 xfs_inode_t *ip, /* incore inode pointer */ 2508 int whichfork, 2509 struct xfs_iext_cursor *icur, 2510 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2511 { 2512 struct xfs_ifork *ifp; /* inode fork pointer */ 2513 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2514 xfs_filblks_t newlen=0; /* new indirect size */ 2515 xfs_filblks_t oldlen=0; /* old indirect size */ 2516 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2517 int state = xfs_bmap_fork_to_state(whichfork); 2518 xfs_filblks_t temp; /* temp for indirect calculations */ 2519 2520 ifp = XFS_IFORK_PTR(ip, whichfork); 2521 ASSERT(isnullstartblock(new->br_startblock)); 2522 2523 /* 2524 * Check and set flags if this segment has a left neighbor 2525 */ 2526 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2527 state |= BMAP_LEFT_VALID; 2528 if (isnullstartblock(left.br_startblock)) 2529 state |= BMAP_LEFT_DELAY; 2530 } 2531 2532 /* 2533 * Check and set flags if the current (right) segment exists. 2534 * If it doesn't exist, we're converting the hole at end-of-file. 2535 */ 2536 if (xfs_iext_get_extent(ifp, icur, &right)) { 2537 state |= BMAP_RIGHT_VALID; 2538 if (isnullstartblock(right.br_startblock)) 2539 state |= BMAP_RIGHT_DELAY; 2540 } 2541 2542 /* 2543 * Set contiguity flags on the left and right neighbors. 2544 * Don't let extents get too large, even if the pieces are contiguous. 2545 */ 2546 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2547 left.br_startoff + left.br_blockcount == new->br_startoff && 2548 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2549 state |= BMAP_LEFT_CONTIG; 2550 2551 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2552 new->br_startoff + new->br_blockcount == right.br_startoff && 2553 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2554 (!(state & BMAP_LEFT_CONTIG) || 2555 (left.br_blockcount + new->br_blockcount + 2556 right.br_blockcount <= MAXEXTLEN))) 2557 state |= BMAP_RIGHT_CONTIG; 2558 2559 /* 2560 * Switch out based on the contiguity flags. 2561 */ 2562 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2563 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2564 /* 2565 * New allocation is contiguous with delayed allocations 2566 * on the left and on the right. 2567 * Merge all three into a single extent record. 
2568 */ 2569 temp = left.br_blockcount + new->br_blockcount + 2570 right.br_blockcount; 2571 2572 oldlen = startblockval(left.br_startblock) + 2573 startblockval(new->br_startblock) + 2574 startblockval(right.br_startblock); 2575 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2576 oldlen); 2577 left.br_startblock = nullstartblock(newlen); 2578 left.br_blockcount = temp; 2579 2580 xfs_iext_remove(ip, icur, state); 2581 xfs_iext_prev(ifp, icur); 2582 xfs_iext_update_extent(ip, state, icur, &left); 2583 break; 2584 2585 case BMAP_LEFT_CONTIG: 2586 /* 2587 * New allocation is contiguous with a delayed allocation 2588 * on the left. 2589 * Merge the new allocation with the left neighbor. 2590 */ 2591 temp = left.br_blockcount + new->br_blockcount; 2592 2593 oldlen = startblockval(left.br_startblock) + 2594 startblockval(new->br_startblock); 2595 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2596 oldlen); 2597 left.br_blockcount = temp; 2598 left.br_startblock = nullstartblock(newlen); 2599 2600 xfs_iext_prev(ifp, icur); 2601 xfs_iext_update_extent(ip, state, icur, &left); 2602 break; 2603 2604 case BMAP_RIGHT_CONTIG: 2605 /* 2606 * New allocation is contiguous with a delayed allocation 2607 * on the right. 2608 * Merge the new allocation with the right neighbor. 2609 */ 2610 temp = new->br_blockcount + right.br_blockcount; 2611 oldlen = startblockval(new->br_startblock) + 2612 startblockval(right.br_startblock); 2613 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2614 oldlen); 2615 right.br_startoff = new->br_startoff; 2616 right.br_startblock = nullstartblock(newlen); 2617 right.br_blockcount = temp; 2618 xfs_iext_update_extent(ip, state, icur, &right); 2619 break; 2620 2621 case 0: 2622 /* 2623 * New allocation is not contiguous with another 2624 * delayed allocation. 2625 * Insert a new entry. 2626 */ 2627 oldlen = newlen = 0; 2628 xfs_iext_insert(ip, icur, new, state); 2629 break; 2630 } 2631 if (oldlen != newlen) { 2632 ASSERT(oldlen > newlen); 2633 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2634 false); 2635 /* 2636 * Nothing to do for disk quota accounting here. 2637 */ 2638 } 2639 } 2640 2641 /* 2642 * Convert a hole to a real allocation. 2643 */ 2644 STATIC int /* error */ 2645 xfs_bmap_add_extent_hole_real( 2646 struct xfs_trans *tp, 2647 struct xfs_inode *ip, 2648 int whichfork, 2649 struct xfs_iext_cursor *icur, 2650 struct xfs_btree_cur **curp, 2651 struct xfs_bmbt_irec *new, 2652 int *logflagsp, 2653 int flags) 2654 { 2655 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 2656 struct xfs_mount *mp = ip->i_mount; 2657 struct xfs_btree_cur *cur = *curp; 2658 int error; /* error return value */ 2659 int i; /* temp state */ 2660 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2661 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2662 int rval=0; /* return value (logging flags) */ 2663 int state = xfs_bmap_fork_to_state(whichfork); 2664 struct xfs_bmbt_irec old; 2665 2666 ASSERT(!isnullstartblock(new->br_startblock)); 2667 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 2668 2669 XFS_STATS_INC(mp, xs_add_exlist); 2670 2671 /* 2672 * Check and set flags if this segment has a left neighbor. 2673 */ 2674 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2675 state |= BMAP_LEFT_VALID; 2676 if (isnullstartblock(left.br_startblock)) 2677 state |= BMAP_LEFT_DELAY; 2678 } 2679 2680 /* 2681 * Check and set flags if this segment has a current value. 2682 * Not true if we're inserting into the "hole" at eof. 
2683 */ 2684 if (xfs_iext_get_extent(ifp, icur, &right)) { 2685 state |= BMAP_RIGHT_VALID; 2686 if (isnullstartblock(right.br_startblock)) 2687 state |= BMAP_RIGHT_DELAY; 2688 } 2689 2690 /* 2691 * We're inserting a real allocation between "left" and "right". 2692 * Set the contiguity flags. Don't let extents get too large. 2693 */ 2694 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2695 left.br_startoff + left.br_blockcount == new->br_startoff && 2696 left.br_startblock + left.br_blockcount == new->br_startblock && 2697 left.br_state == new->br_state && 2698 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2699 state |= BMAP_LEFT_CONTIG; 2700 2701 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2702 new->br_startoff + new->br_blockcount == right.br_startoff && 2703 new->br_startblock + new->br_blockcount == right.br_startblock && 2704 new->br_state == right.br_state && 2705 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2706 (!(state & BMAP_LEFT_CONTIG) || 2707 left.br_blockcount + new->br_blockcount + 2708 right.br_blockcount <= MAXEXTLEN)) 2709 state |= BMAP_RIGHT_CONTIG; 2710 2711 error = 0; 2712 /* 2713 * Select which case we're in here, and implement it. 2714 */ 2715 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2716 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2717 /* 2718 * New allocation is contiguous with real allocations on the 2719 * left and on the right. 2720 * Merge all three into a single extent record. 2721 */ 2722 left.br_blockcount += new->br_blockcount + right.br_blockcount; 2723 2724 xfs_iext_remove(ip, icur, state); 2725 xfs_iext_prev(ifp, icur); 2726 xfs_iext_update_extent(ip, state, icur, &left); 2727 2728 XFS_IFORK_NEXT_SET(ip, whichfork, 2729 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2730 if (cur == NULL) { 2731 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2732 } else { 2733 rval = XFS_ILOG_CORE; 2734 error = xfs_bmbt_lookup_eq(cur, &right, &i); 2735 if (error) 2736 goto done; 2737 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2738 error = xfs_btree_delete(cur, &i); 2739 if (error) 2740 goto done; 2741 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2742 error = xfs_btree_decrement(cur, 0, &i); 2743 if (error) 2744 goto done; 2745 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2746 error = xfs_bmbt_update(cur, &left); 2747 if (error) 2748 goto done; 2749 } 2750 break; 2751 2752 case BMAP_LEFT_CONTIG: 2753 /* 2754 * New allocation is contiguous with a real allocation 2755 * on the left. 2756 * Merge the new allocation with the left neighbor. 2757 */ 2758 old = left; 2759 left.br_blockcount += new->br_blockcount; 2760 2761 xfs_iext_prev(ifp, icur); 2762 xfs_iext_update_extent(ip, state, icur, &left); 2763 2764 if (cur == NULL) { 2765 rval = xfs_ilog_fext(whichfork); 2766 } else { 2767 rval = 0; 2768 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2769 if (error) 2770 goto done; 2771 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2772 error = xfs_bmbt_update(cur, &left); 2773 if (error) 2774 goto done; 2775 } 2776 break; 2777 2778 case BMAP_RIGHT_CONTIG: 2779 /* 2780 * New allocation is contiguous with a real allocation 2781 * on the right. 2782 * Merge the new allocation with the right neighbor. 
2783 */ 2784 old = right; 2785 2786 right.br_startoff = new->br_startoff; 2787 right.br_startblock = new->br_startblock; 2788 right.br_blockcount += new->br_blockcount; 2789 xfs_iext_update_extent(ip, state, icur, &right); 2790 2791 if (cur == NULL) { 2792 rval = xfs_ilog_fext(whichfork); 2793 } else { 2794 rval = 0; 2795 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2796 if (error) 2797 goto done; 2798 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2799 error = xfs_bmbt_update(cur, &right); 2800 if (error) 2801 goto done; 2802 } 2803 break; 2804 2805 case 0: 2806 /* 2807 * New allocation is not contiguous with another 2808 * real allocation. 2809 * Insert a new entry. 2810 */ 2811 xfs_iext_insert(ip, icur, new, state); 2812 XFS_IFORK_NEXT_SET(ip, whichfork, 2813 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2814 if (cur == NULL) { 2815 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2816 } else { 2817 rval = XFS_ILOG_CORE; 2818 error = xfs_bmbt_lookup_eq(cur, new, &i); 2819 if (error) 2820 goto done; 2821 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2822 error = xfs_btree_insert(cur, &i); 2823 if (error) 2824 goto done; 2825 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2826 } 2827 break; 2828 } 2829 2830 /* add reverse mapping unless caller opted out */ 2831 if (!(flags & XFS_BMAPI_NORMAP)) { 2832 error = xfs_rmap_map_extent(tp, ip, whichfork, new); 2833 if (error) 2834 goto done; 2835 } 2836 2837 /* convert to a btree if necessary */ 2838 if (xfs_bmap_needs_btree(ip, whichfork)) { 2839 int tmp_logflags; /* partial log flag return val */ 2840 2841 ASSERT(cur == NULL); 2842 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, 2843 &tmp_logflags, whichfork); 2844 *logflagsp |= tmp_logflags; 2845 cur = *curp; 2846 if (error) 2847 goto done; 2848 } 2849 2850 /* clear out the allocated field, done with it now in any case. */ 2851 if (cur) 2852 cur->bc_private.b.allocated = 0; 2853 2854 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 2855 done: 2856 *logflagsp |= rval; 2857 return error; 2858 } 2859 2860 /* 2861 * Functions used in the extent read, allocate and remove paths 2862 */ 2863 2864 /* 2865 * Adjust the size of the new extent based on di_extsize and rt extsize. 2866 */ 2867 int 2868 xfs_bmap_extsize_align( 2869 xfs_mount_t *mp, 2870 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 2871 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 2872 xfs_extlen_t extsz, /* align to this extent size */ 2873 int rt, /* is this a realtime inode? */ 2874 int eof, /* is extent at end-of-file? */ 2875 int delay, /* creating delalloc extent? */ 2876 int convert, /* overwriting unwritten extent? */ 2877 xfs_fileoff_t *offp, /* in/out: aligned offset */ 2878 xfs_extlen_t *lenp) /* in/out: aligned length */ 2879 { 2880 xfs_fileoff_t orig_off; /* original offset */ 2881 xfs_extlen_t orig_alen; /* original length */ 2882 xfs_fileoff_t orig_end; /* original off+len */ 2883 xfs_fileoff_t nexto; /* next file offset */ 2884 xfs_fileoff_t prevo; /* previous file offset */ 2885 xfs_fileoff_t align_off; /* temp for offset */ 2886 xfs_extlen_t align_alen; /* temp for length */ 2887 xfs_extlen_t temp; /* temp for calculations */ 2888 2889 if (convert) 2890 return 0; 2891 2892 orig_off = align_off = *offp; 2893 orig_alen = align_alen = *lenp; 2894 orig_end = orig_off + orig_alen; 2895 2896 /* 2897 * If this request overlaps an existing extent, then don't 2898 * attempt to perform any additional alignment. 
2899 */ 2900 if (!delay && !eof && 2901 (orig_off >= gotp->br_startoff) && 2902 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 2903 return 0; 2904 } 2905 2906 /* 2907 * If the file offset is unaligned vs. the extent size 2908 * we need to align it. This will be possible unless 2909 * the file was previously written with a kernel that didn't 2910 * perform this alignment, or if a truncate shot us in the 2911 * foot. 2912 */ 2913 div_u64_rem(orig_off, extsz, &temp); 2914 if (temp) { 2915 align_alen += temp; 2916 align_off -= temp; 2917 } 2918 2919 /* Same adjustment for the end of the requested area. */ 2920 temp = (align_alen % extsz); 2921 if (temp) 2922 align_alen += extsz - temp; 2923 2924 /* 2925 * For large extent hint sizes, the aligned extent might be larger than 2926 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls 2927 * the length back under MAXEXTLEN. The outer allocation loops handle 2928 * short allocation just fine, so it is safe to do this. We only want to 2929 * do it when we are forced to, though, because it means more allocation 2930 * operations are required. 2931 */ 2932 while (align_alen > MAXEXTLEN) 2933 align_alen -= extsz; 2934 ASSERT(align_alen <= MAXEXTLEN); 2935 2936 /* 2937 * If the previous block overlaps with this proposed allocation 2938 * then move the start forward without adjusting the length. 2939 */ 2940 if (prevp->br_startoff != NULLFILEOFF) { 2941 if (prevp->br_startblock == HOLESTARTBLOCK) 2942 prevo = prevp->br_startoff; 2943 else 2944 prevo = prevp->br_startoff + prevp->br_blockcount; 2945 } else 2946 prevo = 0; 2947 if (align_off != orig_off && align_off < prevo) 2948 align_off = prevo; 2949 /* 2950 * If the next block overlaps with this proposed allocation 2951 * then move the start back without adjusting the length, 2952 * but not before offset 0. 2953 * This may of course make the start overlap previous block, 2954 * and if we hit the offset 0 limit then the next block 2955 * can still overlap too. 2956 */ 2957 if (!eof && gotp->br_startoff != NULLFILEOFF) { 2958 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 2959 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 2960 nexto = gotp->br_startoff + gotp->br_blockcount; 2961 else 2962 nexto = gotp->br_startoff; 2963 } else 2964 nexto = NULLFILEOFF; 2965 if (!eof && 2966 align_off + align_alen != orig_end && 2967 align_off + align_alen > nexto) 2968 align_off = nexto > align_alen ? nexto - align_alen : 0; 2969 /* 2970 * If we're now overlapping the next or previous extent that 2971 * means we can't fit an extsz piece in this hole. Just move 2972 * the start forward to the first valid spot and set 2973 * the length so we hit the end. 2974 */ 2975 if (align_off != orig_off && align_off < prevo) 2976 align_off = prevo; 2977 if (align_off + align_alen != orig_end && 2978 align_off + align_alen > nexto && 2979 nexto != NULLFILEOFF) { 2980 ASSERT(nexto > prevo); 2981 align_alen = nexto - align_off; 2982 } 2983 2984 /* 2985 * If realtime, and the result isn't a multiple of the realtime 2986 * extent size we need to remove blocks until it is. 2987 */ 2988 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 2989 /* 2990 * We're not covering the original request, or 2991 * we won't be able to once we fix the length. 2992 */ 2993 if (orig_off < align_off || 2994 orig_end > align_off + align_alen || 2995 align_alen - temp < orig_alen) 2996 return -EINVAL; 2997 /* 2998 * Try to fix it by moving the start up. 
2999 */ 3000 if (align_off + temp <= orig_off) { 3001 align_alen -= temp; 3002 align_off += temp; 3003 } 3004 /* 3005 * Try to fix it by moving the end in. 3006 */ 3007 else if (align_off + align_alen - temp >= orig_end) 3008 align_alen -= temp; 3009 /* 3010 * Set the start to the minimum then trim the length. 3011 */ 3012 else { 3013 align_alen -= orig_off - align_off; 3014 align_off = orig_off; 3015 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3016 } 3017 /* 3018 * Result doesn't cover the request, fail it. 3019 */ 3020 if (orig_off < align_off || orig_end > align_off + align_alen) 3021 return -EINVAL; 3022 } else { 3023 ASSERT(orig_off >= align_off); 3024 /* see MAXEXTLEN handling above */ 3025 ASSERT(orig_end <= align_off + align_alen || 3026 align_alen + extsz > MAXEXTLEN); 3027 } 3028 3029 #ifdef DEBUG 3030 if (!eof && gotp->br_startoff != NULLFILEOFF) 3031 ASSERT(align_off + align_alen <= gotp->br_startoff); 3032 if (prevp->br_startoff != NULLFILEOFF) 3033 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3034 #endif 3035 3036 *lenp = align_alen; 3037 *offp = align_off; 3038 return 0; 3039 } 3040 3041 #define XFS_ALLOC_GAP_UNITS 4 3042 3043 void 3044 xfs_bmap_adjacent( 3045 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3046 { 3047 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3048 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3049 xfs_mount_t *mp; /* mount point structure */ 3050 int nullfb; /* true if ap->firstblock isn't set */ 3051 int rt; /* true if inode is realtime */ 3052 3053 #define ISVALID(x,y) \ 3054 (rt ? \ 3055 (x) < mp->m_sb.sb_rblocks : \ 3056 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3057 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3058 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3059 3060 mp = ap->ip->i_mount; 3061 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3062 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3063 xfs_alloc_is_userdata(ap->datatype); 3064 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3065 ap->tp->t_firstblock); 3066 /* 3067 * If allocating at eof, and there's a previous real block, 3068 * try to use its last block as our starting point. 3069 */ 3070 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3071 !isnullstartblock(ap->prev.br_startblock) && 3072 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3073 ap->prev.br_startblock)) { 3074 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3075 /* 3076 * Adjust for the gap between prevp and us. 3077 */ 3078 adjust = ap->offset - 3079 (ap->prev.br_startoff + ap->prev.br_blockcount); 3080 if (adjust && 3081 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3082 ap->blkno += adjust; 3083 } 3084 /* 3085 * If not at eof, then compare the two neighbor blocks. 3086 * Figure out whether either one gives us a good starting point, 3087 * and pick the better one. 3088 */ 3089 else if (!ap->eof) { 3090 xfs_fsblock_t gotbno; /* right side block number */ 3091 xfs_fsblock_t gotdiff=0; /* right side difference */ 3092 xfs_fsblock_t prevbno; /* left side block number */ 3093 xfs_fsblock_t prevdiff=0; /* left side difference */ 3094 3095 /* 3096 * If there's a previous (left) block, select a requested 3097 * start block based on it. 3098 */ 3099 if (ap->prev.br_startoff != NULLFILEOFF && 3100 !isnullstartblock(ap->prev.br_startblock) && 3101 (prevbno = ap->prev.br_startblock + 3102 ap->prev.br_blockcount) && 3103 ISVALID(prevbno, ap->prev.br_startblock)) { 3104 /* 3105 * Calculate gap to end of previous block. 
3106 */ 3107 adjust = prevdiff = ap->offset - 3108 (ap->prev.br_startoff + 3109 ap->prev.br_blockcount); 3110 /* 3111 * Figure the startblock based on the previous block's 3112 * end and the gap size. 3113 * Heuristic! 3114 * If the gap is large relative to the piece we're 3115 * allocating, or using it gives us an invalid block 3116 * number, then just use the end of the previous block. 3117 */ 3118 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3119 ISVALID(prevbno + prevdiff, 3120 ap->prev.br_startblock)) 3121 prevbno += adjust; 3122 else 3123 prevdiff += adjust; 3124 /* 3125 * If the firstblock forbids it, can't use it, 3126 * must use default. 3127 */ 3128 if (!rt && !nullfb && 3129 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) 3130 prevbno = NULLFSBLOCK; 3131 } 3132 /* 3133 * No previous block or can't follow it, just default. 3134 */ 3135 else 3136 prevbno = NULLFSBLOCK; 3137 /* 3138 * If there's a following (right) block, select a requested 3139 * start block based on it. 3140 */ 3141 if (!isnullstartblock(ap->got.br_startblock)) { 3142 /* 3143 * Calculate gap to start of next block. 3144 */ 3145 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3146 /* 3147 * Figure the startblock based on the next block's 3148 * start and the gap size. 3149 */ 3150 gotbno = ap->got.br_startblock; 3151 /* 3152 * Heuristic! 3153 * If the gap is large relative to the piece we're 3154 * allocating, or using it gives us an invalid block 3155 * number, then just use the start of the next block 3156 * offset by our length. 3157 */ 3158 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3159 ISVALID(gotbno - gotdiff, gotbno)) 3160 gotbno -= adjust; 3161 else if (ISVALID(gotbno - ap->length, gotbno)) { 3162 gotbno -= ap->length; 3163 gotdiff += adjust - ap->length; 3164 } else 3165 gotdiff += adjust; 3166 /* 3167 * If the firstblock forbids it, can't use it, 3168 * must use default. 3169 */ 3170 if (!rt && !nullfb && 3171 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) 3172 gotbno = NULLFSBLOCK; 3173 } 3174 /* 3175 * No next block, just default. 3176 */ 3177 else 3178 gotbno = NULLFSBLOCK; 3179 /* 3180 * If both valid, pick the better one, else the only good 3181 * one, else ap->blkno is already set (to 0 or the inode block). 3182 */ 3183 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3184 ap->blkno = prevdiff <= gotdiff ? 
prevbno : gotbno; 3185 else if (prevbno != NULLFSBLOCK) 3186 ap->blkno = prevbno; 3187 else if (gotbno != NULLFSBLOCK) 3188 ap->blkno = gotbno; 3189 } 3190 #undef ISVALID 3191 } 3192 3193 static int 3194 xfs_bmap_longest_free_extent( 3195 struct xfs_trans *tp, 3196 xfs_agnumber_t ag, 3197 xfs_extlen_t *blen, 3198 int *notinit) 3199 { 3200 struct xfs_mount *mp = tp->t_mountp; 3201 struct xfs_perag *pag; 3202 xfs_extlen_t longest; 3203 int error = 0; 3204 3205 pag = xfs_perag_get(mp, ag); 3206 if (!pag->pagf_init) { 3207 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK); 3208 if (error) 3209 goto out; 3210 3211 if (!pag->pagf_init) { 3212 *notinit = 1; 3213 goto out; 3214 } 3215 } 3216 3217 longest = xfs_alloc_longest_free_extent(pag, 3218 xfs_alloc_min_freelist(mp, pag), 3219 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3220 if (*blen < longest) 3221 *blen = longest; 3222 3223 out: 3224 xfs_perag_put(pag); 3225 return error; 3226 } 3227 3228 static void 3229 xfs_bmap_select_minlen( 3230 struct xfs_bmalloca *ap, 3231 struct xfs_alloc_arg *args, 3232 xfs_extlen_t *blen, 3233 int notinit) 3234 { 3235 if (notinit || *blen < ap->minlen) { 3236 /* 3237 * Since we did a BUF_TRYLOCK above, it is possible that 3238 * there is space for this request. 3239 */ 3240 args->minlen = ap->minlen; 3241 } else if (*blen < args->maxlen) { 3242 /* 3243 * If the best seen length is less than the request length, 3244 * use the best as the minimum. 3245 */ 3246 args->minlen = *blen; 3247 } else { 3248 /* 3249 * Otherwise we've seen an extent as big as maxlen, use that 3250 * as the minimum. 3251 */ 3252 args->minlen = args->maxlen; 3253 } 3254 } 3255 3256 STATIC int 3257 xfs_bmap_btalloc_nullfb( 3258 struct xfs_bmalloca *ap, 3259 struct xfs_alloc_arg *args, 3260 xfs_extlen_t *blen) 3261 { 3262 struct xfs_mount *mp = ap->ip->i_mount; 3263 xfs_agnumber_t ag, startag; 3264 int notinit = 0; 3265 int error; 3266 3267 args->type = XFS_ALLOCTYPE_START_BNO; 3268 args->total = ap->total; 3269 3270 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3271 if (startag == NULLAGNUMBER) 3272 startag = ag = 0; 3273 3274 while (*blen < args->maxlen) { 3275 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3276 &notinit); 3277 if (error) 3278 return error; 3279 3280 if (++ag == mp->m_sb.sb_agcount) 3281 ag = 0; 3282 if (ag == startag) 3283 break; 3284 } 3285 3286 xfs_bmap_select_minlen(ap, args, blen, notinit); 3287 return 0; 3288 } 3289 3290 STATIC int 3291 xfs_bmap_btalloc_filestreams( 3292 struct xfs_bmalloca *ap, 3293 struct xfs_alloc_arg *args, 3294 xfs_extlen_t *blen) 3295 { 3296 struct xfs_mount *mp = ap->ip->i_mount; 3297 xfs_agnumber_t ag; 3298 int notinit = 0; 3299 int error; 3300 3301 args->type = XFS_ALLOCTYPE_NEAR_BNO; 3302 args->total = ap->total; 3303 3304 ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3305 if (ag == NULLAGNUMBER) 3306 ag = 0; 3307 3308 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit); 3309 if (error) 3310 return error; 3311 3312 if (*blen < args->maxlen) { 3313 error = xfs_filestream_new_ag(ap, &ag); 3314 if (error) 3315 return error; 3316 3317 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3318 &notinit); 3319 if (error) 3320 return error; 3321 3322 } 3323 3324 xfs_bmap_select_minlen(ap, args, blen, notinit); 3325 3326 /* 3327 * Set the failure fallback case to look in the selected AG as stream 3328 * may have moved. 
3329 */ 3330 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); 3331 return 0; 3332 } 3333 3334 /* Update all inode and quota accounting for the allocation we just did. */ 3335 static void 3336 xfs_bmap_btalloc_accounting( 3337 struct xfs_bmalloca *ap, 3338 struct xfs_alloc_arg *args) 3339 { 3340 if (ap->flags & XFS_BMAPI_COWFORK) { 3341 /* 3342 * COW fork blocks are in-core only and thus are treated as 3343 * in-core quota reservation (like delalloc blocks) even when 3344 * converted to real blocks. The quota reservation is not 3345 * accounted to disk until blocks are remapped to the data 3346 * fork. So if these blocks were previously delalloc, we 3347 * already have quota reservation and there's nothing to do 3348 * yet. 3349 */ 3350 if (ap->wasdel) 3351 return; 3352 3353 /* 3354 * Otherwise, we've allocated blocks in a hole. The transaction 3355 * has acquired in-core quota reservation for this extent. 3356 * Rather than account these as real blocks, however, we reduce 3357 * the transaction quota reservation based on the allocation. 3358 * This essentially transfers the transaction quota reservation 3359 * to that of a delalloc extent. 3360 */ 3361 ap->ip->i_delayed_blks += args->len; 3362 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS, 3363 -(long)args->len); 3364 return; 3365 } 3366 3367 /* data/attr fork only */ 3368 ap->ip->i_d.di_nblocks += args->len; 3369 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3370 if (ap->wasdel) 3371 ap->ip->i_delayed_blks -= args->len; 3372 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3373 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, 3374 args->len); 3375 } 3376 3377 STATIC int 3378 xfs_bmap_btalloc( 3379 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3380 { 3381 xfs_mount_t *mp; /* mount point structure */ 3382 xfs_alloctype_t atype = 0; /* type for allocation routines */ 3383 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3384 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3385 xfs_agnumber_t ag; 3386 xfs_alloc_arg_t args; 3387 xfs_fileoff_t orig_offset; 3388 xfs_extlen_t orig_length; 3389 xfs_extlen_t blen; 3390 xfs_extlen_t nextminlen = 0; 3391 int nullfb; /* true if ap->firstblock isn't set */ 3392 int isaligned; 3393 int tryagain; 3394 int error; 3395 int stripe_align; 3396 3397 ASSERT(ap->length); 3398 orig_offset = ap->offset; 3399 orig_length = ap->length; 3400 3401 mp = ap->ip->i_mount; 3402 3403 /* stripe alignment for allocation is determined by mount parameters */ 3404 stripe_align = 0; 3405 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 3406 stripe_align = mp->m_swidth; 3407 else if (mp->m_dalign) 3408 stripe_align = mp->m_dalign; 3409 3410 if (ap->flags & XFS_BMAPI_COWFORK) 3411 align = xfs_get_cowextsz_hint(ap->ip); 3412 else if (xfs_alloc_is_userdata(ap->datatype)) 3413 align = xfs_get_extsz_hint(ap->ip); 3414 if (align) { 3415 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3416 align, 0, ap->eof, 0, ap->conv, 3417 &ap->offset, &ap->length); 3418 ASSERT(!error); 3419 ASSERT(ap->length); 3420 } 3421 3422 3423 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3424 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3425 ap->tp->t_firstblock); 3426 if (nullfb) { 3427 if (xfs_alloc_is_userdata(ap->datatype) && 3428 xfs_inode_is_filestream(ap->ip)) { 3429 ag = xfs_filestream_lookup_ag(ap->ip); 3430 ag = (ag != NULLAGNUMBER) ? 
ag : 0; 3431 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); 3432 } else { 3433 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 3434 } 3435 } else 3436 ap->blkno = ap->tp->t_firstblock; 3437 3438 xfs_bmap_adjacent(ap); 3439 3440 /* 3441 * If allowed, use ap->blkno; otherwise must use firstblock since 3442 * it's in the right allocation group. 3443 */ 3444 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) 3445 ; 3446 else 3447 ap->blkno = ap->tp->t_firstblock; 3448 /* 3449 * Normal allocation, done through xfs_alloc_vextent. 3450 */ 3451 tryagain = isaligned = 0; 3452 memset(&args, 0, sizeof(args)); 3453 args.tp = ap->tp; 3454 args.mp = mp; 3455 args.fsbno = ap->blkno; 3456 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE; 3457 3458 /* Trim the allocation back to the maximum an AG can fit. */ 3459 args.maxlen = min(ap->length, mp->m_ag_max_usable); 3460 blen = 0; 3461 if (nullfb) { 3462 /* 3463 * Search for an allocation group with a single extent large 3464 * enough for the request. If one isn't found, then adjust 3465 * the minimum allocation size to the largest space found. 3466 */ 3467 if (xfs_alloc_is_userdata(ap->datatype) && 3468 xfs_inode_is_filestream(ap->ip)) 3469 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen); 3470 else 3471 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); 3472 if (error) 3473 return error; 3474 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { 3475 if (xfs_inode_is_filestream(ap->ip)) 3476 args.type = XFS_ALLOCTYPE_FIRST_AG; 3477 else 3478 args.type = XFS_ALLOCTYPE_START_BNO; 3479 args.total = args.minlen = ap->minlen; 3480 } else { 3481 args.type = XFS_ALLOCTYPE_NEAR_BNO; 3482 args.total = ap->total; 3483 args.minlen = ap->minlen; 3484 } 3485 /* apply extent size hints if obtained earlier */ 3486 if (align) { 3487 args.prod = align; 3488 div_u64_rem(ap->offset, args.prod, &args.mod); 3489 if (args.mod) 3490 args.mod = args.prod - args.mod; 3491 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3492 args.prod = 1; 3493 args.mod = 0; 3494 } else { 3495 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3496 div_u64_rem(ap->offset, args.prod, &args.mod); 3497 if (args.mod) 3498 args.mod = args.prod - args.mod; 3499 } 3500 /* 3501 * If we are not low on available data blocks, and the 3502 * underlying logical volume manager is a stripe, and 3503 * the file offset is zero then try to allocate data 3504 * blocks on stripe unit boundary. 3505 * NOTE: ap->aeof is only set if the allocation length 3506 * is >= the stripe unit and the allocation offset is 3507 * at the end of file. 3508 */ 3509 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) { 3510 if (!ap->offset) { 3511 args.alignment = stripe_align; 3512 atype = args.type; 3513 isaligned = 1; 3514 /* 3515 * Adjust for alignment 3516 */ 3517 if (blen > args.alignment && blen <= args.maxlen) 3518 args.minlen = blen - args.alignment; 3519 args.minalignslop = 0; 3520 } else { 3521 /* 3522 * First try an exact bno allocation. 3523 * If it fails then do a near or start bno 3524 * allocation with alignment turned on. 3525 */ 3526 atype = args.type; 3527 tryagain = 1; 3528 args.type = XFS_ALLOCTYPE_THIS_BNO; 3529 args.alignment = 1; 3530 /* 3531 * Compute the minlen+alignment for the 3532 * next case. Set slop so that the value 3533 * of minlen+alignment+slop doesn't go up 3534 * between the calls. 
3535 */ 3536 if (blen > stripe_align && blen <= args.maxlen) 3537 nextminlen = blen - stripe_align; 3538 else 3539 nextminlen = args.minlen; 3540 if (nextminlen + stripe_align > args.minlen + 1) 3541 args.minalignslop = 3542 nextminlen + stripe_align - 3543 args.minlen - 1; 3544 else 3545 args.minalignslop = 0; 3546 } 3547 } else { 3548 args.alignment = 1; 3549 args.minalignslop = 0; 3550 } 3551 args.minleft = ap->minleft; 3552 args.wasdel = ap->wasdel; 3553 args.resv = XFS_AG_RESV_NONE; 3554 args.datatype = ap->datatype; 3555 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) 3556 args.ip = ap->ip; 3557 3558 error = xfs_alloc_vextent(&args); 3559 if (error) 3560 return error; 3561 3562 if (tryagain && args.fsbno == NULLFSBLOCK) { 3563 /* 3564 * Exact allocation failed. Now try with alignment 3565 * turned on. 3566 */ 3567 args.type = atype; 3568 args.fsbno = ap->blkno; 3569 args.alignment = stripe_align; 3570 args.minlen = nextminlen; 3571 args.minalignslop = 0; 3572 isaligned = 1; 3573 if ((error = xfs_alloc_vextent(&args))) 3574 return error; 3575 } 3576 if (isaligned && args.fsbno == NULLFSBLOCK) { 3577 /* 3578 * allocation failed, so turn off alignment and 3579 * try again. 3580 */ 3581 args.type = atype; 3582 args.fsbno = ap->blkno; 3583 args.alignment = 0; 3584 if ((error = xfs_alloc_vextent(&args))) 3585 return error; 3586 } 3587 if (args.fsbno == NULLFSBLOCK && nullfb && 3588 args.minlen > ap->minlen) { 3589 args.minlen = ap->minlen; 3590 args.type = XFS_ALLOCTYPE_START_BNO; 3591 args.fsbno = ap->blkno; 3592 if ((error = xfs_alloc_vextent(&args))) 3593 return error; 3594 } 3595 if (args.fsbno == NULLFSBLOCK && nullfb) { 3596 args.fsbno = 0; 3597 args.type = XFS_ALLOCTYPE_FIRST_AG; 3598 args.total = ap->minlen; 3599 if ((error = xfs_alloc_vextent(&args))) 3600 return error; 3601 ap->tp->t_flags |= XFS_TRANS_LOWMODE; 3602 } 3603 if (args.fsbno != NULLFSBLOCK) { 3604 /* 3605 * check the allocation happened at the same or higher AG than 3606 * the first block that was allocated. 3607 */ 3608 ASSERT(ap->tp->t_firstblock == NULLFSBLOCK || 3609 XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <= 3610 XFS_FSB_TO_AGNO(mp, args.fsbno)); 3611 3612 ap->blkno = args.fsbno; 3613 if (ap->tp->t_firstblock == NULLFSBLOCK) 3614 ap->tp->t_firstblock = args.fsbno; 3615 ASSERT(nullfb || fb_agno <= args.agno); 3616 ap->length = args.len; 3617 /* 3618 * If the extent size hint is active, we tried to round the 3619 * caller's allocation request offset down to extsz and the 3620 * length up to another extsz boundary. If we found a free 3621 * extent we mapped it in starting at this new offset. If the 3622 * newly mapped space isn't long enough to cover any of the 3623 * range of offsets that was originally requested, move the 3624 * mapping up so that we can fill as much of the caller's 3625 * original request as possible. Free space is apparently 3626 * very fragmented so we're unlikely to be able to satisfy the 3627 * hints anyway. 3628 */ 3629 if (ap->length <= orig_length) 3630 ap->offset = orig_offset; 3631 else if (ap->offset + ap->length < orig_offset + orig_length) 3632 ap->offset = orig_offset + orig_length - ap->length; 3633 xfs_bmap_btalloc_accounting(ap, &args); 3634 } else { 3635 ap->blkno = NULLFSBLOCK; 3636 ap->length = 0; 3637 } 3638 return 0; 3639 } 3640 3641 /* 3642 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 3643 * It figures out where to ask the underlying allocator to put the new extent. 
3644 */ 3645 STATIC int 3646 xfs_bmap_alloc( 3647 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3648 { 3649 if (XFS_IS_REALTIME_INODE(ap->ip) && 3650 xfs_alloc_is_userdata(ap->datatype)) 3651 return xfs_bmap_rtalloc(ap); 3652 return xfs_bmap_btalloc(ap); 3653 } 3654 3655 /* Trim extent to fit a logical block range. */ 3656 void 3657 xfs_trim_extent( 3658 struct xfs_bmbt_irec *irec, 3659 xfs_fileoff_t bno, 3660 xfs_filblks_t len) 3661 { 3662 xfs_fileoff_t distance; 3663 xfs_fileoff_t end = bno + len; 3664 3665 if (irec->br_startoff + irec->br_blockcount <= bno || 3666 irec->br_startoff >= end) { 3667 irec->br_blockcount = 0; 3668 return; 3669 } 3670 3671 if (irec->br_startoff < bno) { 3672 distance = bno - irec->br_startoff; 3673 if (isnullstartblock(irec->br_startblock)) 3674 irec->br_startblock = DELAYSTARTBLOCK; 3675 if (irec->br_startblock != DELAYSTARTBLOCK && 3676 irec->br_startblock != HOLESTARTBLOCK) 3677 irec->br_startblock += distance; 3678 irec->br_startoff += distance; 3679 irec->br_blockcount -= distance; 3680 } 3681 3682 if (end < irec->br_startoff + irec->br_blockcount) { 3683 distance = irec->br_startoff + irec->br_blockcount - end; 3684 irec->br_blockcount -= distance; 3685 } 3686 } 3687 3688 /* trim extent to within eof */ 3689 void 3690 xfs_trim_extent_eof( 3691 struct xfs_bmbt_irec *irec, 3692 struct xfs_inode *ip) 3693 3694 { 3695 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount, 3696 i_size_read(VFS_I(ip)))); 3697 } 3698 3699 /* 3700 * Trim the returned map to the required bounds 3701 */ 3702 STATIC void 3703 xfs_bmapi_trim_map( 3704 struct xfs_bmbt_irec *mval, 3705 struct xfs_bmbt_irec *got, 3706 xfs_fileoff_t *bno, 3707 xfs_filblks_t len, 3708 xfs_fileoff_t obno, 3709 xfs_fileoff_t end, 3710 int n, 3711 int flags) 3712 { 3713 if ((flags & XFS_BMAPI_ENTIRE) || 3714 got->br_startoff + got->br_blockcount <= obno) { 3715 *mval = *got; 3716 if (isnullstartblock(got->br_startblock)) 3717 mval->br_startblock = DELAYSTARTBLOCK; 3718 return; 3719 } 3720 3721 if (obno > *bno) 3722 *bno = obno; 3723 ASSERT((*bno >= obno) || (n == 0)); 3724 ASSERT(*bno < end); 3725 mval->br_startoff = *bno; 3726 if (isnullstartblock(got->br_startblock)) 3727 mval->br_startblock = DELAYSTARTBLOCK; 3728 else 3729 mval->br_startblock = got->br_startblock + 3730 (*bno - got->br_startoff); 3731 /* 3732 * Return the minimum of what we got and what we asked for for 3733 * the length. We can use the len variable here because it is 3734 * modified below and we could have been there before coming 3735 * here if the first part of the allocation didn't overlap what 3736 * was asked for. 
3737 */ 3738 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3739 got->br_blockcount - (*bno - got->br_startoff)); 3740 mval->br_state = got->br_state; 3741 ASSERT(mval->br_blockcount <= len); 3742 return; 3743 } 3744 3745 /* 3746 * Update and validate the extent map to return 3747 */ 3748 STATIC void 3749 xfs_bmapi_update_map( 3750 struct xfs_bmbt_irec **map, 3751 xfs_fileoff_t *bno, 3752 xfs_filblks_t *len, 3753 xfs_fileoff_t obno, 3754 xfs_fileoff_t end, 3755 int *n, 3756 int flags) 3757 { 3758 xfs_bmbt_irec_t *mval = *map; 3759 3760 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3761 ((mval->br_startoff + mval->br_blockcount) <= end)); 3762 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3763 (mval->br_startoff < obno)); 3764 3765 *bno = mval->br_startoff + mval->br_blockcount; 3766 *len = end - *bno; 3767 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3768 /* update previous map with new information */ 3769 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3770 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3771 ASSERT(mval->br_state == mval[-1].br_state); 3772 mval[-1].br_blockcount = mval->br_blockcount; 3773 mval[-1].br_state = mval->br_state; 3774 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3775 mval[-1].br_startblock != DELAYSTARTBLOCK && 3776 mval[-1].br_startblock != HOLESTARTBLOCK && 3777 mval->br_startblock == mval[-1].br_startblock + 3778 mval[-1].br_blockcount && 3779 mval[-1].br_state == mval->br_state) { 3780 ASSERT(mval->br_startoff == 3781 mval[-1].br_startoff + mval[-1].br_blockcount); 3782 mval[-1].br_blockcount += mval->br_blockcount; 3783 } else if (*n > 0 && 3784 mval->br_startblock == DELAYSTARTBLOCK && 3785 mval[-1].br_startblock == DELAYSTARTBLOCK && 3786 mval->br_startoff == 3787 mval[-1].br_startoff + mval[-1].br_blockcount) { 3788 mval[-1].br_blockcount += mval->br_blockcount; 3789 mval[-1].br_state = mval->br_state; 3790 } else if (!((*n == 0) && 3791 ((mval->br_startoff + mval->br_blockcount) <= 3792 obno))) { 3793 mval++; 3794 (*n)++; 3795 } 3796 *map = mval; 3797 } 3798 3799 /* 3800 * Map file blocks to filesystem blocks without allocation. 3801 */ 3802 int 3803 xfs_bmapi_read( 3804 struct xfs_inode *ip, 3805 xfs_fileoff_t bno, 3806 xfs_filblks_t len, 3807 struct xfs_bmbt_irec *mval, 3808 int *nmap, 3809 int flags) 3810 { 3811 struct xfs_mount *mp = ip->i_mount; 3812 struct xfs_ifork *ifp; 3813 struct xfs_bmbt_irec got; 3814 xfs_fileoff_t obno; 3815 xfs_fileoff_t end; 3816 struct xfs_iext_cursor icur; 3817 int error; 3818 bool eof = false; 3819 int n = 0; 3820 int whichfork = xfs_bmapi_whichfork(flags); 3821 3822 ASSERT(*nmap >= 1); 3823 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| 3824 XFS_BMAPI_COWFORK))); 3825 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3826 3827 if (unlikely(XFS_TEST_ERROR( 3828 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 3829 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 3830 mp, XFS_ERRTAG_BMAPIFORMAT))) { 3831 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); 3832 return -EFSCORRUPTED; 3833 } 3834 3835 if (XFS_FORCED_SHUTDOWN(mp)) 3836 return -EIO; 3837 3838 XFS_STATS_INC(mp, xs_blk_mapr); 3839 3840 ifp = XFS_IFORK_PTR(ip, whichfork); 3841 3842 /* No CoW fork? Return a hole. 
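 * The caller asked about the CoW fork but none has been allocated for this inode, so report the entire requested range as a hole.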
*/ 3843 if (whichfork == XFS_COW_FORK && !ifp) { 3844 mval->br_startoff = bno; 3845 mval->br_startblock = HOLESTARTBLOCK; 3846 mval->br_blockcount = len; 3847 mval->br_state = XFS_EXT_NORM; 3848 *nmap = 1; 3849 return 0; 3850 } 3851 3852 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 3853 error = xfs_iread_extents(NULL, ip, whichfork); 3854 if (error) 3855 return error; 3856 } 3857 3858 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) 3859 eof = true; 3860 end = bno + len; 3861 obno = bno; 3862 3863 while (bno < end && n < *nmap) { 3864 /* Reading past eof, act as though there's a hole up to end. */ 3865 if (eof) 3866 got.br_startoff = end; 3867 if (got.br_startoff > bno) { 3868 /* Reading in a hole. */ 3869 mval->br_startoff = bno; 3870 mval->br_startblock = HOLESTARTBLOCK; 3871 mval->br_blockcount = 3872 XFS_FILBLKS_MIN(len, got.br_startoff - bno); 3873 mval->br_state = XFS_EXT_NORM; 3874 bno += mval->br_blockcount; 3875 len -= mval->br_blockcount; 3876 mval++; 3877 n++; 3878 continue; 3879 } 3880 3881 /* set up the extent map to return. */ 3882 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); 3883 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 3884 3885 /* If we're done, stop now. */ 3886 if (bno >= end || n >= *nmap) 3887 break; 3888 3889 /* Else go on to the next record. */ 3890 if (!xfs_iext_next_extent(ifp, &icur, &got)) 3891 eof = true; 3892 } 3893 *nmap = n; 3894 return 0; 3895 } 3896 3897 /* 3898 * Add a delayed allocation extent to an inode. Blocks are reserved from the 3899 * global pool and the extent inserted into the inode in-core extent tree. 3900 * 3901 * On entry, got refers to the first extent beyond the offset of the extent to 3902 * allocate or eof is specified if no such extent exists. On return, got refers 3903 * to the extent record that was inserted to the inode fork. 3904 * 3905 * Note that the allocated extent may have been merged with contiguous extents 3906 * during insertion into the inode fork. Thus, got does not reflect the current 3907 * state of the inode fork on return. If necessary, the caller can use lastx to 3908 * look up the updated record in the inode fork. 3909 */ 3910 int 3911 xfs_bmapi_reserve_delalloc( 3912 struct xfs_inode *ip, 3913 int whichfork, 3914 xfs_fileoff_t off, 3915 xfs_filblks_t len, 3916 xfs_filblks_t prealloc, 3917 struct xfs_bmbt_irec *got, 3918 struct xfs_iext_cursor *icur, 3919 int eof) 3920 { 3921 struct xfs_mount *mp = ip->i_mount; 3922 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 3923 xfs_extlen_t alen; 3924 xfs_extlen_t indlen; 3925 int error; 3926 xfs_fileoff_t aoff = off; 3927 3928 /* 3929 * Cap the alloc length. Keep track of prealloc so we know whether to 3930 * tag the inode before we return. 3931 */ 3932 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN); 3933 if (!eof) 3934 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); 3935 if (prealloc && alen >= len) 3936 prealloc = alen - len; 3937 3938 /* Figure out the extent size, adjust alen */ 3939 if (whichfork == XFS_COW_FORK) { 3940 struct xfs_bmbt_irec prev; 3941 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip); 3942 3943 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev)) 3944 prev.br_startoff = NULLFILEOFF; 3945 3946 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof, 3947 1, 0, &aoff, &alen); 3948 ASSERT(!error); 3949 } 3950 3951 /* 3952 * Make a transaction-less quota reservation for delayed allocation 3953 * blocks. This number gets adjusted later. 
We return if we haven't 3954 * allocated blocks already inside this loop. 3955 */ 3956 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0, 3957 XFS_QMOPT_RES_REGBLKS); 3958 if (error) 3959 return error; 3960 3961 /* 3962 * Split changing sb for alen and indlen since they could be coming 3963 * from different places. 3964 */ 3965 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); 3966 ASSERT(indlen > 0); 3967 3968 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); 3969 if (error) 3970 goto out_unreserve_quota; 3971 3972 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); 3973 if (error) 3974 goto out_unreserve_blocks; 3975 3976 3977 ip->i_delayed_blks += alen; 3978 3979 got->br_startoff = aoff; 3980 got->br_startblock = nullstartblock(indlen); 3981 got->br_blockcount = alen; 3982 got->br_state = XFS_EXT_NORM; 3983 3984 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got); 3985 3986 /* 3987 * Tag the inode if blocks were preallocated. Note that COW fork 3988 * preallocation can occur at the start or end of the extent, even when 3989 * prealloc == 0, so we must also check the aligned offset and length. 3990 */ 3991 if (whichfork == XFS_DATA_FORK && prealloc) 3992 xfs_inode_set_eofblocks_tag(ip); 3993 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 3994 xfs_inode_set_cowblocks_tag(ip); 3995 3996 return 0; 3997 3998 out_unreserve_blocks: 3999 xfs_mod_fdblocks(mp, alen, false); 4000 out_unreserve_quota: 4001 if (XFS_IS_QUOTA_ON(mp)) 4002 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, 4003 XFS_QMOPT_RES_REGBLKS); 4004 return error; 4005 } 4006 4007 static int 4008 xfs_bmapi_allocate( 4009 struct xfs_bmalloca *bma) 4010 { 4011 struct xfs_mount *mp = bma->ip->i_mount; 4012 int whichfork = xfs_bmapi_whichfork(bma->flags); 4013 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4014 int tmp_logflags = 0; 4015 int error; 4016 4017 ASSERT(bma->length > 0); 4018 4019 /* 4020 * For the wasdelay case, we could also just allocate the stuff asked 4021 * for in this bmap call but that wouldn't be as good. 4022 */ 4023 if (bma->wasdel) { 4024 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4025 bma->offset = bma->got.br_startoff; 4026 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev); 4027 } else { 4028 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN); 4029 if (!bma->eof) 4030 bma->length = XFS_FILBLKS_MIN(bma->length, 4031 bma->got.br_startoff - bma->offset); 4032 } 4033 4034 /* 4035 * Set the data type being allocated. For the data fork, the first data 4036 * in the file is treated differently to all other allocations. For the 4037 * attribute fork, we only need to ensure the allocated range is not on 4038 * the busy list. 4039 */ 4040 if (!(bma->flags & XFS_BMAPI_METADATA)) { 4041 bma->datatype = XFS_ALLOC_NOBUSY; 4042 if (whichfork == XFS_DATA_FORK) { 4043 if (bma->offset == 0) 4044 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA; 4045 else 4046 bma->datatype |= XFS_ALLOC_USERDATA; 4047 } 4048 if (bma->flags & XFS_BMAPI_ZERO) 4049 bma->datatype |= XFS_ALLOC_USERDATA_ZERO; 4050 } 4051 4052 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1; 4053 4054 /* 4055 * Only want to do the alignment at the eof if it is userdata and 4056 * allocation length is larger than a stripe unit. 
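 * The check below also requires a data fork, non-metadata allocation on a filesystem that has a stripe unit configured (mp->m_dalign != 0).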
4057 */ 4058 if (mp->m_dalign && bma->length >= mp->m_dalign && 4059 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4060 error = xfs_bmap_isaeof(bma, whichfork); 4061 if (error) 4062 return error; 4063 } 4064 4065 error = xfs_bmap_alloc(bma); 4066 if (error) 4067 return error; 4068 4069 if (bma->blkno == NULLFSBLOCK) 4070 return 0; 4071 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) 4072 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4073 /* 4074 * Bump the number of extents we've allocated 4075 * in this call. 4076 */ 4077 bma->nallocs++; 4078 4079 if (bma->cur) 4080 bma->cur->bc_private.b.flags = 4081 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 4082 4083 bma->got.br_startoff = bma->offset; 4084 bma->got.br_startblock = bma->blkno; 4085 bma->got.br_blockcount = bma->length; 4086 bma->got.br_state = XFS_EXT_NORM; 4087 4088 /* 4089 * In the data fork, a wasdelay extent has been initialized, so 4090 * shouldn't be flagged as unwritten. 4091 * 4092 * For the cow fork, however, we convert delalloc reservations 4093 * (extents allocated for speculative preallocation) to 4094 * allocated unwritten extents, and only convert the unwritten 4095 * extents to real extents when we're about to write the data. 4096 */ 4097 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) && 4098 (bma->flags & XFS_BMAPI_PREALLOC)) 4099 bma->got.br_state = XFS_EXT_UNWRITTEN; 4100 4101 if (bma->wasdel) 4102 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4103 else 4104 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4105 whichfork, &bma->icur, &bma->cur, &bma->got, 4106 &bma->logflags, bma->flags); 4107 4108 bma->logflags |= tmp_logflags; 4109 if (error) 4110 return error; 4111 4112 /* 4113 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4114 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4115 * the neighbouring ones. 4116 */ 4117 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4118 4119 ASSERT(bma->got.br_startoff <= bma->offset); 4120 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4121 bma->offset + bma->length); 4122 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4123 bma->got.br_state == XFS_EXT_UNWRITTEN); 4124 return 0; 4125 } 4126 4127 STATIC int 4128 xfs_bmapi_convert_unwritten( 4129 struct xfs_bmalloca *bma, 4130 struct xfs_bmbt_irec *mval, 4131 xfs_filblks_t len, 4132 int flags) 4133 { 4134 int whichfork = xfs_bmapi_whichfork(flags); 4135 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4136 int tmp_logflags = 0; 4137 int error; 4138 4139 /* check if we need to do unwritten->real conversion */ 4140 if (mval->br_state == XFS_EXT_UNWRITTEN && 4141 (flags & XFS_BMAPI_PREALLOC)) 4142 return 0; 4143 4144 /* check if we need to do real->unwritten conversion */ 4145 if (mval->br_state == XFS_EXT_NORM && 4146 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4147 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4148 return 0; 4149 4150 /* 4151 * Modify (by adding) the state flag, if writing. 4152 */ 4153 ASSERT(mval->br_blockcount <= len); 4154 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4155 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4156 bma->ip, whichfork); 4157 } 4158 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4159 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4160 4161 /* 4162 * Before insertion into the bmbt, zero the range being converted 4163 * if required. 
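 * (When XFS_BMAPI_ZERO is set, the range is zeroed via xfs_zero_extent() before the conversion is recorded in the tree.)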
4164 */ 4165 if (flags & XFS_BMAPI_ZERO) { 4166 error = xfs_zero_extent(bma->ip, mval->br_startblock, 4167 mval->br_blockcount); 4168 if (error) 4169 return error; 4170 } 4171 4172 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork, 4173 &bma->icur, &bma->cur, mval, &tmp_logflags); 4174 /* 4175 * Log the inode core unconditionally in the unwritten extent conversion 4176 * path because the conversion might not have done so (e.g., if the 4177 * extent count hasn't changed). We need to make sure the inode is dirty 4178 * in the transaction for the sake of fsync(), even if nothing has 4179 * changed, because fsync() will not force the log for this transaction 4180 * unless it sees the inode pinned. 4181 * 4182 * Note: If we're only converting cow fork extents, there aren't 4183 * any on-disk updates to make, so we don't need to log anything. 4184 */ 4185 if (whichfork != XFS_COW_FORK) 4186 bma->logflags |= tmp_logflags | XFS_ILOG_CORE; 4187 if (error) 4188 return error; 4189 4190 /* 4191 * Update our extent pointer, given that 4192 * xfs_bmap_add_extent_unwritten_real might have merged it into one 4193 * of the neighbouring ones. 4194 */ 4195 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4196 4197 /* 4198 * We may have combined previously unwritten space with written space, 4199 * so generate another request. 4200 */ 4201 if (mval->br_blockcount < len) 4202 return -EAGAIN; 4203 return 0; 4204 } 4205 4206 /* 4207 * Map file blocks to filesystem blocks, and allocate blocks or convert the 4208 * extent state if necessary. Detailed behaviour is controlled by the flags 4209 * parameter. Only allocates blocks from a single allocation group, to avoid 4210 * locking problems. 4211 */ 4212 int 4213 xfs_bmapi_write( 4214 struct xfs_trans *tp, /* transaction pointer */ 4215 struct xfs_inode *ip, /* incore inode */ 4216 xfs_fileoff_t bno, /* starting file offs. mapped */ 4217 xfs_filblks_t len, /* length to map in file */ 4218 int flags, /* XFS_BMAPI_...
*/ 4219 xfs_extlen_t total, /* total blocks needed */ 4220 struct xfs_bmbt_irec *mval, /* output: map values */ 4221 int *nmap) /* i/o: mval size/count */ 4222 { 4223 struct xfs_mount *mp = ip->i_mount; 4224 struct xfs_ifork *ifp; 4225 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */ 4226 xfs_fileoff_t end; /* end of mapped file region */ 4227 bool eof = false; /* after the end of extents */ 4228 int error; /* error return */ 4229 int n; /* current extent index */ 4230 xfs_fileoff_t obno; /* old block number (offset) */ 4231 int whichfork; /* data or attr fork */ 4232 4233 #ifdef DEBUG 4234 xfs_fileoff_t orig_bno; /* original block number value */ 4235 int orig_flags; /* original flags arg value */ 4236 xfs_filblks_t orig_len; /* original value of len arg */ 4237 struct xfs_bmbt_irec *orig_mval; /* original value of mval */ 4238 int orig_nmap; /* original value of *nmap */ 4239 4240 orig_bno = bno; 4241 orig_len = len; 4242 orig_flags = flags; 4243 orig_mval = mval; 4244 orig_nmap = *nmap; 4245 #endif 4246 whichfork = xfs_bmapi_whichfork(flags); 4247 4248 ASSERT(*nmap >= 1); 4249 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); 4250 ASSERT(tp != NULL || 4251 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) == 4252 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)); 4253 ASSERT(len > 0); 4254 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); 4255 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4256 ASSERT(!(flags & XFS_BMAPI_REMAP)); 4257 4258 /* zeroing is currently only for data extents, not metadata */ 4259 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) != 4260 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)); 4261 /* 4262 * we can allocate unwritten extents or pre-zero allocated blocks, 4263 * but it makes no sense to do both at once. This would result in 4264 * zeroing the unwritten extent twice, while it would still remain 4265 * an unwritten extent.... 4266 */ 4267 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) != 4268 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)); 4269 4270 if (unlikely(XFS_TEST_ERROR( 4271 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4272 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4273 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4274 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp); 4275 return -EFSCORRUPTED; 4276 } 4277 4278 if (XFS_FORCED_SHUTDOWN(mp)) 4279 return -EIO; 4280 4281 ifp = XFS_IFORK_PTR(ip, whichfork); 4282 4283 XFS_STATS_INC(mp, xs_blk_mapw); 4284 4285 if (!tp || tp->t_firstblock == NULLFSBLOCK) { 4286 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) 4287 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1; 4288 else 4289 bma.minleft = 1; 4290 } else { 4291 bma.minleft = 0; 4292 } 4293 4294 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4295 error = xfs_iread_extents(tp, ip, whichfork); 4296 if (error) 4297 goto error0; 4298 } 4299 4300 n = 0; 4301 end = bno + len; 4302 obno = bno; 4303 4304 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got)) 4305 eof = true; 4306 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4307 bma.prev.br_startoff = NULLFILEOFF; 4308 bma.tp = tp; 4309 bma.ip = ip; 4310 bma.total = total; 4311 bma.datatype = 0; 4312 4313 while (bno < end && n < *nmap) { 4314 bool need_alloc = false, wasdelay = false; 4315 4316 /* in hole or beyond EOF? */ 4317 if (eof || bma.got.br_startoff > bno) { 4318 /* 4319 * CoW fork conversions should /never/ hit EOF or 4320 * holes. There should always be something for us 4321 * to work on.
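 * The assertion that follows catches a CONVERT request on the CoW fork that has landed in a hole or beyond EOF.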
4322 */ 4323 ASSERT(!((flags & XFS_BMAPI_CONVERT) && 4324 (flags & XFS_BMAPI_COWFORK))); 4325 4326 if (flags & XFS_BMAPI_DELALLOC) { 4327 /* 4328 * For the COW fork we can reasonably get a 4329 * request for converting an extent that races 4330 * with other threads already having converted 4331 * part of it, as there converting COW to 4332 * regular blocks is not protected using the 4333 * IOLOCK. 4334 */ 4335 ASSERT(flags & XFS_BMAPI_COWFORK); 4336 if (!(flags & XFS_BMAPI_COWFORK)) { 4337 error = -EIO; 4338 goto error0; 4339 } 4340 4341 if (eof || bno >= end) 4342 break; 4343 } else { 4344 need_alloc = true; 4345 } 4346 } else if (isnullstartblock(bma.got.br_startblock)) { 4347 wasdelay = true; 4348 } 4349 4350 /* 4351 * First, deal with the hole before the allocated space 4352 * that we found, if any. 4353 */ 4354 if ((need_alloc || wasdelay) && 4355 !(flags & XFS_BMAPI_CONVERT_ONLY)) { 4356 bma.eof = eof; 4357 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4358 bma.wasdel = wasdelay; 4359 bma.offset = bno; 4360 bma.flags = flags; 4361 4362 /* 4363 * There's a 32/64 bit type mismatch between the 4364 * allocation length request (which can be 64 bits in 4365 * length) and the bma length request, which is 4366 * xfs_extlen_t and therefore 32 bits. Hence we have to 4367 * check for 32-bit overflows and handle them here. 4368 */ 4369 if (len > (xfs_filblks_t)MAXEXTLEN) 4370 bma.length = MAXEXTLEN; 4371 else 4372 bma.length = len; 4373 4374 ASSERT(len > 0); 4375 ASSERT(bma.length > 0); 4376 error = xfs_bmapi_allocate(&bma); 4377 if (error) 4378 goto error0; 4379 if (bma.blkno == NULLFSBLOCK) 4380 break; 4381 4382 /* 4383 * If this is a CoW allocation, record the data in 4384 * the refcount btree for orphan recovery. 4385 */ 4386 if (whichfork == XFS_COW_FORK) { 4387 error = xfs_refcount_alloc_cow_extent(tp, 4388 bma.blkno, bma.length); 4389 if (error) 4390 goto error0; 4391 } 4392 } 4393 4394 /* Deal with the allocated space we found. */ 4395 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, 4396 end, n, flags); 4397 4398 /* Execute unwritten extent conversion if necessary */ 4399 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4400 if (error == -EAGAIN) 4401 continue; 4402 if (error) 4403 goto error0; 4404 4405 /* update the extent map to return */ 4406 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4407 4408 /* 4409 * If we're done, stop now. Stop when we've allocated 4410 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise 4411 * the transaction may get too big. 4412 */ 4413 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) 4414 break; 4415 4416 /* Else go on to the next record. */ 4417 bma.prev = bma.got; 4418 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got)) 4419 eof = true; 4420 } 4421 *nmap = n; 4422 4423 /* 4424 * Transform from btree to extents, give it cur. 4425 */ 4426 if (xfs_bmap_wants_extents(ip, whichfork)) { 4427 int tmp_logflags = 0; 4428 4429 ASSERT(bma.cur); 4430 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, 4431 &tmp_logflags, whichfork); 4432 bma.logflags |= tmp_logflags; 4433 if (error) 4434 goto error0; 4435 } 4436 4437 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || 4438 XFS_IFORK_NEXTENTS(ip, whichfork) > 4439 XFS_IFORK_MAXEXT(ip, whichfork)); 4440 error = 0; 4441 error0: 4442 /* 4443 * Log everything. Do this after conversion, there's no point in 4444 * logging the extent records if we've converted to btree format. 
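 * The fork format may have changed during this call, so the checks below drop any extent or broot logging flags that no longer match the final fork format before the inode is logged.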
4445 */ 4446 if ((bma.logflags & xfs_ilog_fext(whichfork)) && 4447 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 4448 bma.logflags &= ~xfs_ilog_fext(whichfork); 4449 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) && 4450 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 4451 bma.logflags &= ~xfs_ilog_fbroot(whichfork); 4452 /* 4453 * Log whatever the flags say, even if error. Otherwise we might miss 4454 * detecting a case where the data is changed, there's an error, 4455 * and it's not logged so we don't shutdown when we should. 4456 */ 4457 if (bma.logflags) 4458 xfs_trans_log_inode(tp, ip, bma.logflags); 4459 4460 if (bma.cur) { 4461 xfs_btree_del_cursor(bma.cur, error); 4462 } 4463 if (!error) 4464 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4465 orig_nmap, *nmap); 4466 return error; 4467 } 4468 4469 int 4470 xfs_bmapi_remap( 4471 struct xfs_trans *tp, 4472 struct xfs_inode *ip, 4473 xfs_fileoff_t bno, 4474 xfs_filblks_t len, 4475 xfs_fsblock_t startblock, 4476 int flags) 4477 { 4478 struct xfs_mount *mp = ip->i_mount; 4479 struct xfs_ifork *ifp; 4480 struct xfs_btree_cur *cur = NULL; 4481 struct xfs_bmbt_irec got; 4482 struct xfs_iext_cursor icur; 4483 int whichfork = xfs_bmapi_whichfork(flags); 4484 int logflags = 0, error; 4485 4486 ifp = XFS_IFORK_PTR(ip, whichfork); 4487 ASSERT(len > 0); 4488 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN); 4489 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4490 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC | 4491 XFS_BMAPI_NORMAP))); 4492 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) != 4493 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)); 4494 4495 if (unlikely(XFS_TEST_ERROR( 4496 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4497 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4498 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4499 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp); 4500 return -EFSCORRUPTED; 4501 } 4502 4503 if (XFS_FORCED_SHUTDOWN(mp)) 4504 return -EIO; 4505 4506 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4507 error = xfs_iread_extents(tp, ip, whichfork); 4508 if (error) 4509 return error; 4510 } 4511 4512 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) { 4513 /* make sure we only reflink into a hole. 
*/ 4514 ASSERT(got.br_startoff > bno); 4515 ASSERT(got.br_startoff - bno >= len); 4516 } 4517 4518 ip->i_d.di_nblocks += len; 4519 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4520 4521 if (ifp->if_flags & XFS_IFBROOT) { 4522 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 4523 cur->bc_private.b.flags = 0; 4524 } 4525 4526 got.br_startoff = bno; 4527 got.br_startblock = startblock; 4528 got.br_blockcount = len; 4529 if (flags & XFS_BMAPI_PREALLOC) 4530 got.br_state = XFS_EXT_UNWRITTEN; 4531 else 4532 got.br_state = XFS_EXT_NORM; 4533 4534 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur, 4535 &cur, &got, &logflags, flags); 4536 if (error) 4537 goto error0; 4538 4539 if (xfs_bmap_wants_extents(ip, whichfork)) { 4540 int tmp_logflags = 0; 4541 4542 error = xfs_bmap_btree_to_extents(tp, ip, cur, 4543 &tmp_logflags, whichfork); 4544 logflags |= tmp_logflags; 4545 } 4546 4547 error0: 4548 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) 4549 logflags &= ~XFS_ILOG_DEXT; 4550 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) 4551 logflags &= ~XFS_ILOG_DBROOT; 4552 4553 if (logflags) 4554 xfs_trans_log_inode(tp, ip, logflags); 4555 if (cur) 4556 xfs_btree_del_cursor(cur, error); 4557 return error; 4558 } 4559 4560 /* 4561 * When a delalloc extent is split (e.g., due to a hole punch), the original 4562 * indlen reservation must be shared across the two new extents that are left 4563 * behind. 4564 * 4565 * Given the original reservation and the worst case indlen for the two new 4566 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4567 * reservation fairly across the two new extents. If necessary, steal available 4568 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4569 * ores == 1). The number of stolen blocks is returned. The availability and 4570 * subsequent accounting of stolen blocks is the responsibility of the caller. 4571 */ 4572 static xfs_filblks_t 4573 xfs_bmap_split_indlen( 4574 xfs_filblks_t ores, /* original res. */ 4575 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4576 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4577 xfs_filblks_t avail) /* stealable blocks */ 4578 { 4579 xfs_filblks_t len1 = *indlen1; 4580 xfs_filblks_t len2 = *indlen2; 4581 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4582 xfs_filblks_t stolen = 0; 4583 xfs_filblks_t resfactor; 4584 4585 /* 4586 * Steal as many blocks as we can to try and satisfy the worst case 4587 * indlen for both new extents. 4588 */ 4589 if (ores < nres && avail) 4590 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4591 ores += stolen; 4592 4593 /* nothing else to do if we've satisfied the new reservation */ 4594 if (ores >= nres) 4595 return stolen; 4596 4597 /* 4598 * We can't meet the total required reservation for the two extents. 4599 * Calculate the percent of the overall shortage between both extents 4600 * and apply this percentage to each of the requested indlen values. 4601 * This distributes the shortage fairly and reduces the chances that one 4602 * of the two extents is left with nothing when extents are repeatedly 4603 * split. 4604 */ 4605 resfactor = (ores * 100); 4606 do_div(resfactor, nres); 4607 len1 *= resfactor; 4608 do_div(len1, 100); 4609 len2 *= resfactor; 4610 do_div(len2, 100); 4611 ASSERT(len1 + len2 <= ores); 4612 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4613 4614 /* 4615 * Hand out the remainder to each extent. If one of the two reservations 4616 * is zero, we want to make sure that one gets a block first. 
The loop 4617 * below starts with len1, so hand len2 a block right off the bat if it 4618 * is zero. 4619 */ 4620 ores -= (len1 + len2); 4621 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4622 if (ores && !len2 && *indlen2) { 4623 len2++; 4624 ores--; 4625 } 4626 while (ores) { 4627 if (len1 < *indlen1) { 4628 len1++; 4629 ores--; 4630 } 4631 if (!ores) 4632 break; 4633 if (len2 < *indlen2) { 4634 len2++; 4635 ores--; 4636 } 4637 } 4638 4639 *indlen1 = len1; 4640 *indlen2 = len2; 4641 4642 return stolen; 4643 } 4644 4645 int 4646 xfs_bmap_del_extent_delay( 4647 struct xfs_inode *ip, 4648 int whichfork, 4649 struct xfs_iext_cursor *icur, 4650 struct xfs_bmbt_irec *got, 4651 struct xfs_bmbt_irec *del) 4652 { 4653 struct xfs_mount *mp = ip->i_mount; 4654 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4655 struct xfs_bmbt_irec new; 4656 int64_t da_old, da_new, da_diff = 0; 4657 xfs_fileoff_t del_endoff, got_endoff; 4658 xfs_filblks_t got_indlen, new_indlen, stolen; 4659 int state = xfs_bmap_fork_to_state(whichfork); 4660 int error = 0; 4661 bool isrt; 4662 4663 XFS_STATS_INC(mp, xs_del_exlist); 4664 4665 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4666 del_endoff = del->br_startoff + del->br_blockcount; 4667 got_endoff = got->br_startoff + got->br_blockcount; 4668 da_old = startblockval(got->br_startblock); 4669 da_new = 0; 4670 4671 ASSERT(del->br_blockcount > 0); 4672 ASSERT(got->br_startoff <= del->br_startoff); 4673 ASSERT(got_endoff >= del_endoff); 4674 4675 if (isrt) { 4676 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4677 4678 do_div(rtexts, mp->m_sb.sb_rextsize); 4679 xfs_mod_frextents(mp, rtexts); 4680 } 4681 4682 /* 4683 * Update the inode delalloc counter now and wait to update the 4684 * sb counters as we might have to borrow some blocks for the 4685 * indirect block accounting. 4686 */ 4687 error = xfs_trans_reserve_quota_nblks(NULL, ip, 4688 -((long)del->br_blockcount), 0, 4689 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4690 if (error) 4691 return error; 4692 ip->i_delayed_blks -= del->br_blockcount; 4693 4694 if (got->br_startoff == del->br_startoff) 4695 state |= BMAP_LEFT_FILLING; 4696 if (got_endoff == del_endoff) 4697 state |= BMAP_RIGHT_FILLING; 4698 4699 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4700 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4701 /* 4702 * Matches the whole extent. Delete the entry. 4703 */ 4704 xfs_iext_remove(ip, icur, state); 4705 xfs_iext_prev(ifp, icur); 4706 break; 4707 case BMAP_LEFT_FILLING: 4708 /* 4709 * Deleting the first part of the extent. 4710 */ 4711 got->br_startoff = del_endoff; 4712 got->br_blockcount -= del->br_blockcount; 4713 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4714 got->br_blockcount), da_old); 4715 got->br_startblock = nullstartblock((int)da_new); 4716 xfs_iext_update_extent(ip, state, icur, got); 4717 break; 4718 case BMAP_RIGHT_FILLING: 4719 /* 4720 * Deleting the last part of the extent. 4721 */ 4722 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4723 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4724 got->br_blockcount), da_old); 4725 got->br_startblock = nullstartblock((int)da_new); 4726 xfs_iext_update_extent(ip, state, icur, got); 4727 break; 4728 case 0: 4729 /* 4730 * Deleting the middle of the extent. 4731 * 4732 * Distribute the original indlen reservation across the two new 4733 * extents. Steal blocks from the deleted extent if necessary. 4734 * Stealing blocks simply fudges the fdblocks accounting below. 
4735 * Warn if either of the new indlen reservations is zero as this 4736 * can lead to delalloc problems. 4737 */ 4738 got->br_blockcount = del->br_startoff - got->br_startoff; 4739 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4740 4741 new.br_blockcount = got_endoff - del_endoff; 4742 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4743 4744 WARN_ON_ONCE(!got_indlen || !new_indlen); 4745 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4746 del->br_blockcount); 4747 4748 got->br_startblock = nullstartblock((int)got_indlen); 4749 4750 new.br_startoff = del_endoff; 4751 new.br_state = got->br_state; 4752 new.br_startblock = nullstartblock((int)new_indlen); 4753 4754 xfs_iext_update_extent(ip, state, icur, got); 4755 xfs_iext_next(ifp, icur); 4756 xfs_iext_insert(ip, icur, &new, state); 4757 4758 da_new = got_indlen + new_indlen - stolen; 4759 del->br_blockcount -= stolen; 4760 break; 4761 } 4762 4763 ASSERT(da_old >= da_new); 4764 da_diff = da_old - da_new; 4765 if (!isrt) 4766 da_diff += del->br_blockcount; 4767 if (da_diff) 4768 xfs_mod_fdblocks(mp, da_diff, false); 4769 return error; 4770 } 4771 4772 void 4773 xfs_bmap_del_extent_cow( 4774 struct xfs_inode *ip, 4775 struct xfs_iext_cursor *icur, 4776 struct xfs_bmbt_irec *got, 4777 struct xfs_bmbt_irec *del) 4778 { 4779 struct xfs_mount *mp = ip->i_mount; 4780 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 4781 struct xfs_bmbt_irec new; 4782 xfs_fileoff_t del_endoff, got_endoff; 4783 int state = BMAP_COWFORK; 4784 4785 XFS_STATS_INC(mp, xs_del_exlist); 4786 4787 del_endoff = del->br_startoff + del->br_blockcount; 4788 got_endoff = got->br_startoff + got->br_blockcount; 4789 4790 ASSERT(del->br_blockcount > 0); 4791 ASSERT(got->br_startoff <= del->br_startoff); 4792 ASSERT(got_endoff >= del_endoff); 4793 ASSERT(!isnullstartblock(got->br_startblock)); 4794 4795 if (got->br_startoff == del->br_startoff) 4796 state |= BMAP_LEFT_FILLING; 4797 if (got_endoff == del_endoff) 4798 state |= BMAP_RIGHT_FILLING; 4799 4800 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4801 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4802 /* 4803 * Matches the whole extent. Delete the entry. 4804 */ 4805 xfs_iext_remove(ip, icur, state); 4806 xfs_iext_prev(ifp, icur); 4807 break; 4808 case BMAP_LEFT_FILLING: 4809 /* 4810 * Deleting the first part of the extent. 4811 */ 4812 got->br_startoff = del_endoff; 4813 got->br_blockcount -= del->br_blockcount; 4814 got->br_startblock = del->br_startblock + del->br_blockcount; 4815 xfs_iext_update_extent(ip, state, icur, got); 4816 break; 4817 case BMAP_RIGHT_FILLING: 4818 /* 4819 * Deleting the last part of the extent. 4820 */ 4821 got->br_blockcount -= del->br_blockcount; 4822 xfs_iext_update_extent(ip, state, icur, got); 4823 break; 4824 case 0: 4825 /* 4826 * Deleting the middle of the extent. 4827 */ 4828 got->br_blockcount = del->br_startoff - got->br_startoff; 4829 4830 new.br_startoff = del_endoff; 4831 new.br_blockcount = got_endoff - del_endoff; 4832 new.br_state = got->br_state; 4833 new.br_startblock = del->br_startblock + del->br_blockcount; 4834 4835 xfs_iext_update_extent(ip, state, icur, got); 4836 xfs_iext_next(ifp, icur); 4837 xfs_iext_insert(ip, icur, &new, state); 4838 break; 4839 } 4840 ip->i_delayed_blks -= del->br_blockcount; 4841 } 4842 4843 /* 4844 * Called by xfs_bmapi to update file extent records and the btree 4845 * after removing space. 
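 * The range described by del must lie entirely within an existing real (non-delalloc) extent; purely delayed allocations are removed by xfs_bmap_del_extent_delay() instead.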
4846 */ 4847 STATIC int /* error */ 4848 xfs_bmap_del_extent_real( 4849 xfs_inode_t *ip, /* incore inode pointer */ 4850 xfs_trans_t *tp, /* current transaction pointer */ 4851 struct xfs_iext_cursor *icur, 4852 xfs_btree_cur_t *cur, /* if null, not a btree */ 4853 xfs_bmbt_irec_t *del, /* data to remove from extents */ 4854 int *logflagsp, /* inode logging flags */ 4855 int whichfork, /* data or attr fork */ 4856 int bflags) /* bmapi flags */ 4857 { 4858 xfs_fsblock_t del_endblock=0; /* first block past del */ 4859 xfs_fileoff_t del_endoff; /* first offset past del */ 4860 int do_fx; /* free extent at end of routine */ 4861 int error; /* error return value */ 4862 int flags = 0;/* inode logging flags */ 4863 struct xfs_bmbt_irec got; /* current extent entry */ 4864 xfs_fileoff_t got_endoff; /* first offset past got */ 4865 int i; /* temp state */ 4866 struct xfs_ifork *ifp; /* inode fork pointer */ 4867 xfs_mount_t *mp; /* mount structure */ 4868 xfs_filblks_t nblks; /* quota/sb block count */ 4869 xfs_bmbt_irec_t new; /* new record to be inserted */ 4870 /* REFERENCED */ 4871 uint qfield; /* quota field to update */ 4872 int state = xfs_bmap_fork_to_state(whichfork); 4873 struct xfs_bmbt_irec old; 4874 4875 mp = ip->i_mount; 4876 XFS_STATS_INC(mp, xs_del_exlist); 4877 4878 ifp = XFS_IFORK_PTR(ip, whichfork); 4879 ASSERT(del->br_blockcount > 0); 4880 xfs_iext_get_extent(ifp, icur, &got); 4881 ASSERT(got.br_startoff <= del->br_startoff); 4882 del_endoff = del->br_startoff + del->br_blockcount; 4883 got_endoff = got.br_startoff + got.br_blockcount; 4884 ASSERT(got_endoff >= del_endoff); 4885 ASSERT(!isnullstartblock(got.br_startblock)); 4886 qfield = 0; 4887 error = 0; 4888 4889 /* 4890 * If it's the case where the directory code is running with no block 4891 * reservation, and the deleted block is in the middle of its extent, 4892 * and the resulting insert of an extent would cause transformation to 4893 * btree format, then reject it. The calling code will then swap blocks 4894 * around instead. We have to do this now, rather than waiting for the 4895 * conversion to btree format, since the transaction will be dirty then. 
4896 */ 4897 if (tp->t_blk_res == 0 && 4898 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 4899 XFS_IFORK_NEXTENTS(ip, whichfork) >= 4900 XFS_IFORK_MAXEXT(ip, whichfork) && 4901 del->br_startoff > got.br_startoff && del_endoff < got_endoff) 4902 return -ENOSPC; 4903 4904 flags = XFS_ILOG_CORE; 4905 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 4906 xfs_fsblock_t bno; 4907 xfs_filblks_t len; 4908 xfs_extlen_t mod; 4909 4910 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize, 4911 &mod); 4912 ASSERT(mod == 0); 4913 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize, 4914 &mod); 4915 ASSERT(mod == 0); 4916 4917 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 4918 if (error) 4919 goto done; 4920 do_fx = 0; 4921 nblks = len * mp->m_sb.sb_rextsize; 4922 qfield = XFS_TRANS_DQ_RTBCOUNT; 4923 } else { 4924 do_fx = 1; 4925 nblks = del->br_blockcount; 4926 qfield = XFS_TRANS_DQ_BCOUNT; 4927 } 4928 4929 del_endblock = del->br_startblock + del->br_blockcount; 4930 if (cur) { 4931 error = xfs_bmbt_lookup_eq(cur, &got, &i); 4932 if (error) 4933 goto done; 4934 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 4935 } 4936 4937 if (got.br_startoff == del->br_startoff) 4938 state |= BMAP_LEFT_FILLING; 4939 if (got_endoff == del_endoff) 4940 state |= BMAP_RIGHT_FILLING; 4941 4942 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4943 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4944 /* 4945 * Matches the whole extent. Delete the entry. 4946 */ 4947 xfs_iext_remove(ip, icur, state); 4948 xfs_iext_prev(ifp, icur); 4949 XFS_IFORK_NEXT_SET(ip, whichfork, 4950 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 4951 flags |= XFS_ILOG_CORE; 4952 if (!cur) { 4953 flags |= xfs_ilog_fext(whichfork); 4954 break; 4955 } 4956 if ((error = xfs_btree_delete(cur, &i))) 4957 goto done; 4958 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 4959 break; 4960 case BMAP_LEFT_FILLING: 4961 /* 4962 * Deleting the first part of the extent. 4963 */ 4964 got.br_startoff = del_endoff; 4965 got.br_startblock = del_endblock; 4966 got.br_blockcount -= del->br_blockcount; 4967 xfs_iext_update_extent(ip, state, icur, &got); 4968 if (!cur) { 4969 flags |= xfs_ilog_fext(whichfork); 4970 break; 4971 } 4972 error = xfs_bmbt_update(cur, &got); 4973 if (error) 4974 goto done; 4975 break; 4976 case BMAP_RIGHT_FILLING: 4977 /* 4978 * Deleting the last part of the extent. 4979 */ 4980 got.br_blockcount -= del->br_blockcount; 4981 xfs_iext_update_extent(ip, state, icur, &got); 4982 if (!cur) { 4983 flags |= xfs_ilog_fext(whichfork); 4984 break; 4985 } 4986 error = xfs_bmbt_update(cur, &got); 4987 if (error) 4988 goto done; 4989 break; 4990 case 0: 4991 /* 4992 * Deleting the middle of the extent. 4993 */ 4994 old = got; 4995 4996 got.br_blockcount = del->br_startoff - got.br_startoff; 4997 xfs_iext_update_extent(ip, state, icur, &got); 4998 4999 new.br_startoff = del_endoff; 5000 new.br_blockcount = got_endoff - del_endoff; 5001 new.br_state = got.br_state; 5002 new.br_startblock = del_endblock; 5003 5004 flags |= XFS_ILOG_CORE; 5005 if (cur) { 5006 error = xfs_bmbt_update(cur, &got); 5007 if (error) 5008 goto done; 5009 error = xfs_btree_increment(cur, 0, &i); 5010 if (error) 5011 goto done; 5012 cur->bc_rec.b = new; 5013 error = xfs_btree_insert(cur, &i); 5014 if (error && error != -ENOSPC) 5015 goto done; 5016 /* 5017 * If get no-space back from btree insert, it tried a 5018 * split, and we have a zero block reservation. Fix up 5019 * our state and return the error. 
5020 */ 5021 if (error == -ENOSPC) { 5022 /* 5023 * Reset the cursor, don't trust it after any 5024 * insert operation. 5025 */ 5026 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5027 if (error) 5028 goto done; 5029 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5030 /* 5031 * Update the btree record back 5032 * to the original value. 5033 */ 5034 error = xfs_bmbt_update(cur, &old); 5035 if (error) 5036 goto done; 5037 /* 5038 * Reset the extent record back 5039 * to the original value. 5040 */ 5041 xfs_iext_update_extent(ip, state, icur, &old); 5042 flags = 0; 5043 error = -ENOSPC; 5044 goto done; 5045 } 5046 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5047 } else 5048 flags |= xfs_ilog_fext(whichfork); 5049 XFS_IFORK_NEXT_SET(ip, whichfork, 5050 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 5051 xfs_iext_next(ifp, icur); 5052 xfs_iext_insert(ip, icur, &new, state); 5053 break; 5054 } 5055 5056 /* remove reverse mapping */ 5057 error = xfs_rmap_unmap_extent(tp, ip, whichfork, del); 5058 if (error) 5059 goto done; 5060 5061 /* 5062 * If we need to, add to list of extents to delete. 5063 */ 5064 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) { 5065 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { 5066 error = xfs_refcount_decrease_extent(tp, del); 5067 if (error) 5068 goto done; 5069 } else { 5070 __xfs_bmap_add_free(tp, del->br_startblock, 5071 del->br_blockcount, NULL, 5072 (bflags & XFS_BMAPI_NODISCARD) || 5073 del->br_state == XFS_EXT_UNWRITTEN); 5074 } 5075 } 5076 5077 /* 5078 * Adjust inode # blocks in the file. 5079 */ 5080 if (nblks) 5081 ip->i_d.di_nblocks -= nblks; 5082 /* 5083 * Adjust quota data. 5084 */ 5085 if (qfield && !(bflags & XFS_BMAPI_REMAP)) 5086 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 5087 5088 done: 5089 *logflagsp = flags; 5090 return error; 5091 } 5092 5093 /* 5094 * Unmap (remove) blocks from a file. 5095 * If nexts is nonzero then the number of extents to remove is limited to 5096 * that value. If not all extents in the block range can be removed then 5097 * *done is set. 
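 * Note that __xfs_bunmapi() reports progress through *rlen (the length still left to unmap); the xfs_bunmapi() wrapper below derives the *done flag from that.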
5098 */ 5099 int /* error */ 5100 __xfs_bunmapi( 5101 struct xfs_trans *tp, /* transaction pointer */ 5102 struct xfs_inode *ip, /* incore inode */ 5103 xfs_fileoff_t start, /* first file offset deleted */ 5104 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5105 int flags, /* misc flags */ 5106 xfs_extnum_t nexts) /* number of extents max */ 5107 { 5108 struct xfs_btree_cur *cur; /* bmap btree cursor */ 5109 struct xfs_bmbt_irec del; /* extent being deleted */ 5110 int error; /* error return value */ 5111 xfs_extnum_t extno; /* extent number in list */ 5112 struct xfs_bmbt_irec got; /* current extent record */ 5113 struct xfs_ifork *ifp; /* inode fork pointer */ 5114 int isrt; /* freeing in rt area */ 5115 int logflags; /* transaction logging flags */ 5116 xfs_extlen_t mod; /* rt extent offset */ 5117 struct xfs_mount *mp; /* mount structure */ 5118 int tmp_logflags; /* partial logging flags */ 5119 int wasdel; /* was a delayed alloc extent */ 5120 int whichfork; /* data or attribute fork */ 5121 xfs_fsblock_t sum; 5122 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5123 xfs_fileoff_t max_len; 5124 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno; 5125 xfs_fileoff_t end; 5126 struct xfs_iext_cursor icur; 5127 bool done = false; 5128 5129 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_); 5130 5131 whichfork = xfs_bmapi_whichfork(flags); 5132 ASSERT(whichfork != XFS_COW_FORK); 5133 ifp = XFS_IFORK_PTR(ip, whichfork); 5134 if (unlikely( 5135 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5136 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5137 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5138 ip->i_mount); 5139 return -EFSCORRUPTED; 5140 } 5141 mp = ip->i_mount; 5142 if (XFS_FORCED_SHUTDOWN(mp)) 5143 return -EIO; 5144 5145 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5146 ASSERT(len > 0); 5147 ASSERT(nexts >= 0); 5148 5149 /* 5150 * Guesstimate how many blocks we can unmap without running the risk of 5151 * blowing out the transaction with a mix of EFIs and reflink 5152 * adjustments. 5153 */ 5154 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) 5155 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res)); 5156 else 5157 max_len = len; 5158 5159 if (!(ifp->if_flags & XFS_IFEXTENTS) && 5160 (error = xfs_iread_extents(tp, ip, whichfork))) 5161 return error; 5162 if (xfs_iext_count(ifp) == 0) { 5163 *rlen = 0; 5164 return 0; 5165 } 5166 XFS_STATS_INC(mp, xs_blk_unmap); 5167 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5168 end = start + len; 5169 5170 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) { 5171 *rlen = 0; 5172 return 0; 5173 } 5174 end--; 5175 5176 logflags = 0; 5177 if (ifp->if_flags & XFS_IFBROOT) { 5178 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 5179 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5180 cur->bc_private.b.flags = 0; 5181 } else 5182 cur = NULL; 5183 5184 if (isrt) { 5185 /* 5186 * Synchronize by locking the bitmap inode. 5187 */ 5188 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5189 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5190 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5191 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5192 } 5193 5194 extno = 0; 5195 while (end != (xfs_fileoff_t)-1 && end >= start && 5196 (nexts == 0 || extno < nexts) && max_len > 0) { 5197 /* 5198 * Is the found extent after a hole in which end lives? 5199 * Just back up to the previous extent, if so. 
5200 */ 5201 if (got.br_startoff > end && 5202 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5203 done = true; 5204 break; 5205 } 5206 /* 5207 * Is the last block of this extent before the range 5208 * we're supposed to delete? If so, we're done. 5209 */ 5210 end = XFS_FILEOFF_MIN(end, 5211 got.br_startoff + got.br_blockcount - 1); 5212 if (end < start) 5213 break; 5214 /* 5215 * Then deal with the (possibly delayed) allocated space 5216 * we found. 5217 */ 5218 del = got; 5219 wasdel = isnullstartblock(del.br_startblock); 5220 5221 /* 5222 * Make sure we don't touch multiple AGF headers out of order 5223 * in a single transaction, as that could cause AB-BA deadlocks. 5224 */ 5225 if (!wasdel) { 5226 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock); 5227 if (prev_agno != NULLAGNUMBER && prev_agno > agno) 5228 break; 5229 prev_agno = agno; 5230 } 5231 if (got.br_startoff < start) { 5232 del.br_startoff = start; 5233 del.br_blockcount -= start - got.br_startoff; 5234 if (!wasdel) 5235 del.br_startblock += start - got.br_startoff; 5236 } 5237 if (del.br_startoff + del.br_blockcount > end + 1) 5238 del.br_blockcount = end + 1 - del.br_startoff; 5239 5240 /* How much can we safely unmap? */ 5241 if (max_len < del.br_blockcount) { 5242 del.br_startoff += del.br_blockcount - max_len; 5243 if (!wasdel) 5244 del.br_startblock += del.br_blockcount - max_len; 5245 del.br_blockcount = max_len; 5246 } 5247 5248 if (!isrt) 5249 goto delete; 5250 5251 sum = del.br_startblock + del.br_blockcount; 5252 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod); 5253 if (mod) { 5254 /* 5255 * Realtime extent not lined up at the end. 5256 * The extent could have been split into written 5257 * and unwritten pieces, or we could just be 5258 * unmapping part of it. But we can't really 5259 * get rid of part of a realtime extent. 5260 */ 5261 if (del.br_state == XFS_EXT_UNWRITTEN) { 5262 /* 5263 * This piece is unwritten, or we're not 5264 * using unwritten extents. Skip over it. 5265 */ 5266 ASSERT(end >= mod); 5267 end -= mod > del.br_blockcount ? 5268 del.br_blockcount : mod; 5269 if (end < got.br_startoff && 5270 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5271 done = true; 5272 break; 5273 } 5274 continue; 5275 } 5276 /* 5277 * It's written, turn it unwritten. 5278 * This is better than zeroing it. 5279 */ 5280 ASSERT(del.br_state == XFS_EXT_NORM); 5281 ASSERT(tp->t_blk_res > 0); 5282 /* 5283 * If this spans a realtime extent boundary, 5284 * chop it back to the start of the one we end at. 5285 */ 5286 if (del.br_blockcount > mod) { 5287 del.br_startoff += del.br_blockcount - mod; 5288 del.br_startblock += del.br_blockcount - mod; 5289 del.br_blockcount = mod; 5290 } 5291 del.br_state = XFS_EXT_UNWRITTEN; 5292 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5293 whichfork, &icur, &cur, &del, 5294 &logflags); 5295 if (error) 5296 goto error0; 5297 goto nodelete; 5298 } 5299 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); 5300 if (mod) { 5301 /* 5302 * Realtime extent is lined up at the end but not 5303 * at the front. We'll get rid of full extents if 5304 * we can. 5305 */ 5306 mod = mp->m_sb.sb_rextsize - mod; 5307 if (del.br_blockcount > mod) { 5308 del.br_blockcount -= mod; 5309 del.br_startoff += mod; 5310 del.br_startblock += mod; 5311 } else if (del.br_startoff == start && 5312 (del.br_state == XFS_EXT_UNWRITTEN || 5313 tp->t_blk_res == 0)) { 5314 /* 5315 * Can't make it unwritten. There isn't 5316 * a full extent here so just skip it. 
5317 */ 5318 ASSERT(end >= del.br_blockcount); 5319 end -= del.br_blockcount; 5320 if (got.br_startoff > end && 5321 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5322 done = true; 5323 break; 5324 } 5325 continue; 5326 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5327 struct xfs_bmbt_irec prev; 5328 5329 /* 5330 * This one is already unwritten. 5331 * It must have a written left neighbor. 5332 * Unwrite the killed part of that one and 5333 * try again. 5334 */ 5335 if (!xfs_iext_prev_extent(ifp, &icur, &prev)) 5336 ASSERT(0); 5337 ASSERT(prev.br_state == XFS_EXT_NORM); 5338 ASSERT(!isnullstartblock(prev.br_startblock)); 5339 ASSERT(del.br_startblock == 5340 prev.br_startblock + prev.br_blockcount); 5341 if (prev.br_startoff < start) { 5342 mod = start - prev.br_startoff; 5343 prev.br_blockcount -= mod; 5344 prev.br_startblock += mod; 5345 prev.br_startoff = start; 5346 } 5347 prev.br_state = XFS_EXT_UNWRITTEN; 5348 error = xfs_bmap_add_extent_unwritten_real(tp, 5349 ip, whichfork, &icur, &cur, 5350 &prev, &logflags); 5351 if (error) 5352 goto error0; 5353 goto nodelete; 5354 } else { 5355 ASSERT(del.br_state == XFS_EXT_NORM); 5356 del.br_state = XFS_EXT_UNWRITTEN; 5357 error = xfs_bmap_add_extent_unwritten_real(tp, 5358 ip, whichfork, &icur, &cur, 5359 &del, &logflags); 5360 if (error) 5361 goto error0; 5362 goto nodelete; 5363 } 5364 } 5365 5366 delete: 5367 if (wasdel) { 5368 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur, 5369 &got, &del); 5370 } else { 5371 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur, 5372 &del, &tmp_logflags, whichfork, 5373 flags); 5374 logflags |= tmp_logflags; 5375 } 5376 5377 if (error) 5378 goto error0; 5379 5380 max_len -= del.br_blockcount; 5381 end = del.br_startoff - 1; 5382 nodelete: 5383 /* 5384 * If not done go on to the next (previous) record. 5385 */ 5386 if (end != (xfs_fileoff_t)-1 && end >= start) { 5387 if (!xfs_iext_get_extent(ifp, &icur, &got) || 5388 (got.br_startoff > end && 5389 !xfs_iext_prev_extent(ifp, &icur, &got))) { 5390 done = true; 5391 break; 5392 } 5393 extno++; 5394 } 5395 } 5396 if (done || end == (xfs_fileoff_t)-1 || end < start) 5397 *rlen = 0; 5398 else 5399 *rlen = end - start + 1; 5400 5401 /* 5402 * Convert to a btree if necessary. 5403 */ 5404 if (xfs_bmap_needs_btree(ip, whichfork)) { 5405 ASSERT(cur == NULL); 5406 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 5407 &tmp_logflags, whichfork); 5408 logflags |= tmp_logflags; 5409 if (error) 5410 goto error0; 5411 } 5412 /* 5413 * transform from btree to extents, give it cur 5414 */ 5415 else if (xfs_bmap_wants_extents(ip, whichfork)) { 5416 ASSERT(cur != NULL); 5417 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, 5418 whichfork); 5419 logflags |= tmp_logflags; 5420 if (error) 5421 goto error0; 5422 } 5423 /* 5424 * transform from extents to local? 5425 */ 5426 error = 0; 5427 error0: 5428 /* 5429 * Log everything. Do this after conversion, there's no point in 5430 * logging the extent records if we've converted to btree format. 5431 */ 5432 if ((logflags & xfs_ilog_fext(whichfork)) && 5433 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 5434 logflags &= ~xfs_ilog_fext(whichfork); 5435 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5436 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 5437 logflags &= ~xfs_ilog_fbroot(whichfork); 5438 /* 5439 * Log inode even in the error case, if the transaction 5440 * is dirty we'll need to shut down the filesystem. 
5441 */ 5442 if (logflags) 5443 xfs_trans_log_inode(tp, ip, logflags); 5444 if (cur) { 5445 if (!error) 5446 cur->bc_private.b.allocated = 0; 5447 xfs_btree_del_cursor(cur, error); 5448 } 5449 return error; 5450 } 5451 5452 /* Unmap a range of a file. */ 5453 int 5454 xfs_bunmapi( 5455 xfs_trans_t *tp, 5456 struct xfs_inode *ip, 5457 xfs_fileoff_t bno, 5458 xfs_filblks_t len, 5459 int flags, 5460 xfs_extnum_t nexts, 5461 int *done) 5462 { 5463 int error; 5464 5465 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts); 5466 *done = (len == 0); 5467 return error; 5468 } 5469 5470 /* 5471 * Determine whether an extent shift can be accomplished by a merge with the 5472 * extent that precedes the target hole of the shift. 5473 */ 5474 STATIC bool 5475 xfs_bmse_can_merge( 5476 struct xfs_bmbt_irec *left, /* preceding extent */ 5477 struct xfs_bmbt_irec *got, /* current extent to shift */ 5478 xfs_fileoff_t shift) /* shift fsb */ 5479 { 5480 xfs_fileoff_t startoff; 5481 5482 startoff = got->br_startoff - shift; 5483 5484 /* 5485 * The extent, once shifted, must be adjacent in-file and on-disk with 5486 * the preceding extent. 5487 */ 5488 if ((left->br_startoff + left->br_blockcount != startoff) || 5489 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5490 (left->br_state != got->br_state) || 5491 (left->br_blockcount + got->br_blockcount > MAXEXTLEN)) 5492 return false; 5493 5494 return true; 5495 } 5496 5497 /* 5498 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5499 * hole in the file. If an extent shift would result in the extent being fully 5500 * adjacent to the extent that currently precedes the hole, we can merge with 5501 * the preceding extent rather than do the shift. 5502 * 5503 * This function assumes the caller has verified a shift-by-merge is possible 5504 * with the provided extents via xfs_bmse_can_merge(). 5505 */ 5506 STATIC int 5507 xfs_bmse_merge( 5508 struct xfs_trans *tp, 5509 struct xfs_inode *ip, 5510 int whichfork, 5511 xfs_fileoff_t shift, /* shift fsb */ 5512 struct xfs_iext_cursor *icur, 5513 struct xfs_bmbt_irec *got, /* extent to shift */ 5514 struct xfs_bmbt_irec *left, /* preceding extent */ 5515 struct xfs_btree_cur *cur, 5516 int *logflags) /* output */ 5517 { 5518 struct xfs_bmbt_irec new; 5519 xfs_filblks_t blockcount; 5520 int error, i; 5521 struct xfs_mount *mp = ip->i_mount; 5522 5523 blockcount = left->br_blockcount + got->br_blockcount; 5524 5525 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5526 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5527 ASSERT(xfs_bmse_can_merge(left, got, shift)); 5528 5529 new = *left; 5530 new.br_blockcount = blockcount; 5531 5532 /* 5533 * Update the on-disk extent count, the btree if necessary and log the 5534 * inode. 
5535 */ 5536 XFS_IFORK_NEXT_SET(ip, whichfork, 5537 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5538 *logflags |= XFS_ILOG_CORE; 5539 if (!cur) { 5540 *logflags |= XFS_ILOG_DEXT; 5541 goto done; 5542 } 5543 5544 /* lookup and remove the extent to merge */ 5545 error = xfs_bmbt_lookup_eq(cur, got, &i); 5546 if (error) 5547 return error; 5548 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5549 5550 error = xfs_btree_delete(cur, &i); 5551 if (error) 5552 return error; 5553 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5554 5555 /* lookup and update size of the previous extent */ 5556 error = xfs_bmbt_lookup_eq(cur, left, &i); 5557 if (error) 5558 return error; 5559 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5560 5561 error = xfs_bmbt_update(cur, &new); 5562 if (error) 5563 return error; 5564 5565 done: 5566 xfs_iext_remove(ip, icur, 0); 5567 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur); 5568 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5569 &new); 5570 5571 /* update reverse mapping. rmap functions merge the rmaps for us */ 5572 error = xfs_rmap_unmap_extent(tp, ip, whichfork, got); 5573 if (error) 5574 return error; 5575 memcpy(&new, got, sizeof(new)); 5576 new.br_startoff = left->br_startoff + left->br_blockcount; 5577 return xfs_rmap_map_extent(tp, ip, whichfork, &new); 5578 } 5579 5580 static int 5581 xfs_bmap_shift_update_extent( 5582 struct xfs_trans *tp, 5583 struct xfs_inode *ip, 5584 int whichfork, 5585 struct xfs_iext_cursor *icur, 5586 struct xfs_bmbt_irec *got, 5587 struct xfs_btree_cur *cur, 5588 int *logflags, 5589 xfs_fileoff_t startoff) 5590 { 5591 struct xfs_mount *mp = ip->i_mount; 5592 struct xfs_bmbt_irec prev = *got; 5593 int error, i; 5594 5595 *logflags |= XFS_ILOG_CORE; 5596 5597 got->br_startoff = startoff; 5598 5599 if (cur) { 5600 error = xfs_bmbt_lookup_eq(cur, &prev, &i); 5601 if (error) 5602 return error; 5603 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5604 5605 error = xfs_bmbt_update(cur, got); 5606 if (error) 5607 return error; 5608 } else { 5609 *logflags |= XFS_ILOG_DEXT; 5610 } 5611 5612 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5613 got); 5614 5615 /* update reverse mapping */ 5616 error = xfs_rmap_unmap_extent(tp, ip, whichfork, &prev); 5617 if (error) 5618 return error; 5619 return xfs_rmap_map_extent(tp, ip, whichfork, got); 5620 } 5621 5622 int 5623 xfs_bmap_collapse_extents( 5624 struct xfs_trans *tp, 5625 struct xfs_inode *ip, 5626 xfs_fileoff_t *next_fsb, 5627 xfs_fileoff_t offset_shift_fsb, 5628 bool *done) 5629 { 5630 int whichfork = XFS_DATA_FORK; 5631 struct xfs_mount *mp = ip->i_mount; 5632 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 5633 struct xfs_btree_cur *cur = NULL; 5634 struct xfs_bmbt_irec got, prev; 5635 struct xfs_iext_cursor icur; 5636 xfs_fileoff_t new_startoff; 5637 int error = 0; 5638 int logflags = 0; 5639 5640 if (unlikely(XFS_TEST_ERROR( 5641 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5642 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 5643 mp, XFS_ERRTAG_BMAPIFORMAT))) { 5644 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 5645 return -EFSCORRUPTED; 5646 } 5647 5648 if (XFS_FORCED_SHUTDOWN(mp)) 5649 return -EIO; 5650 5651 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL)); 5652 5653 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 5654 error = xfs_iread_extents(tp, ip, whichfork); 5655 if (error) 5656 return error; 5657 } 5658 5659 if (ifp->if_flags & XFS_IFBROOT) { 5660 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5661 cur->bc_private.b.flags = 0; 5662 
int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
				del_cursor);

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
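
/*
 * Shift extents towards higher file offsets by offset_shift_fsb blocks, one
 * extent per call, working backwards from the last extent in the file (or
 * from *next_fsb on subsequent calls) down to stop_fsb.  *done is set once
 * no extents beyond stop_fsb remain to be shifted.
 */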
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
				del_cursor);

	if (stop_fsb >= got.br_startoff + got.br_blockcount) {
		error = -EIO;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way.  We
		 * should never find mergeable extents in this scenario.
		 * Check anyway and warn if we encounter two extents that
		 * could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
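
/*
 * Illustrative (simplified, hypothetical) use of the two shift helpers above:
 * the collapse/insert range callers repeatedly set up a transaction, shift a
 * single extent, and commit until *done is set, roughly:
 *
 *	bool		done = false;
 *	xfs_fileoff_t	next_fsb = start_fsb;
 *
 *	while (!done) {
 *		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
 *				&tp);
 *		if (error)
 *			break;
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
 *				shift_fsb, &done);
 *		if (error) {
 *			xfs_trans_cancel(tp);
 *			break;
 *		}
 *		error = xfs_trans_commit(tp);
 *		if (error)
 *			break;
 *	}
 *
 * The real callers (xfs_collapse_file_space() and xfs_insert_file_space() in
 * xfs_bmap_util.c) additionally hold the IOLOCK/MMAPLOCK and flush and
 * invalidate the page cache first; the sketch above only shows the
 * per-extent transaction loop.
 */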

/*
 * Split an extent at the block given by @split_fsb so that @split_fsb becomes
 * the first block of the new (second) extent.  The extent containing
 * @split_fsb is the one that is split.  If @split_fsb lies in a hole or at
 * the first block of an extent, just return 0.
 */
STATIC int
xfs_bmap_split_extent_at(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new; /* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	xfs_fsblock_t		gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) + 1);

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

int
xfs_bmap_split_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
	if (error)
		goto out;

	return xfs_trans_commit(tp);

out:
	xfs_trans_cancel(tp);
	return error;
}

/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return  bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}
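
/*
 * The helpers below do not modify the extent map directly.  __xfs_bmap_add()
 * records an xfs_bmap_intent and queues it with xfs_defer_add(); the
 * deferred-operations machinery later replays each intent through
 * xfs_bmap_finish_one().  Holes and delayed allocations are filtered out up
 * front by xfs_bmap_is_update_needed() above.
 */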

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
int
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
int
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}

/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error = 0;

	ASSERT(tp->t_firstblock == NULLFSBLOCK);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
			ip->i_ino, whichfork, startoff, *blockcount, state);

	if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
				startblock, 0);
		*blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, ip, startoff, blockcount,
				XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}
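
/*
 * Note the asymmetry in xfs_bmap_finish_one() above: XFS_BMAP_MAP consumes
 * the whole intent and zeroes *blockcount, while XFS_BMAP_UNMAP lets
 * __xfs_bunmapi() write back how much of the range is still unmapped, so the
 * caller can see whether more work remains.
 */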

/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		endfsb;
	bool			isrt;

	isrt = XFS_IS_REALTIME_INODE(ip);
	endfsb = irec->br_startblock + irec->br_blockcount - 1;
	if (isrt) {
		if (!xfs_verify_rtbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_rtbno(mp, endfsb))
			return __this_address;
	} else {
		if (!xfs_verify_fsbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_fsbno(mp, endfsb))
			return __this_address;
		if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
		    XFS_FSB_TO_AGNO(mp, endfsb))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}
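
/*
 * Illustrative use of xfs_bmap_validate_extent() (hypothetical caller, shown
 * for documentation only): code walking a fork's mappings can reject a bad
 * record when a failure address comes back, e.g.:
 *
 *	xfs_failaddr_t	fa;
 *
 *	fa = xfs_bmap_validate_extent(ip, XFS_DATA_FORK, &irec);
 *	if (fa)
 *		return -EFSCORRUPTED;
 */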