// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"


kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
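
/*
 * Illustrative example of the computation above (hypothetical numbers, the
 * real values depend on block and inode size): with maxleafents = 2^31 - 1
 * and, say, ~125 minimum records per leaf and node block, the loop needs
 * roughly ceil(2^31 / 125) leaf blocks, then ~137,000 level-1 blocks,
 * ~1,100 level-2 blocks and ~9 level-3 blocks, which fit in the inode root,
 * so m_bm_maxlevels ends up around 5 for the data fork.
 */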

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
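
/*
 * Worked example of the estimate above (hypothetical numbers): a delayed
 * extent of ~1000 blocks with ~125 records per bmbt block needs about
 * ceil(1000/125) = 8 leaf blocks and 1 level-1 block; once a level maps to
 * a single block the remaining tree height is charged at one block per
 * level, giving a small, safely pessimistic indirect block reservation.
 */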

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such
 * large bmapbt constructs those checks are going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	struct xfs_ifork	*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					xfs_verify_fsbno(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */
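
/*
 * Note (summary of existing behaviour, nothing new): callers normally use
 * the xfs_bmap_add_free() wrapper; the freed extent is not returned to the
 * allocator immediately but queued as a deferred-free work item
 * (XFS_DEFER_OPS_TYPE_FREE), so the actual free happens when the deferred
 * operations attached to the transaction are finished.
 */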

/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_bmap_add_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree
 * format, but the extent list is small enough that it fits into the extent
 * format.
 *
 * Since the extents are already in-core, all we have to do is give up the
 * space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
			xfs_btree_check_lptr(cur, cbno, 1));
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
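
/*
 * For illustration: the conversion above only fires when
 * xfs_bmap_wants_extents() reports that the fork has dropped back to at
 * most XFS_IFORK_MAXEXT() records, e.g. after a truncate removes enough
 * extents that the remaining records fit in the inode fork again, so the
 * single remaining bmbt leaf can be discarded.
 */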

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	if (!abp) {
		error = -EFSCORRUPTED;
		goto out_unreserve_dquot;
	}

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
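
/*
 * Rough illustration (hypothetical sizes): with 16-byte on-disk extent
 * records, a data fork area of a few hundred bytes only holds a few dozen
 * records inline, so the first allocation that pushes the extent count past
 * XFS_IFORK_MAXEXT() triggers the conversion above (see
 * xfs_bmap_needs_btree()).
 */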

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}


STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much
	 * to log here. Note that init_fn must also set the buffer log item
	 * type correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}
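
/*
 * Example of the check above (illustrative numbers only): with 16-byte
 * extent records, an inode whose data fork area is 448 bytes can keep up
 * to 28 records in extents format, so the btree conversion is only needed
 * once di_nextents exceeds what XFS_IFORK_DSIZE() can hold.
 */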

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/* Set an inode attr fork offset based on the format */
int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}
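
/*
 * di_forkoff is expressed in 8-byte units.  For example (assuming a 4-byte
 * xfs_dev_t), the XFS_DINODE_FMT_DEV case above yields
 * roundup(4, 8) >> 3 == 1, i.e. the attribute fork starts 8 bytes into the
 * inode literal area.
 */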

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extnum_t		nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	struct xfs_btree_block	*block = ifp->if_broot;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	new;
	xfs_fsblock_t		bno;
	struct xfs_buf		*bp;
	xfs_extnum_t		i, j;
	int			level;
	__be64			*pp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	if (unlikely(level == 0)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			xfs_verify_fsbno(mp, bno), out_brelse);
		xfs_trans_brelse(tp, bp);
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;
	xfs_iext_first(ifp, &icur);

	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > nextents)) {
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					__func__, block, sizeof(*block),
					__this_address);
			error = -EFSCORRUPTED;
			goto out_brelse;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		for (j = 0; j < num_recs; j++, frp++, i++) {
			xfs_failaddr_t	fa;

			xfs_bmbt_disk_get_all(frp, &new);
			fa = xfs_bmap_validate_extent(ip, whichfork, &new);
			if (fa) {
				error = -EFSCORRUPTED;
				xfs_inode_verifier_error(ip, error,
						"xfs_iread_extents(2)",
						frp, sizeof(*frp), fa);
				goto out_brelse;
			}
			xfs_iext_insert(ip, &icur, &new, state);
			trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
			xfs_iext_next(ifp, &icur);
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
	}

	if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(i == xfs_iext_count(ifp));

	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;

out_brelse:
	xfs_trans_brelse(tp, bp);
out:
	xfs_iext_destroy(ifp);
	return error;
}

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free.  This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork.  Return 0 if the fork is currently local (in-inode).
 */
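
/*
 * For example (hypothetical layout): with extents covering blocks 0-9 and
 * 20-29, a request for len = 5 returns offset 10 (the hole between the
 * extents), while len = 15 skips that hole and returns 30, the first block
 * past the last extent.
 */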
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	struct xfs_ifork *ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */
	struct xfs_iext_cursor icur;

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_first(ifp, &icur);
	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}

/*
 * Extent tree manipulation functions used during allocation.
 */
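
/*
 * Overview sketch (summarising the code below, not a change in behaviour):
 * xfs_bmap_add_extent_delay_real() classifies the new real extent against
 * the delalloc extent it replaces using four state bits -
 * BMAP_LEFT_FILLING, BMAP_RIGHT_FILLING, BMAP_LEFT_CONTIG and
 * BMAP_RIGHT_CONTIG - and switches on the resulting combination, merging
 * with contiguous neighbours where possible and splitting the remaining
 * delalloc record otherwise.
 */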

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
	struct xfs_bmbt_irec	old;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		(*nextents)--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
1911 * 1912 * We start with a delayed allocation: 1913 * 1914 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ 1915 * PREV @ idx 1916 * 1917 * and we are allocating: 1918 * +rrrrrrrrrrrrrrrrr+ 1919 * new 1920 * 1921 * and we set it up for insertion as: 1922 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ 1923 * new 1924 * PREV @ idx LEFT RIGHT 1925 * inserted at idx + 1 1926 */ 1927 old = PREV; 1928 1929 /* LEFT is the new middle */ 1930 LEFT = *new; 1931 1932 /* RIGHT is the new right */ 1933 RIGHT.br_state = PREV.br_state; 1934 RIGHT.br_startoff = new_endoff; 1935 RIGHT.br_blockcount = 1936 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1937 RIGHT.br_startblock = 1938 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1939 RIGHT.br_blockcount)); 1940 1941 /* truncate PREV */ 1942 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 1943 PREV.br_startblock = 1944 nullstartblock(xfs_bmap_worst_indlen(bma->ip, 1945 PREV.br_blockcount)); 1946 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); 1947 1948 xfs_iext_next(ifp, &bma->icur); 1949 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state); 1950 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state); 1951 (*nextents)++; 1952 1953 if (bma->cur == NULL) 1954 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1955 else { 1956 rval = XFS_ILOG_CORE; 1957 error = xfs_bmbt_lookup_eq(bma->cur, new, &i); 1958 if (error) 1959 goto done; 1960 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 1961 error = xfs_btree_insert(bma->cur, &i); 1962 if (error) 1963 goto done; 1964 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 1965 } 1966 1967 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1968 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1969 &bma->cur, 1, &tmp_rval, whichfork); 1970 rval |= tmp_rval; 1971 if (error) 1972 goto done; 1973 } 1974 1975 da_new = startblockval(PREV.br_startblock) + 1976 startblockval(RIGHT.br_startblock); 1977 break; 1978 1979 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1980 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1981 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 1982 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1983 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1984 case BMAP_LEFT_CONTIG: 1985 case BMAP_RIGHT_CONTIG: 1986 /* 1987 * These cases are all impossible. 
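 * A *_CONTIG flag can only ever be set together with the matching
 * *_FILLING flag: the new allocation lies inside PREV, so it can only
 * abut the left neighbour if it starts at PREV's start, and can only
 * abut the right neighbour if it ends at PREV's end.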
1988 */ 1989 ASSERT(0); 1990 } 1991 1992 /* add reverse mapping unless caller opted out */ 1993 if (!(bma->flags & XFS_BMAPI_NORMAP)) { 1994 error = xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new); 1995 if (error) 1996 goto done; 1997 } 1998 1999 /* convert to a btree if necessary */ 2000 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 2001 int tmp_logflags; /* partial log flag return val */ 2002 2003 ASSERT(bma->cur == NULL); 2004 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 2005 &bma->cur, da_old > 0, &tmp_logflags, 2006 whichfork); 2007 bma->logflags |= tmp_logflags; 2008 if (error) 2009 goto done; 2010 } 2011 2012 if (da_new != da_old) 2013 xfs_mod_delalloc(mp, (int64_t)da_new - da_old); 2014 2015 if (bma->cur) { 2016 da_new += bma->cur->bc_private.b.allocated; 2017 bma->cur->bc_private.b.allocated = 0; 2018 } 2019 2020 /* adjust for changes in reserved delayed indirect blocks */ 2021 if (da_new != da_old) { 2022 ASSERT(state == 0 || da_new < da_old); 2023 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), 2024 false); 2025 } 2026 2027 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 2028 done: 2029 if (whichfork != XFS_COW_FORK) 2030 bma->logflags |= rval; 2031 return error; 2032 #undef LEFT 2033 #undef RIGHT 2034 #undef PREV 2035 } 2036 2037 /* 2038 * Convert an unwritten allocation to a real allocation or vice versa. 2039 */ 2040 int /* error */ 2041 xfs_bmap_add_extent_unwritten_real( 2042 struct xfs_trans *tp, 2043 xfs_inode_t *ip, /* incore inode pointer */ 2044 int whichfork, 2045 struct xfs_iext_cursor *icur, 2046 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 2047 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 2048 int *logflagsp) /* inode logging flags */ 2049 { 2050 xfs_btree_cur_t *cur; /* btree cursor */ 2051 int error; /* error return value */ 2052 int i; /* temp state */ 2053 struct xfs_ifork *ifp; /* inode fork pointer */ 2054 xfs_fileoff_t new_endoff; /* end offset of new entry */ 2055 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 2056 /* left is 0, right is 1, prev is 2 */ 2057 int rval=0; /* return value (logging flags) */ 2058 int state = xfs_bmap_fork_to_state(whichfork); 2059 struct xfs_mount *mp = ip->i_mount; 2060 struct xfs_bmbt_irec old; 2061 2062 *logflagsp = 0; 2063 2064 cur = *curp; 2065 ifp = XFS_IFORK_PTR(ip, whichfork); 2066 2067 ASSERT(!isnullstartblock(new->br_startblock)); 2068 2069 XFS_STATS_INC(mp, xs_add_exlist); 2070 2071 #define LEFT r[0] 2072 #define RIGHT r[1] 2073 #define PREV r[2] 2074 2075 /* 2076 * Set up a bunch of variables to make the tests simpler. 2077 */ 2078 error = 0; 2079 xfs_iext_get_extent(ifp, icur, &PREV); 2080 ASSERT(new->br_state != PREV.br_state); 2081 new_endoff = new->br_startoff + new->br_blockcount; 2082 ASSERT(PREV.br_startoff <= new->br_startoff); 2083 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2084 2085 /* 2086 * Set flags determining what part of the previous oldext allocation 2087 * extent is being replaced by a newext allocation. 2088 */ 2089 if (PREV.br_startoff == new->br_startoff) 2090 state |= BMAP_LEFT_FILLING; 2091 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2092 state |= BMAP_RIGHT_FILLING; 2093 2094 /* 2095 * Check and set flags if this segment has a left neighbor. 2096 * Don't set contiguous if the combined extent would be too large. 
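 *
 * For example, a LEFT extent covering file blocks [0, 100) is a merge
 * candidate if it ends exactly at new->br_startoff, its disk blocks
 * end exactly at new->br_startblock, its state already matches
 * new->br_state, and the combined length still fits in the on-disk
 * extent length field (MAXEXTLEN).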
2097 */ 2098 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) { 2099 state |= BMAP_LEFT_VALID; 2100 if (isnullstartblock(LEFT.br_startblock)) 2101 state |= BMAP_LEFT_DELAY; 2102 } 2103 2104 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2105 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2106 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2107 LEFT.br_state == new->br_state && 2108 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2109 state |= BMAP_LEFT_CONTIG; 2110 2111 /* 2112 * Check and set flags if this segment has a right neighbor. 2113 * Don't set contiguous if the combined extent would be too large. 2114 * Also check for all-three-contiguous being too large. 2115 */ 2116 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) { 2117 state |= BMAP_RIGHT_VALID; 2118 if (isnullstartblock(RIGHT.br_startblock)) 2119 state |= BMAP_RIGHT_DELAY; 2120 } 2121 2122 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2123 new_endoff == RIGHT.br_startoff && 2124 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2125 new->br_state == RIGHT.br_state && 2126 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 2127 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2128 BMAP_RIGHT_FILLING)) != 2129 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2130 BMAP_RIGHT_FILLING) || 2131 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2132 <= MAXEXTLEN)) 2133 state |= BMAP_RIGHT_CONTIG; 2134 2135 /* 2136 * Switch out based on the FILLING and CONTIG state bits. 2137 */ 2138 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2139 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2140 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2141 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2142 /* 2143 * Setting all of a previous oldext extent to newext. 2144 * The left and right neighbors are both contiguous with new. 2145 */ 2146 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; 2147 2148 xfs_iext_remove(ip, icur, state); 2149 xfs_iext_remove(ip, icur, state); 2150 xfs_iext_prev(ifp, icur); 2151 xfs_iext_update_extent(ip, state, icur, &LEFT); 2152 XFS_IFORK_NEXT_SET(ip, whichfork, 2153 XFS_IFORK_NEXTENTS(ip, whichfork) - 2); 2154 if (cur == NULL) 2155 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2156 else { 2157 rval = XFS_ILOG_CORE; 2158 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2159 if (error) 2160 goto done; 2161 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2162 if ((error = xfs_btree_delete(cur, &i))) 2163 goto done; 2164 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2165 if ((error = xfs_btree_decrement(cur, 0, &i))) 2166 goto done; 2167 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2168 if ((error = xfs_btree_delete(cur, &i))) 2169 goto done; 2170 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2171 if ((error = xfs_btree_decrement(cur, 0, &i))) 2172 goto done; 2173 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2174 error = xfs_bmbt_update(cur, &LEFT); 2175 if (error) 2176 goto done; 2177 } 2178 break; 2179 2180 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2181 /* 2182 * Setting all of a previous oldext extent to newext. 2183 * The left neighbor is contiguous, the right is not. 
2184 */ 2185 LEFT.br_blockcount += PREV.br_blockcount; 2186 2187 xfs_iext_remove(ip, icur, state); 2188 xfs_iext_prev(ifp, icur); 2189 xfs_iext_update_extent(ip, state, icur, &LEFT); 2190 XFS_IFORK_NEXT_SET(ip, whichfork, 2191 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2192 if (cur == NULL) 2193 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2194 else { 2195 rval = XFS_ILOG_CORE; 2196 error = xfs_bmbt_lookup_eq(cur, &PREV, &i); 2197 if (error) 2198 goto done; 2199 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2200 if ((error = xfs_btree_delete(cur, &i))) 2201 goto done; 2202 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2203 if ((error = xfs_btree_decrement(cur, 0, &i))) 2204 goto done; 2205 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2206 error = xfs_bmbt_update(cur, &LEFT); 2207 if (error) 2208 goto done; 2209 } 2210 break; 2211 2212 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2213 /* 2214 * Setting all of a previous oldext extent to newext. 2215 * The right neighbor is contiguous, the left is not. 2216 */ 2217 PREV.br_blockcount += RIGHT.br_blockcount; 2218 PREV.br_state = new->br_state; 2219 2220 xfs_iext_next(ifp, icur); 2221 xfs_iext_remove(ip, icur, state); 2222 xfs_iext_prev(ifp, icur); 2223 xfs_iext_update_extent(ip, state, icur, &PREV); 2224 2225 XFS_IFORK_NEXT_SET(ip, whichfork, 2226 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2227 if (cur == NULL) 2228 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2229 else { 2230 rval = XFS_ILOG_CORE; 2231 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2232 if (error) 2233 goto done; 2234 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2235 if ((error = xfs_btree_delete(cur, &i))) 2236 goto done; 2237 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2238 if ((error = xfs_btree_decrement(cur, 0, &i))) 2239 goto done; 2240 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2241 error = xfs_bmbt_update(cur, &PREV); 2242 if (error) 2243 goto done; 2244 } 2245 break; 2246 2247 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2248 /* 2249 * Setting all of a previous oldext extent to newext. 2250 * Neither the left nor right neighbors are contiguous with 2251 * the new one. 2252 */ 2253 PREV.br_state = new->br_state; 2254 xfs_iext_update_extent(ip, state, icur, &PREV); 2255 2256 if (cur == NULL) 2257 rval = XFS_ILOG_DEXT; 2258 else { 2259 rval = 0; 2260 error = xfs_bmbt_lookup_eq(cur, new, &i); 2261 if (error) 2262 goto done; 2263 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2264 error = xfs_bmbt_update(cur, &PREV); 2265 if (error) 2266 goto done; 2267 } 2268 break; 2269 2270 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2271 /* 2272 * Setting the first part of a previous oldext extent to newext. 2273 * The left neighbor is contiguous. 
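 *
 * For example, with LEFT at file blocks [0, 100) and PREV (oldext) at
 * [100, 200), converting new = [100, 120) grows LEFT to [0, 120) and
 * shrinks PREV to [120, 200); PREV's start offset, start block and
 * length are all advanced by new->br_blockcount below.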
2274 */ 2275 LEFT.br_blockcount += new->br_blockcount; 2276 2277 old = PREV; 2278 PREV.br_startoff += new->br_blockcount; 2279 PREV.br_startblock += new->br_blockcount; 2280 PREV.br_blockcount -= new->br_blockcount; 2281 2282 xfs_iext_update_extent(ip, state, icur, &PREV); 2283 xfs_iext_prev(ifp, icur); 2284 xfs_iext_update_extent(ip, state, icur, &LEFT); 2285 2286 if (cur == NULL) 2287 rval = XFS_ILOG_DEXT; 2288 else { 2289 rval = 0; 2290 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2291 if (error) 2292 goto done; 2293 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2294 error = xfs_bmbt_update(cur, &PREV); 2295 if (error) 2296 goto done; 2297 error = xfs_btree_decrement(cur, 0, &i); 2298 if (error) 2299 goto done; 2300 error = xfs_bmbt_update(cur, &LEFT); 2301 if (error) 2302 goto done; 2303 } 2304 break; 2305 2306 case BMAP_LEFT_FILLING: 2307 /* 2308 * Setting the first part of a previous oldext extent to newext. 2309 * The left neighbor is not contiguous. 2310 */ 2311 old = PREV; 2312 PREV.br_startoff += new->br_blockcount; 2313 PREV.br_startblock += new->br_blockcount; 2314 PREV.br_blockcount -= new->br_blockcount; 2315 2316 xfs_iext_update_extent(ip, state, icur, &PREV); 2317 xfs_iext_insert(ip, icur, new, state); 2318 XFS_IFORK_NEXT_SET(ip, whichfork, 2319 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2320 if (cur == NULL) 2321 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2322 else { 2323 rval = XFS_ILOG_CORE; 2324 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2325 if (error) 2326 goto done; 2327 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2328 error = xfs_bmbt_update(cur, &PREV); 2329 if (error) 2330 goto done; 2331 cur->bc_rec.b = *new; 2332 if ((error = xfs_btree_insert(cur, &i))) 2333 goto done; 2334 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2335 } 2336 break; 2337 2338 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2339 /* 2340 * Setting the last part of a previous oldext extent to newext. 2341 * The right neighbor is contiguous with the new allocation. 2342 */ 2343 old = PREV; 2344 PREV.br_blockcount -= new->br_blockcount; 2345 2346 RIGHT.br_startoff = new->br_startoff; 2347 RIGHT.br_startblock = new->br_startblock; 2348 RIGHT.br_blockcount += new->br_blockcount; 2349 2350 xfs_iext_update_extent(ip, state, icur, &PREV); 2351 xfs_iext_next(ifp, icur); 2352 xfs_iext_update_extent(ip, state, icur, &RIGHT); 2353 2354 if (cur == NULL) 2355 rval = XFS_ILOG_DEXT; 2356 else { 2357 rval = 0; 2358 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2359 if (error) 2360 goto done; 2361 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2362 error = xfs_bmbt_update(cur, &PREV); 2363 if (error) 2364 goto done; 2365 error = xfs_btree_increment(cur, 0, &i); 2366 if (error) 2367 goto done; 2368 error = xfs_bmbt_update(cur, &RIGHT); 2369 if (error) 2370 goto done; 2371 } 2372 break; 2373 2374 case BMAP_RIGHT_FILLING: 2375 /* 2376 * Setting the last part of a previous oldext extent to newext. 2377 * The right neighbor is not contiguous. 
2378 */ 2379 old = PREV; 2380 PREV.br_blockcount -= new->br_blockcount; 2381 2382 xfs_iext_update_extent(ip, state, icur, &PREV); 2383 xfs_iext_next(ifp, icur); 2384 xfs_iext_insert(ip, icur, new, state); 2385 2386 XFS_IFORK_NEXT_SET(ip, whichfork, 2387 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2388 if (cur == NULL) 2389 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2390 else { 2391 rval = XFS_ILOG_CORE; 2392 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2393 if (error) 2394 goto done; 2395 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2396 error = xfs_bmbt_update(cur, &PREV); 2397 if (error) 2398 goto done; 2399 error = xfs_bmbt_lookup_eq(cur, new, &i); 2400 if (error) 2401 goto done; 2402 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2403 if ((error = xfs_btree_insert(cur, &i))) 2404 goto done; 2405 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2406 } 2407 break; 2408 2409 case 0: 2410 /* 2411 * Setting the middle part of a previous oldext extent to 2412 * newext. Contiguity is impossible here. 2413 * One extent becomes three extents. 2414 */ 2415 old = PREV; 2416 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 2417 2418 r[0] = *new; 2419 r[1].br_startoff = new_endoff; 2420 r[1].br_blockcount = 2421 old.br_startoff + old.br_blockcount - new_endoff; 2422 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2423 r[1].br_state = PREV.br_state; 2424 2425 xfs_iext_update_extent(ip, state, icur, &PREV); 2426 xfs_iext_next(ifp, icur); 2427 xfs_iext_insert(ip, icur, &r[1], state); 2428 xfs_iext_insert(ip, icur, &r[0], state); 2429 2430 XFS_IFORK_NEXT_SET(ip, whichfork, 2431 XFS_IFORK_NEXTENTS(ip, whichfork) + 2); 2432 if (cur == NULL) 2433 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2434 else { 2435 rval = XFS_ILOG_CORE; 2436 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2437 if (error) 2438 goto done; 2439 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2440 /* new right extent - oldext */ 2441 error = xfs_bmbt_update(cur, &r[1]); 2442 if (error) 2443 goto done; 2444 /* new left extent - oldext */ 2445 cur->bc_rec.b = PREV; 2446 if ((error = xfs_btree_insert(cur, &i))) 2447 goto done; 2448 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2449 /* 2450 * Reset the cursor to the position of the new extent 2451 * we are about to insert as we can't trust it after 2452 * the previous insert. 2453 */ 2454 error = xfs_bmbt_lookup_eq(cur, new, &i); 2455 if (error) 2456 goto done; 2457 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2458 /* new middle extent - newext */ 2459 if ((error = xfs_btree_insert(cur, &i))) 2460 goto done; 2461 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2462 } 2463 break; 2464 2465 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2466 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2467 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2468 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2469 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2470 case BMAP_LEFT_CONTIG: 2471 case BMAP_RIGHT_CONTIG: 2472 /* 2473 * These cases are all impossible. 
2474 */ 2475 ASSERT(0); 2476 } 2477 2478 /* update reverse mappings */ 2479 error = xfs_rmap_convert_extent(mp, tp, ip, whichfork, new); 2480 if (error) 2481 goto done; 2482 2483 /* convert to a btree if necessary */ 2484 if (xfs_bmap_needs_btree(ip, whichfork)) { 2485 int tmp_logflags; /* partial log flag return val */ 2486 2487 ASSERT(cur == NULL); 2488 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 2489 &tmp_logflags, whichfork); 2490 *logflagsp |= tmp_logflags; 2491 if (error) 2492 goto done; 2493 } 2494 2495 /* clear out the allocated field, done with it now in any case. */ 2496 if (cur) { 2497 cur->bc_private.b.allocated = 0; 2498 *curp = cur; 2499 } 2500 2501 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2502 done: 2503 *logflagsp |= rval; 2504 return error; 2505 #undef LEFT 2506 #undef RIGHT 2507 #undef PREV 2508 } 2509 2510 /* 2511 * Convert a hole to a delayed allocation. 2512 */ 2513 STATIC void 2514 xfs_bmap_add_extent_hole_delay( 2515 xfs_inode_t *ip, /* incore inode pointer */ 2516 int whichfork, 2517 struct xfs_iext_cursor *icur, 2518 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2519 { 2520 struct xfs_ifork *ifp; /* inode fork pointer */ 2521 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2522 xfs_filblks_t newlen=0; /* new indirect size */ 2523 xfs_filblks_t oldlen=0; /* old indirect size */ 2524 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2525 int state = xfs_bmap_fork_to_state(whichfork); 2526 xfs_filblks_t temp; /* temp for indirect calculations */ 2527 2528 ifp = XFS_IFORK_PTR(ip, whichfork); 2529 ASSERT(isnullstartblock(new->br_startblock)); 2530 2531 /* 2532 * Check and set flags if this segment has a left neighbor 2533 */ 2534 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2535 state |= BMAP_LEFT_VALID; 2536 if (isnullstartblock(left.br_startblock)) 2537 state |= BMAP_LEFT_DELAY; 2538 } 2539 2540 /* 2541 * Check and set flags if the current (right) segment exists. 2542 * If it doesn't exist, we're converting the hole at end-of-file. 2543 */ 2544 if (xfs_iext_get_extent(ifp, icur, &right)) { 2545 state |= BMAP_RIGHT_VALID; 2546 if (isnullstartblock(right.br_startblock)) 2547 state |= BMAP_RIGHT_DELAY; 2548 } 2549 2550 /* 2551 * Set contiguity flags on the left and right neighbors. 2552 * Don't let extents get too large, even if the pieces are contiguous. 2553 */ 2554 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2555 left.br_startoff + left.br_blockcount == new->br_startoff && 2556 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2557 state |= BMAP_LEFT_CONTIG; 2558 2559 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2560 new->br_startoff + new->br_blockcount == right.br_startoff && 2561 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2562 (!(state & BMAP_LEFT_CONTIG) || 2563 (left.br_blockcount + new->br_blockcount + 2564 right.br_blockcount <= MAXEXTLEN))) 2565 state |= BMAP_RIGHT_CONTIG; 2566 2567 /* 2568 * Switch out based on the contiguity flags. 2569 */ 2570 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2571 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2572 /* 2573 * New allocation is contiguous with delayed allocations 2574 * on the left and on the right. 2575 * Merge all three into a single extent record. 
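 *
 * The merged extent keeps a single worst-case indirect-block
 * reservation, roughly:
 *
 *	oldlen = indlen(left) + indlen(new) + indlen(right);
 *	newlen = min(xfs_bmap_worst_indlen(ip, merged_len), oldlen);
 *
 * and any surplus (oldlen - newlen) is handed back to the free block
 * counter at the bottom of this function.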
2576 */ 2577 temp = left.br_blockcount + new->br_blockcount + 2578 right.br_blockcount; 2579 2580 oldlen = startblockval(left.br_startblock) + 2581 startblockval(new->br_startblock) + 2582 startblockval(right.br_startblock); 2583 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2584 oldlen); 2585 left.br_startblock = nullstartblock(newlen); 2586 left.br_blockcount = temp; 2587 2588 xfs_iext_remove(ip, icur, state); 2589 xfs_iext_prev(ifp, icur); 2590 xfs_iext_update_extent(ip, state, icur, &left); 2591 break; 2592 2593 case BMAP_LEFT_CONTIG: 2594 /* 2595 * New allocation is contiguous with a delayed allocation 2596 * on the left. 2597 * Merge the new allocation with the left neighbor. 2598 */ 2599 temp = left.br_blockcount + new->br_blockcount; 2600 2601 oldlen = startblockval(left.br_startblock) + 2602 startblockval(new->br_startblock); 2603 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2604 oldlen); 2605 left.br_blockcount = temp; 2606 left.br_startblock = nullstartblock(newlen); 2607 2608 xfs_iext_prev(ifp, icur); 2609 xfs_iext_update_extent(ip, state, icur, &left); 2610 break; 2611 2612 case BMAP_RIGHT_CONTIG: 2613 /* 2614 * New allocation is contiguous with a delayed allocation 2615 * on the right. 2616 * Merge the new allocation with the right neighbor. 2617 */ 2618 temp = new->br_blockcount + right.br_blockcount; 2619 oldlen = startblockval(new->br_startblock) + 2620 startblockval(right.br_startblock); 2621 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2622 oldlen); 2623 right.br_startoff = new->br_startoff; 2624 right.br_startblock = nullstartblock(newlen); 2625 right.br_blockcount = temp; 2626 xfs_iext_update_extent(ip, state, icur, &right); 2627 break; 2628 2629 case 0: 2630 /* 2631 * New allocation is not contiguous with another 2632 * delayed allocation. 2633 * Insert a new entry. 2634 */ 2635 oldlen = newlen = 0; 2636 xfs_iext_insert(ip, icur, new, state); 2637 break; 2638 } 2639 if (oldlen != newlen) { 2640 ASSERT(oldlen > newlen); 2641 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2642 false); 2643 /* 2644 * Nothing to do for disk quota accounting here. 2645 */ 2646 xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen); 2647 } 2648 } 2649 2650 /* 2651 * Convert a hole to a real allocation. 2652 */ 2653 STATIC int /* error */ 2654 xfs_bmap_add_extent_hole_real( 2655 struct xfs_trans *tp, 2656 struct xfs_inode *ip, 2657 int whichfork, 2658 struct xfs_iext_cursor *icur, 2659 struct xfs_btree_cur **curp, 2660 struct xfs_bmbt_irec *new, 2661 int *logflagsp, 2662 int flags) 2663 { 2664 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 2665 struct xfs_mount *mp = ip->i_mount; 2666 struct xfs_btree_cur *cur = *curp; 2667 int error; /* error return value */ 2668 int i; /* temp state */ 2669 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2670 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2671 int rval=0; /* return value (logging flags) */ 2672 int state = xfs_bmap_fork_to_state(whichfork); 2673 struct xfs_bmbt_irec old; 2674 2675 ASSERT(!isnullstartblock(new->br_startblock)); 2676 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL)); 2677 2678 XFS_STATS_INC(mp, xs_add_exlist); 2679 2680 /* 2681 * Check and set flags if this segment has a left neighbor. 
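 * A delalloc neighbour sets BMAP_LEFT_DELAY and is never merged with
 * the real allocation being added here.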
2682 */ 2683 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2684 state |= BMAP_LEFT_VALID; 2685 if (isnullstartblock(left.br_startblock)) 2686 state |= BMAP_LEFT_DELAY; 2687 } 2688 2689 /* 2690 * Check and set flags if this segment has a current value. 2691 * Not true if we're inserting into the "hole" at eof. 2692 */ 2693 if (xfs_iext_get_extent(ifp, icur, &right)) { 2694 state |= BMAP_RIGHT_VALID; 2695 if (isnullstartblock(right.br_startblock)) 2696 state |= BMAP_RIGHT_DELAY; 2697 } 2698 2699 /* 2700 * We're inserting a real allocation between "left" and "right". 2701 * Set the contiguity flags. Don't let extents get too large. 2702 */ 2703 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2704 left.br_startoff + left.br_blockcount == new->br_startoff && 2705 left.br_startblock + left.br_blockcount == new->br_startblock && 2706 left.br_state == new->br_state && 2707 left.br_blockcount + new->br_blockcount <= MAXEXTLEN) 2708 state |= BMAP_LEFT_CONTIG; 2709 2710 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2711 new->br_startoff + new->br_blockcount == right.br_startoff && 2712 new->br_startblock + new->br_blockcount == right.br_startblock && 2713 new->br_state == right.br_state && 2714 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 2715 (!(state & BMAP_LEFT_CONTIG) || 2716 left.br_blockcount + new->br_blockcount + 2717 right.br_blockcount <= MAXEXTLEN)) 2718 state |= BMAP_RIGHT_CONTIG; 2719 2720 error = 0; 2721 /* 2722 * Select which case we're in here, and implement it. 2723 */ 2724 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2725 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2726 /* 2727 * New allocation is contiguous with real allocations on the 2728 * left and on the right. 2729 * Merge all three into a single extent record. 2730 */ 2731 left.br_blockcount += new->br_blockcount + right.br_blockcount; 2732 2733 xfs_iext_remove(ip, icur, state); 2734 xfs_iext_prev(ifp, icur); 2735 xfs_iext_update_extent(ip, state, icur, &left); 2736 2737 XFS_IFORK_NEXT_SET(ip, whichfork, 2738 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2739 if (cur == NULL) { 2740 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2741 } else { 2742 rval = XFS_ILOG_CORE; 2743 error = xfs_bmbt_lookup_eq(cur, &right, &i); 2744 if (error) 2745 goto done; 2746 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2747 error = xfs_btree_delete(cur, &i); 2748 if (error) 2749 goto done; 2750 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2751 error = xfs_btree_decrement(cur, 0, &i); 2752 if (error) 2753 goto done; 2754 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2755 error = xfs_bmbt_update(cur, &left); 2756 if (error) 2757 goto done; 2758 } 2759 break; 2760 2761 case BMAP_LEFT_CONTIG: 2762 /* 2763 * New allocation is contiguous with a real allocation 2764 * on the left. 2765 * Merge the new allocation with the left neighbor. 2766 */ 2767 old = left; 2768 left.br_blockcount += new->br_blockcount; 2769 2770 xfs_iext_prev(ifp, icur); 2771 xfs_iext_update_extent(ip, state, icur, &left); 2772 2773 if (cur == NULL) { 2774 rval = xfs_ilog_fext(whichfork); 2775 } else { 2776 rval = 0; 2777 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2778 if (error) 2779 goto done; 2780 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2781 error = xfs_bmbt_update(cur, &left); 2782 if (error) 2783 goto done; 2784 } 2785 break; 2786 2787 case BMAP_RIGHT_CONTIG: 2788 /* 2789 * New allocation is contiguous with a real allocation 2790 * on the right. 2791 * Merge the new allocation with the right neighbor. 
2792 */ 2793 old = right; 2794 2795 right.br_startoff = new->br_startoff; 2796 right.br_startblock = new->br_startblock; 2797 right.br_blockcount += new->br_blockcount; 2798 xfs_iext_update_extent(ip, state, icur, &right); 2799 2800 if (cur == NULL) { 2801 rval = xfs_ilog_fext(whichfork); 2802 } else { 2803 rval = 0; 2804 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2805 if (error) 2806 goto done; 2807 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2808 error = xfs_bmbt_update(cur, &right); 2809 if (error) 2810 goto done; 2811 } 2812 break; 2813 2814 case 0: 2815 /* 2816 * New allocation is not contiguous with another 2817 * real allocation. 2818 * Insert a new entry. 2819 */ 2820 xfs_iext_insert(ip, icur, new, state); 2821 XFS_IFORK_NEXT_SET(ip, whichfork, 2822 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2823 if (cur == NULL) { 2824 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2825 } else { 2826 rval = XFS_ILOG_CORE; 2827 error = xfs_bmbt_lookup_eq(cur, new, &i); 2828 if (error) 2829 goto done; 2830 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); 2831 error = xfs_btree_insert(cur, &i); 2832 if (error) 2833 goto done; 2834 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 2835 } 2836 break; 2837 } 2838 2839 /* add reverse mapping unless caller opted out */ 2840 if (!(flags & XFS_BMAPI_NORMAP)) { 2841 error = xfs_rmap_map_extent(tp, ip, whichfork, new); 2842 if (error) 2843 goto done; 2844 } 2845 2846 /* convert to a btree if necessary */ 2847 if (xfs_bmap_needs_btree(ip, whichfork)) { 2848 int tmp_logflags; /* partial log flag return val */ 2849 2850 ASSERT(cur == NULL); 2851 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, 2852 &tmp_logflags, whichfork); 2853 *logflagsp |= tmp_logflags; 2854 cur = *curp; 2855 if (error) 2856 goto done; 2857 } 2858 2859 /* clear out the allocated field, done with it now in any case. */ 2860 if (cur) 2861 cur->bc_private.b.allocated = 0; 2862 2863 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 2864 done: 2865 *logflagsp |= rval; 2866 return error; 2867 } 2868 2869 /* 2870 * Functions used in the extent read, allocate and remove paths 2871 */ 2872 2873 /* 2874 * Adjust the size of the new extent based on di_extsize and rt extsize. 2875 */ 2876 int 2877 xfs_bmap_extsize_align( 2878 xfs_mount_t *mp, 2879 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 2880 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 2881 xfs_extlen_t extsz, /* align to this extent size */ 2882 int rt, /* is this a realtime inode? */ 2883 int eof, /* is extent at end-of-file? */ 2884 int delay, /* creating delalloc extent? */ 2885 int convert, /* overwriting unwritten extent? */ 2886 xfs_fileoff_t *offp, /* in/out: aligned offset */ 2887 xfs_extlen_t *lenp) /* in/out: aligned length */ 2888 { 2889 xfs_fileoff_t orig_off; /* original offset */ 2890 xfs_extlen_t orig_alen; /* original length */ 2891 xfs_fileoff_t orig_end; /* original off+len */ 2892 xfs_fileoff_t nexto; /* next file offset */ 2893 xfs_fileoff_t prevo; /* previous file offset */ 2894 xfs_fileoff_t align_off; /* temp for offset */ 2895 xfs_extlen_t align_alen; /* temp for length */ 2896 xfs_extlen_t temp; /* temp for calculations */ 2897 2898 if (convert) 2899 return 0; 2900 2901 orig_off = align_off = *offp; 2902 orig_alen = align_alen = *lenp; 2903 orig_end = orig_off + orig_alen; 2904 2905 /* 2906 * If this request overlaps an existing extent, then don't 2907 * attempt to perform any additional alignment. 
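 * Otherwise the offset and length are widened to extsz boundaries
 * below; e.g. with a 16-block extent size hint, a request for file
 * blocks [21, 31) is expanded to the aligned range [16, 32).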
2908 */ 2909 if (!delay && !eof && 2910 (orig_off >= gotp->br_startoff) && 2911 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 2912 return 0; 2913 } 2914 2915 /* 2916 * If the file offset is unaligned vs. the extent size 2917 * we need to align it. This will be possible unless 2918 * the file was previously written with a kernel that didn't 2919 * perform this alignment, or if a truncate shot us in the 2920 * foot. 2921 */ 2922 div_u64_rem(orig_off, extsz, &temp); 2923 if (temp) { 2924 align_alen += temp; 2925 align_off -= temp; 2926 } 2927 2928 /* Same adjustment for the end of the requested area. */ 2929 temp = (align_alen % extsz); 2930 if (temp) 2931 align_alen += extsz - temp; 2932 2933 /* 2934 * For large extent hint sizes, the aligned extent might be larger than 2935 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls 2936 * the length back under MAXEXTLEN. The outer allocation loops handle 2937 * short allocation just fine, so it is safe to do this. We only want to 2938 * do it when we are forced to, though, because it means more allocation 2939 * operations are required. 2940 */ 2941 while (align_alen > MAXEXTLEN) 2942 align_alen -= extsz; 2943 ASSERT(align_alen <= MAXEXTLEN); 2944 2945 /* 2946 * If the previous block overlaps with this proposed allocation 2947 * then move the start forward without adjusting the length. 2948 */ 2949 if (prevp->br_startoff != NULLFILEOFF) { 2950 if (prevp->br_startblock == HOLESTARTBLOCK) 2951 prevo = prevp->br_startoff; 2952 else 2953 prevo = prevp->br_startoff + prevp->br_blockcount; 2954 } else 2955 prevo = 0; 2956 if (align_off != orig_off && align_off < prevo) 2957 align_off = prevo; 2958 /* 2959 * If the next block overlaps with this proposed allocation 2960 * then move the start back without adjusting the length, 2961 * but not before offset 0. 2962 * This may of course make the start overlap previous block, 2963 * and if we hit the offset 0 limit then the next block 2964 * can still overlap too. 2965 */ 2966 if (!eof && gotp->br_startoff != NULLFILEOFF) { 2967 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 2968 (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 2969 nexto = gotp->br_startoff + gotp->br_blockcount; 2970 else 2971 nexto = gotp->br_startoff; 2972 } else 2973 nexto = NULLFILEOFF; 2974 if (!eof && 2975 align_off + align_alen != orig_end && 2976 align_off + align_alen > nexto) 2977 align_off = nexto > align_alen ? nexto - align_alen : 0; 2978 /* 2979 * If we're now overlapping the next or previous extent that 2980 * means we can't fit an extsz piece in this hole. Just move 2981 * the start forward to the first valid spot and set 2982 * the length so we hit the end. 2983 */ 2984 if (align_off != orig_off && align_off < prevo) 2985 align_off = prevo; 2986 if (align_off + align_alen != orig_end && 2987 align_off + align_alen > nexto && 2988 nexto != NULLFILEOFF) { 2989 ASSERT(nexto > prevo); 2990 align_alen = nexto - align_off; 2991 } 2992 2993 /* 2994 * If realtime, and the result isn't a multiple of the realtime 2995 * extent size we need to remove blocks until it is. 2996 */ 2997 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 2998 /* 2999 * We're not covering the original request, or 3000 * we won't be able to once we fix the length. 3001 */ 3002 if (orig_off < align_off || 3003 orig_end > align_off + align_alen || 3004 align_alen - temp < orig_alen) 3005 return -EINVAL; 3006 /* 3007 * Try to fix it by moving the start up. 
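 * (temp is the remainder of align_alen modulo the realtime extent
 * size; e.g. with a 4-block rt extent size and align_alen = 18,
 * temp = 2.)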
3008 */ 3009 if (align_off + temp <= orig_off) { 3010 align_alen -= temp; 3011 align_off += temp; 3012 } 3013 /* 3014 * Try to fix it by moving the end in. 3015 */ 3016 else if (align_off + align_alen - temp >= orig_end) 3017 align_alen -= temp; 3018 /* 3019 * Set the start to the minimum then trim the length. 3020 */ 3021 else { 3022 align_alen -= orig_off - align_off; 3023 align_off = orig_off; 3024 align_alen -= align_alen % mp->m_sb.sb_rextsize; 3025 } 3026 /* 3027 * Result doesn't cover the request, fail it. 3028 */ 3029 if (orig_off < align_off || orig_end > align_off + align_alen) 3030 return -EINVAL; 3031 } else { 3032 ASSERT(orig_off >= align_off); 3033 /* see MAXEXTLEN handling above */ 3034 ASSERT(orig_end <= align_off + align_alen || 3035 align_alen + extsz > MAXEXTLEN); 3036 } 3037 3038 #ifdef DEBUG 3039 if (!eof && gotp->br_startoff != NULLFILEOFF) 3040 ASSERT(align_off + align_alen <= gotp->br_startoff); 3041 if (prevp->br_startoff != NULLFILEOFF) 3042 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 3043 #endif 3044 3045 *lenp = align_alen; 3046 *offp = align_off; 3047 return 0; 3048 } 3049 3050 #define XFS_ALLOC_GAP_UNITS 4 3051 3052 void 3053 xfs_bmap_adjacent( 3054 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3055 { 3056 xfs_fsblock_t adjust; /* adjustment to block numbers */ 3057 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3058 xfs_mount_t *mp; /* mount point structure */ 3059 int nullfb; /* true if ap->firstblock isn't set */ 3060 int rt; /* true if inode is realtime */ 3061 3062 #define ISVALID(x,y) \ 3063 (rt ? \ 3064 (x) < mp->m_sb.sb_rblocks : \ 3065 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 3066 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 3067 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 3068 3069 mp = ap->ip->i_mount; 3070 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3071 rt = XFS_IS_REALTIME_INODE(ap->ip) && 3072 xfs_alloc_is_userdata(ap->datatype); 3073 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3074 ap->tp->t_firstblock); 3075 /* 3076 * If allocating at eof, and there's a previous real block, 3077 * try to use its last block as our starting point. 3078 */ 3079 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 3080 !isnullstartblock(ap->prev.br_startblock) && 3081 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 3082 ap->prev.br_startblock)) { 3083 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 3084 /* 3085 * Adjust for the gap between prevp and us. 3086 */ 3087 adjust = ap->offset - 3088 (ap->prev.br_startoff + ap->prev.br_blockcount); 3089 if (adjust && 3090 ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 3091 ap->blkno += adjust; 3092 } 3093 /* 3094 * If not at eof, then compare the two neighbor blocks. 3095 * Figure out whether either one gives us a good starting point, 3096 * and pick the better one. 3097 */ 3098 else if (!ap->eof) { 3099 xfs_fsblock_t gotbno; /* right side block number */ 3100 xfs_fsblock_t gotdiff=0; /* right side difference */ 3101 xfs_fsblock_t prevbno; /* left side block number */ 3102 xfs_fsblock_t prevdiff=0; /* left side difference */ 3103 3104 /* 3105 * If there's a previous (left) block, select a requested 3106 * start block based on it. 3107 */ 3108 if (ap->prev.br_startoff != NULLFILEOFF && 3109 !isnullstartblock(ap->prev.br_startblock) && 3110 (prevbno = ap->prev.br_startblock + 3111 ap->prev.br_blockcount) && 3112 ISVALID(prevbno, ap->prev.br_startblock)) { 3113 /* 3114 * Calculate gap to end of previous block. 
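 * For example, if the previous extent covers file blocks [0, 100) and
 * we are allocating at file offset 110, the gap is 10 blocks; when the
 * gap is no larger than XFS_ALLOC_GAP_UNITS times the allocation
 * length, prevbno is biased forward by the same 10 blocks so the new
 * extent lands where it would if the hole were filled in.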
3115 */ 3116 adjust = prevdiff = ap->offset - 3117 (ap->prev.br_startoff + 3118 ap->prev.br_blockcount); 3119 /* 3120 * Figure the startblock based on the previous block's 3121 * end and the gap size. 3122 * Heuristic! 3123 * If the gap is large relative to the piece we're 3124 * allocating, or using it gives us an invalid block 3125 * number, then just use the end of the previous block. 3126 */ 3127 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3128 ISVALID(prevbno + prevdiff, 3129 ap->prev.br_startblock)) 3130 prevbno += adjust; 3131 else 3132 prevdiff += adjust; 3133 /* 3134 * If the firstblock forbids it, can't use it, 3135 * must use default. 3136 */ 3137 if (!rt && !nullfb && 3138 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno) 3139 prevbno = NULLFSBLOCK; 3140 } 3141 /* 3142 * No previous block or can't follow it, just default. 3143 */ 3144 else 3145 prevbno = NULLFSBLOCK; 3146 /* 3147 * If there's a following (right) block, select a requested 3148 * start block based on it. 3149 */ 3150 if (!isnullstartblock(ap->got.br_startblock)) { 3151 /* 3152 * Calculate gap to start of next block. 3153 */ 3154 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3155 /* 3156 * Figure the startblock based on the next block's 3157 * start and the gap size. 3158 */ 3159 gotbno = ap->got.br_startblock; 3160 /* 3161 * Heuristic! 3162 * If the gap is large relative to the piece we're 3163 * allocating, or using it gives us an invalid block 3164 * number, then just use the start of the next block 3165 * offset by our length. 3166 */ 3167 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3168 ISVALID(gotbno - gotdiff, gotbno)) 3169 gotbno -= adjust; 3170 else if (ISVALID(gotbno - ap->length, gotbno)) { 3171 gotbno -= ap->length; 3172 gotdiff += adjust - ap->length; 3173 } else 3174 gotdiff += adjust; 3175 /* 3176 * If the firstblock forbids it, can't use it, 3177 * must use default. 3178 */ 3179 if (!rt && !nullfb && 3180 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno) 3181 gotbno = NULLFSBLOCK; 3182 } 3183 /* 3184 * No next block, just default. 3185 */ 3186 else 3187 gotbno = NULLFSBLOCK; 3188 /* 3189 * If both valid, pick the better one, else the only good 3190 * one, else ap->blkno is already set (to 0 or the inode block). 3191 */ 3192 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3193 ap->blkno = prevdiff <= gotdiff ? 
prevbno : gotbno; 3194 else if (prevbno != NULLFSBLOCK) 3195 ap->blkno = prevbno; 3196 else if (gotbno != NULLFSBLOCK) 3197 ap->blkno = gotbno; 3198 } 3199 #undef ISVALID 3200 } 3201 3202 static int 3203 xfs_bmap_longest_free_extent( 3204 struct xfs_trans *tp, 3205 xfs_agnumber_t ag, 3206 xfs_extlen_t *blen, 3207 int *notinit) 3208 { 3209 struct xfs_mount *mp = tp->t_mountp; 3210 struct xfs_perag *pag; 3211 xfs_extlen_t longest; 3212 int error = 0; 3213 3214 pag = xfs_perag_get(mp, ag); 3215 if (!pag->pagf_init) { 3216 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK); 3217 if (error) 3218 goto out; 3219 3220 if (!pag->pagf_init) { 3221 *notinit = 1; 3222 goto out; 3223 } 3224 } 3225 3226 longest = xfs_alloc_longest_free_extent(pag, 3227 xfs_alloc_min_freelist(mp, pag), 3228 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE)); 3229 if (*blen < longest) 3230 *blen = longest; 3231 3232 out: 3233 xfs_perag_put(pag); 3234 return error; 3235 } 3236 3237 static void 3238 xfs_bmap_select_minlen( 3239 struct xfs_bmalloca *ap, 3240 struct xfs_alloc_arg *args, 3241 xfs_extlen_t *blen, 3242 int notinit) 3243 { 3244 if (notinit || *blen < ap->minlen) { 3245 /* 3246 * Since we did a BUF_TRYLOCK above, it is possible that 3247 * there is space for this request. 3248 */ 3249 args->minlen = ap->minlen; 3250 } else if (*blen < args->maxlen) { 3251 /* 3252 * If the best seen length is less than the request length, 3253 * use the best as the minimum. 3254 */ 3255 args->minlen = *blen; 3256 } else { 3257 /* 3258 * Otherwise we've seen an extent as big as maxlen, use that 3259 * as the minimum. 3260 */ 3261 args->minlen = args->maxlen; 3262 } 3263 } 3264 3265 STATIC int 3266 xfs_bmap_btalloc_nullfb( 3267 struct xfs_bmalloca *ap, 3268 struct xfs_alloc_arg *args, 3269 xfs_extlen_t *blen) 3270 { 3271 struct xfs_mount *mp = ap->ip->i_mount; 3272 xfs_agnumber_t ag, startag; 3273 int notinit = 0; 3274 int error; 3275 3276 args->type = XFS_ALLOCTYPE_START_BNO; 3277 args->total = ap->total; 3278 3279 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3280 if (startag == NULLAGNUMBER) 3281 startag = ag = 0; 3282 3283 while (*blen < args->maxlen) { 3284 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3285 ¬init); 3286 if (error) 3287 return error; 3288 3289 if (++ag == mp->m_sb.sb_agcount) 3290 ag = 0; 3291 if (ag == startag) 3292 break; 3293 } 3294 3295 xfs_bmap_select_minlen(ap, args, blen, notinit); 3296 return 0; 3297 } 3298 3299 STATIC int 3300 xfs_bmap_btalloc_filestreams( 3301 struct xfs_bmalloca *ap, 3302 struct xfs_alloc_arg *args, 3303 xfs_extlen_t *blen) 3304 { 3305 struct xfs_mount *mp = ap->ip->i_mount; 3306 xfs_agnumber_t ag; 3307 int notinit = 0; 3308 int error; 3309 3310 args->type = XFS_ALLOCTYPE_NEAR_BNO; 3311 args->total = ap->total; 3312 3313 ag = XFS_FSB_TO_AGNO(mp, args->fsbno); 3314 if (ag == NULLAGNUMBER) 3315 ag = 0; 3316 3317 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init); 3318 if (error) 3319 return error; 3320 3321 if (*blen < args->maxlen) { 3322 error = xfs_filestream_new_ag(ap, &ag); 3323 if (error) 3324 return error; 3325 3326 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, 3327 ¬init); 3328 if (error) 3329 return error; 3330 3331 } 3332 3333 xfs_bmap_select_minlen(ap, args, blen, notinit); 3334 3335 /* 3336 * Set the failure fallback case to look in the selected AG as stream 3337 * may have moved. 
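 * (XFS_AGB_TO_FSB(mp, ag, 0) is the first block of the chosen AG, so a
 * later retry starts its search there.)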
3338 */ 3339 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0); 3340 return 0; 3341 } 3342 3343 /* Update all inode and quota accounting for the allocation we just did. */ 3344 static void 3345 xfs_bmap_btalloc_accounting( 3346 struct xfs_bmalloca *ap, 3347 struct xfs_alloc_arg *args) 3348 { 3349 if (ap->flags & XFS_BMAPI_COWFORK) { 3350 /* 3351 * COW fork blocks are in-core only and thus are treated as 3352 * in-core quota reservation (like delalloc blocks) even when 3353 * converted to real blocks. The quota reservation is not 3354 * accounted to disk until blocks are remapped to the data 3355 * fork. So if these blocks were previously delalloc, we 3356 * already have quota reservation and there's nothing to do 3357 * yet. 3358 */ 3359 if (ap->wasdel) { 3360 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len); 3361 return; 3362 } 3363 3364 /* 3365 * Otherwise, we've allocated blocks in a hole. The transaction 3366 * has acquired in-core quota reservation for this extent. 3367 * Rather than account these as real blocks, however, we reduce 3368 * the transaction quota reservation based on the allocation. 3369 * This essentially transfers the transaction quota reservation 3370 * to that of a delalloc extent. 3371 */ 3372 ap->ip->i_delayed_blks += args->len; 3373 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS, 3374 -(long)args->len); 3375 return; 3376 } 3377 3378 /* data/attr fork only */ 3379 ap->ip->i_d.di_nblocks += args->len; 3380 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3381 if (ap->wasdel) { 3382 ap->ip->i_delayed_blks -= args->len; 3383 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len); 3384 } 3385 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3386 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, 3387 args->len); 3388 } 3389 3390 STATIC int 3391 xfs_bmap_btalloc( 3392 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3393 { 3394 xfs_mount_t *mp; /* mount point structure */ 3395 xfs_alloctype_t atype = 0; /* type for allocation routines */ 3396 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3397 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 3398 xfs_agnumber_t ag; 3399 xfs_alloc_arg_t args; 3400 xfs_fileoff_t orig_offset; 3401 xfs_extlen_t orig_length; 3402 xfs_extlen_t blen; 3403 xfs_extlen_t nextminlen = 0; 3404 int nullfb; /* true if ap->firstblock isn't set */ 3405 int isaligned; 3406 int tryagain; 3407 int error; 3408 int stripe_align; 3409 3410 ASSERT(ap->length); 3411 orig_offset = ap->offset; 3412 orig_length = ap->length; 3413 3414 mp = ap->ip->i_mount; 3415 3416 /* stripe alignment for allocation is determined by mount parameters */ 3417 stripe_align = 0; 3418 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 3419 stripe_align = mp->m_swidth; 3420 else if (mp->m_dalign) 3421 stripe_align = mp->m_dalign; 3422 3423 if (ap->flags & XFS_BMAPI_COWFORK) 3424 align = xfs_get_cowextsz_hint(ap->ip); 3425 else if (xfs_alloc_is_userdata(ap->datatype)) 3426 align = xfs_get_extsz_hint(ap->ip); 3427 if (align) { 3428 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3429 align, 0, ap->eof, 0, ap->conv, 3430 &ap->offset, &ap->length); 3431 ASSERT(!error); 3432 ASSERT(ap->length); 3433 } 3434 3435 3436 nullfb = ap->tp->t_firstblock == NULLFSBLOCK; 3437 fb_agno = nullfb ? 
NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, 3438 ap->tp->t_firstblock); 3439 if (nullfb) { 3440 if (xfs_alloc_is_userdata(ap->datatype) && 3441 xfs_inode_is_filestream(ap->ip)) { 3442 ag = xfs_filestream_lookup_ag(ap->ip); 3443 ag = (ag != NULLAGNUMBER) ? ag : 0; 3444 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0); 3445 } else { 3446 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 3447 } 3448 } else 3449 ap->blkno = ap->tp->t_firstblock; 3450 3451 xfs_bmap_adjacent(ap); 3452 3453 /* 3454 * If allowed, use ap->blkno; otherwise must use firstblock since 3455 * it's in the right allocation group. 3456 */ 3457 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno) 3458 ; 3459 else 3460 ap->blkno = ap->tp->t_firstblock; 3461 /* 3462 * Normal allocation, done through xfs_alloc_vextent. 3463 */ 3464 tryagain = isaligned = 0; 3465 memset(&args, 0, sizeof(args)); 3466 args.tp = ap->tp; 3467 args.mp = mp; 3468 args.fsbno = ap->blkno; 3469 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE; 3470 3471 /* Trim the allocation back to the maximum an AG can fit. */ 3472 args.maxlen = min(ap->length, mp->m_ag_max_usable); 3473 blen = 0; 3474 if (nullfb) { 3475 /* 3476 * Search for an allocation group with a single extent large 3477 * enough for the request. If one isn't found, then adjust 3478 * the minimum allocation size to the largest space found. 3479 */ 3480 if (xfs_alloc_is_userdata(ap->datatype) && 3481 xfs_inode_is_filestream(ap->ip)) 3482 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen); 3483 else 3484 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen); 3485 if (error) 3486 return error; 3487 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) { 3488 if (xfs_inode_is_filestream(ap->ip)) 3489 args.type = XFS_ALLOCTYPE_FIRST_AG; 3490 else 3491 args.type = XFS_ALLOCTYPE_START_BNO; 3492 args.total = args.minlen = ap->minlen; 3493 } else { 3494 args.type = XFS_ALLOCTYPE_NEAR_BNO; 3495 args.total = ap->total; 3496 args.minlen = ap->minlen; 3497 } 3498 /* apply extent size hints if obtained earlier */ 3499 if (align) { 3500 args.prod = align; 3501 div_u64_rem(ap->offset, args.prod, &args.mod); 3502 if (args.mod) 3503 args.mod = args.prod - args.mod; 3504 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3505 args.prod = 1; 3506 args.mod = 0; 3507 } else { 3508 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3509 div_u64_rem(ap->offset, args.prod, &args.mod); 3510 if (args.mod) 3511 args.mod = args.prod - args.mod; 3512 } 3513 /* 3514 * If we are not low on available data blocks, and the 3515 * underlying logical volume manager is a stripe, and 3516 * the file offset is zero then try to allocate data 3517 * blocks on stripe unit boundary. 3518 * NOTE: ap->aeof is only set if the allocation length 3519 * is >= the stripe unit and the allocation offset is 3520 * at the end of file. 3521 */ 3522 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) { 3523 if (!ap->offset) { 3524 args.alignment = stripe_align; 3525 atype = args.type; 3526 isaligned = 1; 3527 /* 3528 * Adjust for alignment 3529 */ 3530 if (blen > args.alignment && blen <= args.maxlen) 3531 args.minlen = blen - args.alignment; 3532 args.minalignslop = 0; 3533 } else { 3534 /* 3535 * First try an exact bno allocation. 3536 * If it fails then do a near or start bno 3537 * allocation with alignment turned on. 3538 */ 3539 atype = args.type; 3540 tryagain = 1; 3541 args.type = XFS_ALLOCTYPE_THIS_BNO; 3542 args.alignment = 1; 3543 /* 3544 * Compute the minlen+alignment for the 3545 * next case. 
Set slop so that the value 3546 * of minlen+alignment+slop doesn't go up 3547 * between the calls. 3548 */ 3549 if (blen > stripe_align && blen <= args.maxlen) 3550 nextminlen = blen - stripe_align; 3551 else 3552 nextminlen = args.minlen; 3553 if (nextminlen + stripe_align > args.minlen + 1) 3554 args.minalignslop = 3555 nextminlen + stripe_align - 3556 args.minlen - 1; 3557 else 3558 args.minalignslop = 0; 3559 } 3560 } else { 3561 args.alignment = 1; 3562 args.minalignslop = 0; 3563 } 3564 args.minleft = ap->minleft; 3565 args.wasdel = ap->wasdel; 3566 args.resv = XFS_AG_RESV_NONE; 3567 args.datatype = ap->datatype; 3568 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) 3569 args.ip = ap->ip; 3570 3571 error = xfs_alloc_vextent(&args); 3572 if (error) 3573 return error; 3574 3575 if (tryagain && args.fsbno == NULLFSBLOCK) { 3576 /* 3577 * Exact allocation failed. Now try with alignment 3578 * turned on. 3579 */ 3580 args.type = atype; 3581 args.fsbno = ap->blkno; 3582 args.alignment = stripe_align; 3583 args.minlen = nextminlen; 3584 args.minalignslop = 0; 3585 isaligned = 1; 3586 if ((error = xfs_alloc_vextent(&args))) 3587 return error; 3588 } 3589 if (isaligned && args.fsbno == NULLFSBLOCK) { 3590 /* 3591 * allocation failed, so turn off alignment and 3592 * try again. 3593 */ 3594 args.type = atype; 3595 args.fsbno = ap->blkno; 3596 args.alignment = 0; 3597 if ((error = xfs_alloc_vextent(&args))) 3598 return error; 3599 } 3600 if (args.fsbno == NULLFSBLOCK && nullfb && 3601 args.minlen > ap->minlen) { 3602 args.minlen = ap->minlen; 3603 args.type = XFS_ALLOCTYPE_START_BNO; 3604 args.fsbno = ap->blkno; 3605 if ((error = xfs_alloc_vextent(&args))) 3606 return error; 3607 } 3608 if (args.fsbno == NULLFSBLOCK && nullfb) { 3609 args.fsbno = 0; 3610 args.type = XFS_ALLOCTYPE_FIRST_AG; 3611 args.total = ap->minlen; 3612 if ((error = xfs_alloc_vextent(&args))) 3613 return error; 3614 ap->tp->t_flags |= XFS_TRANS_LOWMODE; 3615 } 3616 if (args.fsbno != NULLFSBLOCK) { 3617 /* 3618 * check the allocation happened at the same or higher AG than 3619 * the first block that was allocated. 3620 */ 3621 ASSERT(ap->tp->t_firstblock == NULLFSBLOCK || 3622 XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <= 3623 XFS_FSB_TO_AGNO(mp, args.fsbno)); 3624 3625 ap->blkno = args.fsbno; 3626 if (ap->tp->t_firstblock == NULLFSBLOCK) 3627 ap->tp->t_firstblock = args.fsbno; 3628 ASSERT(nullfb || fb_agno <= args.agno); 3629 ap->length = args.len; 3630 /* 3631 * If the extent size hint is active, we tried to round the 3632 * caller's allocation request offset down to extsz and the 3633 * length up to another extsz boundary. If we found a free 3634 * extent we mapped it in starting at this new offset. If the 3635 * newly mapped space isn't long enough to cover any of the 3636 * range of offsets that was originally requested, move the 3637 * mapping up so that we can fill as much of the caller's 3638 * original request as possible. Free space is apparently 3639 * very fragmented so we're unlikely to be able to satisfy the 3640 * hints anyway. 3641 */ 3642 if (ap->length <= orig_length) 3643 ap->offset = orig_offset; 3644 else if (ap->offset + ap->length < orig_offset + orig_length) 3645 ap->offset = orig_offset + orig_length - ap->length; 3646 xfs_bmap_btalloc_accounting(ap, &args); 3647 } else { 3648 ap->blkno = NULLFSBLOCK; 3649 ap->length = 0; 3650 } 3651 return 0; 3652 } 3653 3654 /* 3655 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 
3656 * It figures out where to ask the underlying allocator to put the new extent. 3657 */ 3658 STATIC int 3659 xfs_bmap_alloc( 3660 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 3661 { 3662 if (XFS_IS_REALTIME_INODE(ap->ip) && 3663 xfs_alloc_is_userdata(ap->datatype)) 3664 return xfs_bmap_rtalloc(ap); 3665 return xfs_bmap_btalloc(ap); 3666 } 3667 3668 /* Trim extent to fit a logical block range. */ 3669 void 3670 xfs_trim_extent( 3671 struct xfs_bmbt_irec *irec, 3672 xfs_fileoff_t bno, 3673 xfs_filblks_t len) 3674 { 3675 xfs_fileoff_t distance; 3676 xfs_fileoff_t end = bno + len; 3677 3678 if (irec->br_startoff + irec->br_blockcount <= bno || 3679 irec->br_startoff >= end) { 3680 irec->br_blockcount = 0; 3681 return; 3682 } 3683 3684 if (irec->br_startoff < bno) { 3685 distance = bno - irec->br_startoff; 3686 if (isnullstartblock(irec->br_startblock)) 3687 irec->br_startblock = DELAYSTARTBLOCK; 3688 if (irec->br_startblock != DELAYSTARTBLOCK && 3689 irec->br_startblock != HOLESTARTBLOCK) 3690 irec->br_startblock += distance; 3691 irec->br_startoff += distance; 3692 irec->br_blockcount -= distance; 3693 } 3694 3695 if (end < irec->br_startoff + irec->br_blockcount) { 3696 distance = irec->br_startoff + irec->br_blockcount - end; 3697 irec->br_blockcount -= distance; 3698 } 3699 } 3700 3701 /* 3702 * Trim the returned map to the required bounds 3703 */ 3704 STATIC void 3705 xfs_bmapi_trim_map( 3706 struct xfs_bmbt_irec *mval, 3707 struct xfs_bmbt_irec *got, 3708 xfs_fileoff_t *bno, 3709 xfs_filblks_t len, 3710 xfs_fileoff_t obno, 3711 xfs_fileoff_t end, 3712 int n, 3713 int flags) 3714 { 3715 if ((flags & XFS_BMAPI_ENTIRE) || 3716 got->br_startoff + got->br_blockcount <= obno) { 3717 *mval = *got; 3718 if (isnullstartblock(got->br_startblock)) 3719 mval->br_startblock = DELAYSTARTBLOCK; 3720 return; 3721 } 3722 3723 if (obno > *bno) 3724 *bno = obno; 3725 ASSERT((*bno >= obno) || (n == 0)); 3726 ASSERT(*bno < end); 3727 mval->br_startoff = *bno; 3728 if (isnullstartblock(got->br_startblock)) 3729 mval->br_startblock = DELAYSTARTBLOCK; 3730 else 3731 mval->br_startblock = got->br_startblock + 3732 (*bno - got->br_startoff); 3733 /* 3734 * Return the minimum of what we got and what we asked for for 3735 * the length. We can use the len variable here because it is 3736 * modified below and we could have been there before coming 3737 * here if the first part of the allocation didn't overlap what 3738 * was asked for. 
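 *
 * For example, if got covers file blocks [10, 30) and the caller asked
 * for [15, 55), the trimmed mapping starts at block 15, its start
 * block is offset by 5 from got->br_startblock, and its length is
 * min(55 - 15, 30 - 15) = 15 blocks.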
3739 */ 3740 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3741 got->br_blockcount - (*bno - got->br_startoff)); 3742 mval->br_state = got->br_state; 3743 ASSERT(mval->br_blockcount <= len); 3744 return; 3745 } 3746 3747 /* 3748 * Update and validate the extent map to return 3749 */ 3750 STATIC void 3751 xfs_bmapi_update_map( 3752 struct xfs_bmbt_irec **map, 3753 xfs_fileoff_t *bno, 3754 xfs_filblks_t *len, 3755 xfs_fileoff_t obno, 3756 xfs_fileoff_t end, 3757 int *n, 3758 int flags) 3759 { 3760 xfs_bmbt_irec_t *mval = *map; 3761 3762 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3763 ((mval->br_startoff + mval->br_blockcount) <= end)); 3764 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3765 (mval->br_startoff < obno)); 3766 3767 *bno = mval->br_startoff + mval->br_blockcount; 3768 *len = end - *bno; 3769 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3770 /* update previous map with new information */ 3771 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3772 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3773 ASSERT(mval->br_state == mval[-1].br_state); 3774 mval[-1].br_blockcount = mval->br_blockcount; 3775 mval[-1].br_state = mval->br_state; 3776 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3777 mval[-1].br_startblock != DELAYSTARTBLOCK && 3778 mval[-1].br_startblock != HOLESTARTBLOCK && 3779 mval->br_startblock == mval[-1].br_startblock + 3780 mval[-1].br_blockcount && 3781 mval[-1].br_state == mval->br_state) { 3782 ASSERT(mval->br_startoff == 3783 mval[-1].br_startoff + mval[-1].br_blockcount); 3784 mval[-1].br_blockcount += mval->br_blockcount; 3785 } else if (*n > 0 && 3786 mval->br_startblock == DELAYSTARTBLOCK && 3787 mval[-1].br_startblock == DELAYSTARTBLOCK && 3788 mval->br_startoff == 3789 mval[-1].br_startoff + mval[-1].br_blockcount) { 3790 mval[-1].br_blockcount += mval->br_blockcount; 3791 mval[-1].br_state = mval->br_state; 3792 } else if (!((*n == 0) && 3793 ((mval->br_startoff + mval->br_blockcount) <= 3794 obno))) { 3795 mval++; 3796 (*n)++; 3797 } 3798 *map = mval; 3799 } 3800 3801 /* 3802 * Map file blocks to filesystem blocks without allocation. 3803 */ 3804 int 3805 xfs_bmapi_read( 3806 struct xfs_inode *ip, 3807 xfs_fileoff_t bno, 3808 xfs_filblks_t len, 3809 struct xfs_bmbt_irec *mval, 3810 int *nmap, 3811 int flags) 3812 { 3813 struct xfs_mount *mp = ip->i_mount; 3814 struct xfs_ifork *ifp; 3815 struct xfs_bmbt_irec got; 3816 xfs_fileoff_t obno; 3817 xfs_fileoff_t end; 3818 struct xfs_iext_cursor icur; 3819 int error; 3820 bool eof = false; 3821 int n = 0; 3822 int whichfork = xfs_bmapi_whichfork(flags); 3823 3824 ASSERT(*nmap >= 1); 3825 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| 3826 XFS_BMAPI_COWFORK))); 3827 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3828 3829 if (unlikely(XFS_TEST_ERROR( 3830 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 3831 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 3832 mp, XFS_ERRTAG_BMAPIFORMAT))) { 3833 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); 3834 return -EFSCORRUPTED; 3835 } 3836 3837 if (XFS_FORCED_SHUTDOWN(mp)) 3838 return -EIO; 3839 3840 XFS_STATS_INC(mp, xs_blk_mapr); 3841 3842 ifp = XFS_IFORK_PTR(ip, whichfork); 3843 3844 /* No CoW fork? Return a hole. 
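 * Otherwise each pass through the loop below emits either a real
 * mapping or a synthesized hole: with a single extent at file blocks
 * [40, 60), reading [0, 100) returns up to three mappings, a hole
 * [0, 40), the extent [40, 60) and a hole [60, 100).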
*/ 3845 if (whichfork == XFS_COW_FORK && !ifp) { 3846 mval->br_startoff = bno; 3847 mval->br_startblock = HOLESTARTBLOCK; 3848 mval->br_blockcount = len; 3849 mval->br_state = XFS_EXT_NORM; 3850 *nmap = 1; 3851 return 0; 3852 } 3853 3854 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 3855 error = xfs_iread_extents(NULL, ip, whichfork); 3856 if (error) 3857 return error; 3858 } 3859 3860 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) 3861 eof = true; 3862 end = bno + len; 3863 obno = bno; 3864 3865 while (bno < end && n < *nmap) { 3866 /* Reading past eof, act as though there's a hole up to end. */ 3867 if (eof) 3868 got.br_startoff = end; 3869 if (got.br_startoff > bno) { 3870 /* Reading in a hole. */ 3871 mval->br_startoff = bno; 3872 mval->br_startblock = HOLESTARTBLOCK; 3873 mval->br_blockcount = 3874 XFS_FILBLKS_MIN(len, got.br_startoff - bno); 3875 mval->br_state = XFS_EXT_NORM; 3876 bno += mval->br_blockcount; 3877 len -= mval->br_blockcount; 3878 mval++; 3879 n++; 3880 continue; 3881 } 3882 3883 /* set up the extent map to return. */ 3884 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); 3885 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 3886 3887 /* If we're done, stop now. */ 3888 if (bno >= end || n >= *nmap) 3889 break; 3890 3891 /* Else go on to the next record. */ 3892 if (!xfs_iext_next_extent(ifp, &icur, &got)) 3893 eof = true; 3894 } 3895 *nmap = n; 3896 return 0; 3897 } 3898 3899 /* 3900 * Add a delayed allocation extent to an inode. Blocks are reserved from the 3901 * global pool and the extent inserted into the inode in-core extent tree. 3902 * 3903 * On entry, got refers to the first extent beyond the offset of the extent to 3904 * allocate or eof is specified if no such extent exists. On return, got refers 3905 * to the extent record that was inserted to the inode fork. 3906 * 3907 * Note that the allocated extent may have been merged with contiguous extents 3908 * during insertion into the inode fork. Thus, got does not reflect the current 3909 * state of the inode fork on return. If necessary, the caller can use lastx to 3910 * look up the updated record in the inode fork. 3911 */ 3912 int 3913 xfs_bmapi_reserve_delalloc( 3914 struct xfs_inode *ip, 3915 int whichfork, 3916 xfs_fileoff_t off, 3917 xfs_filblks_t len, 3918 xfs_filblks_t prealloc, 3919 struct xfs_bmbt_irec *got, 3920 struct xfs_iext_cursor *icur, 3921 int eof) 3922 { 3923 struct xfs_mount *mp = ip->i_mount; 3924 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 3925 xfs_extlen_t alen; 3926 xfs_extlen_t indlen; 3927 int error; 3928 xfs_fileoff_t aoff = off; 3929 3930 /* 3931 * Cap the alloc length. Keep track of prealloc so we know whether to 3932 * tag the inode before we return. 3933 */ 3934 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN); 3935 if (!eof) 3936 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); 3937 if (prealloc && alen >= len) 3938 prealloc = alen - len; 3939 3940 /* Figure out the extent size, adjust alen */ 3941 if (whichfork == XFS_COW_FORK) { 3942 struct xfs_bmbt_irec prev; 3943 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip); 3944 3945 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev)) 3946 prev.br_startoff = NULLFILEOFF; 3947 3948 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof, 3949 1, 0, &aoff, &alen); 3950 ASSERT(!error); 3951 } 3952 3953 /* 3954 * Make a transaction-less quota reservation for delayed allocation 3955 * blocks. This number gets adjusted later. 
We return if we haven't 3956 * allocated blocks already inside this loop. 3957 */ 3958 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0, 3959 XFS_QMOPT_RES_REGBLKS); 3960 if (error) 3961 return error; 3962 3963 /* 3964 * Split changing sb for alen and indlen since they could be coming 3965 * from different places. 3966 */ 3967 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); 3968 ASSERT(indlen > 0); 3969 3970 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); 3971 if (error) 3972 goto out_unreserve_quota; 3973 3974 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); 3975 if (error) 3976 goto out_unreserve_blocks; 3977 3978 3979 ip->i_delayed_blks += alen; 3980 xfs_mod_delalloc(ip->i_mount, alen + indlen); 3981 3982 got->br_startoff = aoff; 3983 got->br_startblock = nullstartblock(indlen); 3984 got->br_blockcount = alen; 3985 got->br_state = XFS_EXT_NORM; 3986 3987 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got); 3988 3989 /* 3990 * Tag the inode if blocks were preallocated. Note that COW fork 3991 * preallocation can occur at the start or end of the extent, even when 3992 * prealloc == 0, so we must also check the aligned offset and length. 3993 */ 3994 if (whichfork == XFS_DATA_FORK && prealloc) 3995 xfs_inode_set_eofblocks_tag(ip); 3996 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 3997 xfs_inode_set_cowblocks_tag(ip); 3998 3999 return 0; 4000 4001 out_unreserve_blocks: 4002 xfs_mod_fdblocks(mp, alen, false); 4003 out_unreserve_quota: 4004 if (XFS_IS_QUOTA_ON(mp)) 4005 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, 4006 XFS_QMOPT_RES_REGBLKS); 4007 return error; 4008 } 4009 4010 static int 4011 xfs_bmapi_allocate( 4012 struct xfs_bmalloca *bma) 4013 { 4014 struct xfs_mount *mp = bma->ip->i_mount; 4015 int whichfork = xfs_bmapi_whichfork(bma->flags); 4016 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4017 int tmp_logflags = 0; 4018 int error; 4019 4020 ASSERT(bma->length > 0); 4021 4022 /* 4023 * For the wasdelay case, we could also just allocate the stuff asked 4024 * for in this bmap call but that wouldn't be as good. 4025 */ 4026 if (bma->wasdel) { 4027 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4028 bma->offset = bma->got.br_startoff; 4029 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev); 4030 } else { 4031 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN); 4032 if (!bma->eof) 4033 bma->length = XFS_FILBLKS_MIN(bma->length, 4034 bma->got.br_startoff - bma->offset); 4035 } 4036 4037 /* 4038 * Set the data type being allocated. For the data fork, the first data 4039 * in the file is treated differently to all other allocations. For the 4040 * attribute fork, we only need to ensure the allocated range is not on 4041 * the busy list. 4042 */ 4043 if (!(bma->flags & XFS_BMAPI_METADATA)) { 4044 bma->datatype = XFS_ALLOC_NOBUSY; 4045 if (whichfork == XFS_DATA_FORK) { 4046 if (bma->offset == 0) 4047 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA; 4048 else 4049 bma->datatype |= XFS_ALLOC_USERDATA; 4050 } 4051 if (bma->flags & XFS_BMAPI_ZERO) 4052 bma->datatype |= XFS_ALLOC_USERDATA_ZERO; 4053 } 4054 4055 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1; 4056 4057 /* 4058 * Only want to do the alignment at the eof if it is userdata and 4059 * allocation length is larger than a stripe unit. 
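 * xfs_bmap_isaeof() records in bma->aeof whether this allocation
 * starts at or beyond the last extent in the fork, a hint the block
 * allocator can later use to try stripe-aligned placement for
 * appending writes.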
4060 */ 4061 if (mp->m_dalign && bma->length >= mp->m_dalign && 4062 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { 4063 error = xfs_bmap_isaeof(bma, whichfork); 4064 if (error) 4065 return error; 4066 } 4067 4068 error = xfs_bmap_alloc(bma); 4069 if (error) 4070 return error; 4071 4072 if (bma->blkno == NULLFSBLOCK) 4073 return 0; 4074 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) 4075 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4076 /* 4077 * Bump the number of extents we've allocated 4078 * in this call. 4079 */ 4080 bma->nallocs++; 4081 4082 if (bma->cur) 4083 bma->cur->bc_private.b.flags = 4084 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; 4085 4086 bma->got.br_startoff = bma->offset; 4087 bma->got.br_startblock = bma->blkno; 4088 bma->got.br_blockcount = bma->length; 4089 bma->got.br_state = XFS_EXT_NORM; 4090 4091 /* 4092 * In the data fork, a wasdelay extent has been initialized, so 4093 * shouldn't be flagged as unwritten. 4094 * 4095 * For the cow fork, however, we convert delalloc reservations 4096 * (extents allocated for speculative preallocation) to 4097 * allocated unwritten extents, and only convert the unwritten 4098 * extents to real extents when we're about to write the data. 4099 */ 4100 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) && 4101 (bma->flags & XFS_BMAPI_PREALLOC)) 4102 bma->got.br_state = XFS_EXT_UNWRITTEN; 4103 4104 if (bma->wasdel) 4105 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4106 else 4107 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4108 whichfork, &bma->icur, &bma->cur, &bma->got, 4109 &bma->logflags, bma->flags); 4110 4111 bma->logflags |= tmp_logflags; 4112 if (error) 4113 return error; 4114 4115 /* 4116 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4117 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4118 * the neighbouring ones. 4119 */ 4120 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4121 4122 ASSERT(bma->got.br_startoff <= bma->offset); 4123 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4124 bma->offset + bma->length); 4125 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4126 bma->got.br_state == XFS_EXT_UNWRITTEN); 4127 return 0; 4128 } 4129 4130 STATIC int 4131 xfs_bmapi_convert_unwritten( 4132 struct xfs_bmalloca *bma, 4133 struct xfs_bmbt_irec *mval, 4134 xfs_filblks_t len, 4135 int flags) 4136 { 4137 int whichfork = xfs_bmapi_whichfork(flags); 4138 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); 4139 int tmp_logflags = 0; 4140 int error; 4141 4142 /* check if we need to do unwritten->real conversion */ 4143 if (mval->br_state == XFS_EXT_UNWRITTEN && 4144 (flags & XFS_BMAPI_PREALLOC)) 4145 return 0; 4146 4147 /* check if we need to do real->unwritten conversion */ 4148 if (mval->br_state == XFS_EXT_NORM && 4149 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4150 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4151 return 0; 4152 4153 /* 4154 * Modify (by adding) the state flag, if writing. 4155 */ 4156 ASSERT(mval->br_blockcount <= len); 4157 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) { 4158 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, 4159 bma->ip, whichfork); 4160 } 4161 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4162 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN; 4163 4164 /* 4165 * Before insertion into the bmbt, zero the range being converted 4166 * if required. 
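 * When XFS_BMAPI_ZERO is set, xfs_zero_extent() writes zeroes to the
 * blocks on disk before the new extent state is logged, so the range
 * can be reported as written without exposing stale data.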
4167 */ 4168 if (flags & XFS_BMAPI_ZERO) { 4169 error = xfs_zero_extent(bma->ip, mval->br_startblock, 4170 mval->br_blockcount); 4171 if (error) 4172 return error; 4173 } 4174 4175 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork, 4176 &bma->icur, &bma->cur, mval, &tmp_logflags); 4177 /* 4178 * Log the inode core unconditionally in the unwritten extent conversion 4179 * path because the conversion might not have done so (e.g., if the 4180 * extent count hasn't changed). We need to make sure the inode is dirty 4181 * in the transaction for the sake of fsync(), even if nothing has 4182 * changed, because fsync() will not force the log for this transaction 4183 * unless it sees the inode pinned. 4184 * 4185 * Note: If we're only converting cow fork extents, there aren't 4186 * any on-disk updates to make, so we don't need to log anything. 4187 */ 4188 if (whichfork != XFS_COW_FORK) 4189 bma->logflags |= tmp_logflags | XFS_ILOG_CORE; 4190 if (error) 4191 return error; 4192 4193 /* 4194 * Update our extent pointer, given that 4195 * xfs_bmap_add_extent_unwritten_real might have merged it into one 4196 * of the neighbouring ones. 4197 */ 4198 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4199 4200 /* 4201 * We may have combined previously unwritten space with written space, 4202 * so generate another request. 4203 */ 4204 if (mval->br_blockcount < len) 4205 return -EAGAIN; 4206 return 0; 4207 } 4208 4209 static inline xfs_extlen_t 4210 xfs_bmapi_minleft( 4211 struct xfs_trans *tp, 4212 struct xfs_inode *ip, 4213 int fork) 4214 { 4215 if (tp && tp->t_firstblock != NULLFSBLOCK) 4216 return 0; 4217 if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE) 4218 return 1; 4219 return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1; 4220 } 4221 4222 /* 4223 * Log whatever the flags say, even if error. Otherwise we might miss detecting 4224 * a case where the data is changed, there's an error, and it's not logged so we 4225 * don't shutdown when we should. Don't bother logging extents/btree changes if 4226 * we converted to the other format. 4227 */ 4228 static void 4229 xfs_bmapi_finish( 4230 struct xfs_bmalloca *bma, 4231 int whichfork, 4232 int error) 4233 { 4234 if ((bma->logflags & xfs_ilog_fext(whichfork)) && 4235 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 4236 bma->logflags &= ~xfs_ilog_fext(whichfork); 4237 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) && 4238 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE) 4239 bma->logflags &= ~xfs_ilog_fbroot(whichfork); 4240 4241 if (bma->logflags) 4242 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags); 4243 if (bma->cur) 4244 xfs_btree_del_cursor(bma->cur, error); 4245 } 4246 4247 /* 4248 * Map file blocks to filesystem blocks, and allocate blocks or convert the 4249 * extent state if necessary. Details behaviour is controlled by the flags 4250 * parameter. Only allocates blocks from a single allocation group, to avoid 4251 * locking problems. 4252 */ 4253 int 4254 xfs_bmapi_write( 4255 struct xfs_trans *tp, /* transaction pointer */ 4256 struct xfs_inode *ip, /* incore inode */ 4257 xfs_fileoff_t bno, /* starting file offs. mapped */ 4258 xfs_filblks_t len, /* length to map in file */ 4259 int flags, /* XFS_BMAPI_... 
*/ 4260 xfs_extlen_t total, /* total blocks needed */ 4261 struct xfs_bmbt_irec *mval, /* output: map values */ 4262 int *nmap) /* i/o: mval size/count */ 4263 { 4264 struct xfs_bmalloca bma = { 4265 .tp = tp, 4266 .ip = ip, 4267 .total = total, 4268 }; 4269 struct xfs_mount *mp = ip->i_mount; 4270 struct xfs_ifork *ifp; 4271 xfs_fileoff_t end; /* end of mapped file region */ 4272 bool eof = false; /* after the end of extents */ 4273 int error; /* error return */ 4274 int n; /* current extent index */ 4275 xfs_fileoff_t obno; /* old block number (offset) */ 4276 int whichfork; /* data or attr fork */ 4277 4278 #ifdef DEBUG 4279 xfs_fileoff_t orig_bno; /* original block number value */ 4280 int orig_flags; /* original flags arg value */ 4281 xfs_filblks_t orig_len; /* original value of len arg */ 4282 struct xfs_bmbt_irec *orig_mval; /* original value of mval */ 4283 int orig_nmap; /* original value of *nmap */ 4284 4285 orig_bno = bno; 4286 orig_len = len; 4287 orig_flags = flags; 4288 orig_mval = mval; 4289 orig_nmap = *nmap; 4290 #endif 4291 whichfork = xfs_bmapi_whichfork(flags); 4292 4293 ASSERT(*nmap >= 1); 4294 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); 4295 ASSERT(tp != NULL); 4296 ASSERT(len > 0); 4297 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); 4298 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4299 ASSERT(!(flags & XFS_BMAPI_REMAP)); 4300 4301 /* zeroing is for currently only for data extents, not metadata */ 4302 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) != 4303 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)); 4304 /* 4305 * we can allocate unwritten extents or pre-zero allocated blocks, 4306 * but it makes no sense to do both at once. This would result in 4307 * zeroing the unwritten extent twice, but it still being an 4308 * unwritten extent.... 4309 */ 4310 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) != 4311 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)); 4312 4313 if (unlikely(XFS_TEST_ERROR( 4314 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4315 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4316 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4317 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp); 4318 return -EFSCORRUPTED; 4319 } 4320 4321 if (XFS_FORCED_SHUTDOWN(mp)) 4322 return -EIO; 4323 4324 ifp = XFS_IFORK_PTR(ip, whichfork); 4325 4326 XFS_STATS_INC(mp, xs_blk_mapw); 4327 4328 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4329 error = xfs_iread_extents(tp, ip, whichfork); 4330 if (error) 4331 goto error0; 4332 } 4333 4334 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got)) 4335 eof = true; 4336 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4337 bma.prev.br_startoff = NULLFILEOFF; 4338 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4339 4340 n = 0; 4341 end = bno + len; 4342 obno = bno; 4343 while (bno < end && n < *nmap) { 4344 bool need_alloc = false, wasdelay = false; 4345 4346 /* in hole or beyond EOF? */ 4347 if (eof || bma.got.br_startoff > bno) { 4348 /* 4349 * CoW fork conversions should /never/ hit EOF or 4350 * holes. There should always be something for us 4351 * to work on. 4352 */ 4353 ASSERT(!((flags & XFS_BMAPI_CONVERT) && 4354 (flags & XFS_BMAPI_COWFORK))); 4355 4356 need_alloc = true; 4357 } else if (isnullstartblock(bma.got.br_startblock)) { 4358 wasdelay = true; 4359 } 4360 4361 /* 4362 * First, deal with the hole before the allocated space 4363 * that we found, if any. 
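 * For example (illustrative, assuming each allocation succeeds in
 * full and *nmap is large enough): mapping [0, 100) over a fork whose
 * only extent covers [40, 60) allocates [0, 40) on the first pass,
 * maps the existing extent on the second, and allocates [60, 100) on
 * the third.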
4364 */ 4365 if (need_alloc || wasdelay) { 4366 bma.eof = eof; 4367 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4368 bma.wasdel = wasdelay; 4369 bma.offset = bno; 4370 bma.flags = flags; 4371 4372 /* 4373 * There's a 32/64 bit type mismatch between the 4374 * allocation length request (which can be 64 bits in 4375 * length) and the bma length request, which is 4376 * xfs_extlen_t and therefore 32 bits. Hence we have to 4377 * check for 32-bit overflows and handle them here. 4378 */ 4379 if (len > (xfs_filblks_t)MAXEXTLEN) 4380 bma.length = MAXEXTLEN; 4381 else 4382 bma.length = len; 4383 4384 ASSERT(len > 0); 4385 ASSERT(bma.length > 0); 4386 error = xfs_bmapi_allocate(&bma); 4387 if (error) 4388 goto error0; 4389 if (bma.blkno == NULLFSBLOCK) 4390 break; 4391 4392 /* 4393 * If this is a CoW allocation, record the data in 4394 * the refcount btree for orphan recovery. 4395 */ 4396 if (whichfork == XFS_COW_FORK) { 4397 error = xfs_refcount_alloc_cow_extent(tp, 4398 bma.blkno, bma.length); 4399 if (error) 4400 goto error0; 4401 } 4402 } 4403 4404 /* Deal with the allocated space we found. */ 4405 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno, 4406 end, n, flags); 4407 4408 /* Execute unwritten extent conversion if necessary */ 4409 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4410 if (error == -EAGAIN) 4411 continue; 4412 if (error) 4413 goto error0; 4414 4415 /* update the extent map to return */ 4416 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); 4417 4418 /* 4419 * If we're done, stop now. Stop when we've allocated 4420 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise 4421 * the transaction may get too big. 4422 */ 4423 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) 4424 break; 4425 4426 /* Else go on to the next record. */ 4427 bma.prev = bma.got; 4428 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got)) 4429 eof = true; 4430 } 4431 *nmap = n; 4432 4433 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4434 whichfork); 4435 if (error) 4436 goto error0; 4437 4438 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || 4439 XFS_IFORK_NEXTENTS(ip, whichfork) > 4440 XFS_IFORK_MAXEXT(ip, whichfork)); 4441 xfs_bmapi_finish(&bma, whichfork, 0); 4442 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, 4443 orig_nmap, *nmap); 4444 return 0; 4445 error0: 4446 xfs_bmapi_finish(&bma, whichfork, error); 4447 return error; 4448 } 4449 4450 /* 4451 * Convert an existing delalloc extent to real blocks based on file offset. This 4452 * attempts to allocate the entire delalloc extent and may require multiple 4453 * invocations to allocate the target offset if a large enough physical extent 4454 * is not available. 4455 */ 4456 int 4457 xfs_bmapi_convert_delalloc( 4458 struct xfs_inode *ip, 4459 int whichfork, 4460 xfs_fileoff_t offset_fsb, 4461 struct xfs_bmbt_irec *imap, 4462 unsigned int *seq) 4463 { 4464 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4465 struct xfs_mount *mp = ip->i_mount; 4466 struct xfs_bmalloca bma = { NULL }; 4467 struct xfs_trans *tp; 4468 int error; 4469 4470 /* 4471 * Space for the extent and indirect blocks was reserved when the 4472 * delalloc extent was created so there's no need to do so here. 
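 * Hence the transaction below is allocated with a zero block
 * reservation; XFS_TRANS_RESERVE lets it dip into the reserve block
 * pool if free space accounting is tight.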
4473 */ 4474 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 4475 XFS_TRANS_RESERVE, &tp); 4476 if (error) 4477 return error; 4478 4479 xfs_ilock(ip, XFS_ILOCK_EXCL); 4480 xfs_trans_ijoin(tp, ip, 0); 4481 4482 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) || 4483 bma.got.br_startoff > offset_fsb) { 4484 /* 4485 * No extent found in the range we are trying to convert. This 4486 * should only happen for the COW fork, where another thread 4487 * might have moved the extent to the data fork in the meantime. 4488 */ 4489 WARN_ON_ONCE(whichfork != XFS_COW_FORK); 4490 error = -EAGAIN; 4491 goto out_trans_cancel; 4492 } 4493 4494 /* 4495 * If we find a real extent here we raced with another thread converting 4496 * the extent. Just return the real extent at this offset. 4497 */ 4498 if (!isnullstartblock(bma.got.br_startblock)) { 4499 *imap = bma.got; 4500 *seq = READ_ONCE(ifp->if_seq); 4501 goto out_trans_cancel; 4502 } 4503 4504 bma.tp = tp; 4505 bma.ip = ip; 4506 bma.wasdel = true; 4507 bma.offset = bma.got.br_startoff; 4508 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN); 4509 bma.total = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK); 4510 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4511 if (whichfork == XFS_COW_FORK) 4512 bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC; 4513 4514 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4515 bma.prev.br_startoff = NULLFILEOFF; 4516 4517 error = xfs_bmapi_allocate(&bma); 4518 if (error) 4519 goto out_finish; 4520 4521 error = -ENOSPC; 4522 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK)) 4523 goto out_finish; 4524 error = -EFSCORRUPTED; 4525 if (WARN_ON_ONCE(!bma.got.br_startblock && !XFS_IS_REALTIME_INODE(ip))) 4526 goto out_finish; 4527 4528 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length)); 4529 XFS_STATS_INC(mp, xs_xstrat_quick); 4530 4531 ASSERT(!isnullstartblock(bma.got.br_startblock)); 4532 *imap = bma.got; 4533 *seq = READ_ONCE(ifp->if_seq); 4534 4535 if (whichfork == XFS_COW_FORK) { 4536 error = xfs_refcount_alloc_cow_extent(tp, bma.blkno, 4537 bma.length); 4538 if (error) 4539 goto out_finish; 4540 } 4541 4542 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4543 whichfork); 4544 if (error) 4545 goto out_finish; 4546 4547 xfs_bmapi_finish(&bma, whichfork, 0); 4548 error = xfs_trans_commit(tp); 4549 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4550 return error; 4551 4552 out_finish: 4553 xfs_bmapi_finish(&bma, whichfork, error); 4554 out_trans_cancel: 4555 xfs_trans_cancel(tp); 4556 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4557 return error; 4558 } 4559 4560 int 4561 xfs_bmapi_remap( 4562 struct xfs_trans *tp, 4563 struct xfs_inode *ip, 4564 xfs_fileoff_t bno, 4565 xfs_filblks_t len, 4566 xfs_fsblock_t startblock, 4567 int flags) 4568 { 4569 struct xfs_mount *mp = ip->i_mount; 4570 struct xfs_ifork *ifp; 4571 struct xfs_btree_cur *cur = NULL; 4572 struct xfs_bmbt_irec got; 4573 struct xfs_iext_cursor icur; 4574 int whichfork = xfs_bmapi_whichfork(flags); 4575 int logflags = 0, error; 4576 4577 ifp = XFS_IFORK_PTR(ip, whichfork); 4578 ASSERT(len > 0); 4579 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN); 4580 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 4581 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC | 4582 XFS_BMAPI_NORMAP))); 4583 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) != 4584 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)); 4585 4586 if (unlikely(XFS_TEST_ERROR( 4587 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 4588 
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4589 mp, XFS_ERRTAG_BMAPIFORMAT))) { 4590 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp); 4591 return -EFSCORRUPTED; 4592 } 4593 4594 if (XFS_FORCED_SHUTDOWN(mp)) 4595 return -EIO; 4596 4597 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 4598 error = xfs_iread_extents(tp, ip, whichfork); 4599 if (error) 4600 return error; 4601 } 4602 4603 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) { 4604 /* make sure we only reflink into a hole. */ 4605 ASSERT(got.br_startoff > bno); 4606 ASSERT(got.br_startoff - bno >= len); 4607 } 4608 4609 ip->i_d.di_nblocks += len; 4610 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4611 4612 if (ifp->if_flags & XFS_IFBROOT) { 4613 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 4614 cur->bc_private.b.flags = 0; 4615 } 4616 4617 got.br_startoff = bno; 4618 got.br_startblock = startblock; 4619 got.br_blockcount = len; 4620 if (flags & XFS_BMAPI_PREALLOC) 4621 got.br_state = XFS_EXT_UNWRITTEN; 4622 else 4623 got.br_state = XFS_EXT_NORM; 4624 4625 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur, 4626 &cur, &got, &logflags, flags); 4627 if (error) 4628 goto error0; 4629 4630 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork); 4631 4632 error0: 4633 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) 4634 logflags &= ~XFS_ILOG_DEXT; 4635 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) 4636 logflags &= ~XFS_ILOG_DBROOT; 4637 4638 if (logflags) 4639 xfs_trans_log_inode(tp, ip, logflags); 4640 if (cur) 4641 xfs_btree_del_cursor(cur, error); 4642 return error; 4643 } 4644 4645 /* 4646 * When a delalloc extent is split (e.g., due to a hole punch), the original 4647 * indlen reservation must be shared across the two new extents that are left 4648 * behind. 4649 * 4650 * Given the original reservation and the worst case indlen for the two new 4651 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4652 * reservation fairly across the two new extents. If necessary, steal available 4653 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4654 * ores == 1). The number of stolen blocks is returned. The availability and 4655 * subsequent accounting of stolen blocks is the responsibility of the caller. 4656 */ 4657 static xfs_filblks_t 4658 xfs_bmap_split_indlen( 4659 xfs_filblks_t ores, /* original res. */ 4660 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4661 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4662 xfs_filblks_t avail) /* stealable blocks */ 4663 { 4664 xfs_filblks_t len1 = *indlen1; 4665 xfs_filblks_t len2 = *indlen2; 4666 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4667 xfs_filblks_t stolen = 0; 4668 xfs_filblks_t resfactor; 4669 4670 /* 4671 * Steal as many blocks as we can to try and satisfy the worst case 4672 * indlen for both new extents. 4673 */ 4674 if (ores < nres && avail) 4675 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4676 ores += stolen; 4677 4678 /* nothing else to do if we've satisfied the new reservation */ 4679 if (ores >= nres) 4680 return stolen; 4681 4682 /* 4683 * We can't meet the total required reservation for the two extents. 4684 * Calculate the percent of the overall shortage between both extents 4685 * and apply this percentage to each of the requested indlen values. 4686 * This distributes the shortage fairly and reduces the chances that one 4687 * of the two extents is left with nothing when extents are repeatedly 4688 * split. 
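 *
 * For example (illustrative numbers only): with ores = 10 and
 * worst-case indlens of 8 and 4 (nres = 12), each request is scaled
 * by roughly 10/12, giving 6 and 3, and the loop below hands the
 * remaining block back so the extents end up with 7 and 3.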
4689 */ 4690 resfactor = (ores * 100); 4691 do_div(resfactor, nres); 4692 len1 *= resfactor; 4693 do_div(len1, 100); 4694 len2 *= resfactor; 4695 do_div(len2, 100); 4696 ASSERT(len1 + len2 <= ores); 4697 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4698 4699 /* 4700 * Hand out the remainder to each extent. If one of the two reservations 4701 * is zero, we want to make sure that one gets a block first. The loop 4702 * below starts with len1, so hand len2 a block right off the bat if it 4703 * is zero. 4704 */ 4705 ores -= (len1 + len2); 4706 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4707 if (ores && !len2 && *indlen2) { 4708 len2++; 4709 ores--; 4710 } 4711 while (ores) { 4712 if (len1 < *indlen1) { 4713 len1++; 4714 ores--; 4715 } 4716 if (!ores) 4717 break; 4718 if (len2 < *indlen2) { 4719 len2++; 4720 ores--; 4721 } 4722 } 4723 4724 *indlen1 = len1; 4725 *indlen2 = len2; 4726 4727 return stolen; 4728 } 4729 4730 int 4731 xfs_bmap_del_extent_delay( 4732 struct xfs_inode *ip, 4733 int whichfork, 4734 struct xfs_iext_cursor *icur, 4735 struct xfs_bmbt_irec *got, 4736 struct xfs_bmbt_irec *del) 4737 { 4738 struct xfs_mount *mp = ip->i_mount; 4739 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 4740 struct xfs_bmbt_irec new; 4741 int64_t da_old, da_new, da_diff = 0; 4742 xfs_fileoff_t del_endoff, got_endoff; 4743 xfs_filblks_t got_indlen, new_indlen, stolen; 4744 int state = xfs_bmap_fork_to_state(whichfork); 4745 int error = 0; 4746 bool isrt; 4747 4748 XFS_STATS_INC(mp, xs_del_exlist); 4749 4750 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4751 del_endoff = del->br_startoff + del->br_blockcount; 4752 got_endoff = got->br_startoff + got->br_blockcount; 4753 da_old = startblockval(got->br_startblock); 4754 da_new = 0; 4755 4756 ASSERT(del->br_blockcount > 0); 4757 ASSERT(got->br_startoff <= del->br_startoff); 4758 ASSERT(got_endoff >= del_endoff); 4759 4760 if (isrt) { 4761 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4762 4763 do_div(rtexts, mp->m_sb.sb_rextsize); 4764 xfs_mod_frextents(mp, rtexts); 4765 } 4766 4767 /* 4768 * Update the inode delalloc counter now and wait to update the 4769 * sb counters as we might have to borrow some blocks for the 4770 * indirect block accounting. 4771 */ 4772 error = xfs_trans_reserve_quota_nblks(NULL, ip, 4773 -((long)del->br_blockcount), 0, 4774 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4775 if (error) 4776 return error; 4777 ip->i_delayed_blks -= del->br_blockcount; 4778 4779 if (got->br_startoff == del->br_startoff) 4780 state |= BMAP_LEFT_FILLING; 4781 if (got_endoff == del_endoff) 4782 state |= BMAP_RIGHT_FILLING; 4783 4784 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4785 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4786 /* 4787 * Matches the whole extent. Delete the entry. 4788 */ 4789 xfs_iext_remove(ip, icur, state); 4790 xfs_iext_prev(ifp, icur); 4791 break; 4792 case BMAP_LEFT_FILLING: 4793 /* 4794 * Deleting the first part of the extent. 4795 */ 4796 got->br_startoff = del_endoff; 4797 got->br_blockcount -= del->br_blockcount; 4798 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4799 got->br_blockcount), da_old); 4800 got->br_startblock = nullstartblock((int)da_new); 4801 xfs_iext_update_extent(ip, state, icur, got); 4802 break; 4803 case BMAP_RIGHT_FILLING: 4804 /* 4805 * Deleting the last part of the extent. 
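 * The remaining reservation is recomputed as the worst case for the
 * shortened extent, capped at the original reservation so it can only
 * shrink here.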
4806 */ 4807 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4808 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4809 got->br_blockcount), da_old); 4810 got->br_startblock = nullstartblock((int)da_new); 4811 xfs_iext_update_extent(ip, state, icur, got); 4812 break; 4813 case 0: 4814 /* 4815 * Deleting the middle of the extent. 4816 * 4817 * Distribute the original indlen reservation across the two new 4818 * extents. Steal blocks from the deleted extent if necessary. 4819 * Stealing blocks simply fudges the fdblocks accounting below. 4820 * Warn if either of the new indlen reservations is zero as this 4821 * can lead to delalloc problems. 4822 */ 4823 got->br_blockcount = del->br_startoff - got->br_startoff; 4824 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4825 4826 new.br_blockcount = got_endoff - del_endoff; 4827 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4828 4829 WARN_ON_ONCE(!got_indlen || !new_indlen); 4830 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4831 del->br_blockcount); 4832 4833 got->br_startblock = nullstartblock((int)got_indlen); 4834 4835 new.br_startoff = del_endoff; 4836 new.br_state = got->br_state; 4837 new.br_startblock = nullstartblock((int)new_indlen); 4838 4839 xfs_iext_update_extent(ip, state, icur, got); 4840 xfs_iext_next(ifp, icur); 4841 xfs_iext_insert(ip, icur, &new, state); 4842 4843 da_new = got_indlen + new_indlen - stolen; 4844 del->br_blockcount -= stolen; 4845 break; 4846 } 4847 4848 ASSERT(da_old >= da_new); 4849 da_diff = da_old - da_new; 4850 if (!isrt) 4851 da_diff += del->br_blockcount; 4852 if (da_diff) { 4853 xfs_mod_fdblocks(mp, da_diff, false); 4854 xfs_mod_delalloc(mp, -da_diff); 4855 } 4856 return error; 4857 } 4858 4859 void 4860 xfs_bmap_del_extent_cow( 4861 struct xfs_inode *ip, 4862 struct xfs_iext_cursor *icur, 4863 struct xfs_bmbt_irec *got, 4864 struct xfs_bmbt_irec *del) 4865 { 4866 struct xfs_mount *mp = ip->i_mount; 4867 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 4868 struct xfs_bmbt_irec new; 4869 xfs_fileoff_t del_endoff, got_endoff; 4870 int state = BMAP_COWFORK; 4871 4872 XFS_STATS_INC(mp, xs_del_exlist); 4873 4874 del_endoff = del->br_startoff + del->br_blockcount; 4875 got_endoff = got->br_startoff + got->br_blockcount; 4876 4877 ASSERT(del->br_blockcount > 0); 4878 ASSERT(got->br_startoff <= del->br_startoff); 4879 ASSERT(got_endoff >= del_endoff); 4880 ASSERT(!isnullstartblock(got->br_startblock)); 4881 4882 if (got->br_startoff == del->br_startoff) 4883 state |= BMAP_LEFT_FILLING; 4884 if (got_endoff == del_endoff) 4885 state |= BMAP_RIGHT_FILLING; 4886 4887 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4888 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4889 /* 4890 * Matches the whole extent. Delete the entry. 4891 */ 4892 xfs_iext_remove(ip, icur, state); 4893 xfs_iext_prev(ifp, icur); 4894 break; 4895 case BMAP_LEFT_FILLING: 4896 /* 4897 * Deleting the first part of the extent. 4898 */ 4899 got->br_startoff = del_endoff; 4900 got->br_blockcount -= del->br_blockcount; 4901 got->br_startblock = del->br_startblock + del->br_blockcount; 4902 xfs_iext_update_extent(ip, state, icur, got); 4903 break; 4904 case BMAP_RIGHT_FILLING: 4905 /* 4906 * Deleting the last part of the extent. 4907 */ 4908 got->br_blockcount -= del->br_blockcount; 4909 xfs_iext_update_extent(ip, state, icur, got); 4910 break; 4911 case 0: 4912 /* 4913 * Deleting the middle of the extent. 
4914 */ 4915 got->br_blockcount = del->br_startoff - got->br_startoff; 4916 4917 new.br_startoff = del_endoff; 4918 new.br_blockcount = got_endoff - del_endoff; 4919 new.br_state = got->br_state; 4920 new.br_startblock = del->br_startblock + del->br_blockcount; 4921 4922 xfs_iext_update_extent(ip, state, icur, got); 4923 xfs_iext_next(ifp, icur); 4924 xfs_iext_insert(ip, icur, &new, state); 4925 break; 4926 } 4927 ip->i_delayed_blks -= del->br_blockcount; 4928 } 4929 4930 /* 4931 * Called by xfs_bmapi to update file extent records and the btree 4932 * after removing space. 4933 */ 4934 STATIC int /* error */ 4935 xfs_bmap_del_extent_real( 4936 xfs_inode_t *ip, /* incore inode pointer */ 4937 xfs_trans_t *tp, /* current transaction pointer */ 4938 struct xfs_iext_cursor *icur, 4939 xfs_btree_cur_t *cur, /* if null, not a btree */ 4940 xfs_bmbt_irec_t *del, /* data to remove from extents */ 4941 int *logflagsp, /* inode logging flags */ 4942 int whichfork, /* data or attr fork */ 4943 int bflags) /* bmapi flags */ 4944 { 4945 xfs_fsblock_t del_endblock=0; /* first block past del */ 4946 xfs_fileoff_t del_endoff; /* first offset past del */ 4947 int do_fx; /* free extent at end of routine */ 4948 int error; /* error return value */ 4949 int flags = 0;/* inode logging flags */ 4950 struct xfs_bmbt_irec got; /* current extent entry */ 4951 xfs_fileoff_t got_endoff; /* first offset past got */ 4952 int i; /* temp state */ 4953 struct xfs_ifork *ifp; /* inode fork pointer */ 4954 xfs_mount_t *mp; /* mount structure */ 4955 xfs_filblks_t nblks; /* quota/sb block count */ 4956 xfs_bmbt_irec_t new; /* new record to be inserted */ 4957 /* REFERENCED */ 4958 uint qfield; /* quota field to update */ 4959 int state = xfs_bmap_fork_to_state(whichfork); 4960 struct xfs_bmbt_irec old; 4961 4962 mp = ip->i_mount; 4963 XFS_STATS_INC(mp, xs_del_exlist); 4964 4965 ifp = XFS_IFORK_PTR(ip, whichfork); 4966 ASSERT(del->br_blockcount > 0); 4967 xfs_iext_get_extent(ifp, icur, &got); 4968 ASSERT(got.br_startoff <= del->br_startoff); 4969 del_endoff = del->br_startoff + del->br_blockcount; 4970 got_endoff = got.br_startoff + got.br_blockcount; 4971 ASSERT(got_endoff >= del_endoff); 4972 ASSERT(!isnullstartblock(got.br_startblock)); 4973 qfield = 0; 4974 error = 0; 4975 4976 /* 4977 * If it's the case where the directory code is running with no block 4978 * reservation, and the deleted block is in the middle of its extent, 4979 * and the resulting insert of an extent would cause transformation to 4980 * btree format, then reject it. The calling code will then swap blocks 4981 * around instead. We have to do this now, rather than waiting for the 4982 * conversion to btree format, since the transaction will be dirty then. 
4983 */ 4984 if (tp->t_blk_res == 0 && 4985 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && 4986 XFS_IFORK_NEXTENTS(ip, whichfork) >= 4987 XFS_IFORK_MAXEXT(ip, whichfork) && 4988 del->br_startoff > got.br_startoff && del_endoff < got_endoff) 4989 return -ENOSPC; 4990 4991 flags = XFS_ILOG_CORE; 4992 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 4993 xfs_fsblock_t bno; 4994 xfs_filblks_t len; 4995 xfs_extlen_t mod; 4996 4997 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize, 4998 &mod); 4999 ASSERT(mod == 0); 5000 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize, 5001 &mod); 5002 ASSERT(mod == 0); 5003 5004 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); 5005 if (error) 5006 goto done; 5007 do_fx = 0; 5008 nblks = len * mp->m_sb.sb_rextsize; 5009 qfield = XFS_TRANS_DQ_RTBCOUNT; 5010 } else { 5011 do_fx = 1; 5012 nblks = del->br_blockcount; 5013 qfield = XFS_TRANS_DQ_BCOUNT; 5014 } 5015 5016 del_endblock = del->br_startblock + del->br_blockcount; 5017 if (cur) { 5018 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5019 if (error) 5020 goto done; 5021 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5022 } 5023 5024 if (got.br_startoff == del->br_startoff) 5025 state |= BMAP_LEFT_FILLING; 5026 if (got_endoff == del_endoff) 5027 state |= BMAP_RIGHT_FILLING; 5028 5029 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 5030 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 5031 /* 5032 * Matches the whole extent. Delete the entry. 5033 */ 5034 xfs_iext_remove(ip, icur, state); 5035 xfs_iext_prev(ifp, icur); 5036 XFS_IFORK_NEXT_SET(ip, whichfork, 5037 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5038 flags |= XFS_ILOG_CORE; 5039 if (!cur) { 5040 flags |= xfs_ilog_fext(whichfork); 5041 break; 5042 } 5043 if ((error = xfs_btree_delete(cur, &i))) 5044 goto done; 5045 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5046 break; 5047 case BMAP_LEFT_FILLING: 5048 /* 5049 * Deleting the first part of the extent. 5050 */ 5051 got.br_startoff = del_endoff; 5052 got.br_startblock = del_endblock; 5053 got.br_blockcount -= del->br_blockcount; 5054 xfs_iext_update_extent(ip, state, icur, &got); 5055 if (!cur) { 5056 flags |= xfs_ilog_fext(whichfork); 5057 break; 5058 } 5059 error = xfs_bmbt_update(cur, &got); 5060 if (error) 5061 goto done; 5062 break; 5063 case BMAP_RIGHT_FILLING: 5064 /* 5065 * Deleting the last part of the extent. 5066 */ 5067 got.br_blockcount -= del->br_blockcount; 5068 xfs_iext_update_extent(ip, state, icur, &got); 5069 if (!cur) { 5070 flags |= xfs_ilog_fext(whichfork); 5071 break; 5072 } 5073 error = xfs_bmbt_update(cur, &got); 5074 if (error) 5075 goto done; 5076 break; 5077 case 0: 5078 /* 5079 * Deleting the middle of the extent. 5080 */ 5081 old = got; 5082 5083 got.br_blockcount = del->br_startoff - got.br_startoff; 5084 xfs_iext_update_extent(ip, state, icur, &got); 5085 5086 new.br_startoff = del_endoff; 5087 new.br_blockcount = got_endoff - del_endoff; 5088 new.br_state = got.br_state; 5089 new.br_startblock = del_endblock; 5090 5091 flags |= XFS_ILOG_CORE; 5092 if (cur) { 5093 error = xfs_bmbt_update(cur, &got); 5094 if (error) 5095 goto done; 5096 error = xfs_btree_increment(cur, 0, &i); 5097 if (error) 5098 goto done; 5099 cur->bc_rec.b = new; 5100 error = xfs_btree_insert(cur, &i); 5101 if (error && error != -ENOSPC) 5102 goto done; 5103 /* 5104 * If get no-space back from btree insert, it tried a 5105 * split, and we have a zero block reservation. Fix up 5106 * our state and return the error. 
5107 */ 5108 if (error == -ENOSPC) { 5109 /* 5110 * Reset the cursor, don't trust it after any 5111 * insert operation. 5112 */ 5113 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5114 if (error) 5115 goto done; 5116 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5117 /* 5118 * Update the btree record back 5119 * to the original value. 5120 */ 5121 error = xfs_bmbt_update(cur, &old); 5122 if (error) 5123 goto done; 5124 /* 5125 * Reset the extent record back 5126 * to the original value. 5127 */ 5128 xfs_iext_update_extent(ip, state, icur, &old); 5129 flags = 0; 5130 error = -ENOSPC; 5131 goto done; 5132 } 5133 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); 5134 } else 5135 flags |= xfs_ilog_fext(whichfork); 5136 XFS_IFORK_NEXT_SET(ip, whichfork, 5137 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 5138 xfs_iext_next(ifp, icur); 5139 xfs_iext_insert(ip, icur, &new, state); 5140 break; 5141 } 5142 5143 /* remove reverse mapping */ 5144 error = xfs_rmap_unmap_extent(tp, ip, whichfork, del); 5145 if (error) 5146 goto done; 5147 5148 /* 5149 * If we need to, add to list of extents to delete. 5150 */ 5151 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) { 5152 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { 5153 error = xfs_refcount_decrease_extent(tp, del); 5154 if (error) 5155 goto done; 5156 } else { 5157 __xfs_bmap_add_free(tp, del->br_startblock, 5158 del->br_blockcount, NULL, 5159 (bflags & XFS_BMAPI_NODISCARD) || 5160 del->br_state == XFS_EXT_UNWRITTEN); 5161 } 5162 } 5163 5164 /* 5165 * Adjust inode # blocks in the file. 5166 */ 5167 if (nblks) 5168 ip->i_d.di_nblocks -= nblks; 5169 /* 5170 * Adjust quota data. 5171 */ 5172 if (qfield && !(bflags & XFS_BMAPI_REMAP)) 5173 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 5174 5175 done: 5176 *logflagsp = flags; 5177 return error; 5178 } 5179 5180 /* 5181 * Unmap (remove) blocks from a file. 5182 * If nexts is nonzero then the number of extents to remove is limited to 5183 * that value. If not all extents in the block range can be removed then 5184 * *done is set. 
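 * (In this internal variant, the portion of the range that was not
 * unmapped is returned in *rlen; the xfs_bunmapi() wrapper below
 * derives *done from it.)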
5185 */ 5186 int /* error */ 5187 __xfs_bunmapi( 5188 struct xfs_trans *tp, /* transaction pointer */ 5189 struct xfs_inode *ip, /* incore inode */ 5190 xfs_fileoff_t start, /* first file offset deleted */ 5191 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5192 int flags, /* misc flags */ 5193 xfs_extnum_t nexts) /* number of extents max */ 5194 { 5195 struct xfs_btree_cur *cur; /* bmap btree cursor */ 5196 struct xfs_bmbt_irec del; /* extent being deleted */ 5197 int error; /* error return value */ 5198 xfs_extnum_t extno; /* extent number in list */ 5199 struct xfs_bmbt_irec got; /* current extent record */ 5200 struct xfs_ifork *ifp; /* inode fork pointer */ 5201 int isrt; /* freeing in rt area */ 5202 int logflags; /* transaction logging flags */ 5203 xfs_extlen_t mod; /* rt extent offset */ 5204 struct xfs_mount *mp; /* mount structure */ 5205 int tmp_logflags; /* partial logging flags */ 5206 int wasdel; /* was a delayed alloc extent */ 5207 int whichfork; /* data or attribute fork */ 5208 xfs_fsblock_t sum; 5209 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5210 xfs_fileoff_t max_len; 5211 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno; 5212 xfs_fileoff_t end; 5213 struct xfs_iext_cursor icur; 5214 bool done = false; 5215 5216 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_); 5217 5218 whichfork = xfs_bmapi_whichfork(flags); 5219 ASSERT(whichfork != XFS_COW_FORK); 5220 ifp = XFS_IFORK_PTR(ip, whichfork); 5221 if (unlikely( 5222 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5223 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5224 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5225 ip->i_mount); 5226 return -EFSCORRUPTED; 5227 } 5228 mp = ip->i_mount; 5229 if (XFS_FORCED_SHUTDOWN(mp)) 5230 return -EIO; 5231 5232 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5233 ASSERT(len > 0); 5234 ASSERT(nexts >= 0); 5235 5236 /* 5237 * Guesstimate how many blocks we can unmap without running the risk of 5238 * blowing out the transaction with a mix of EFIs and reflink 5239 * adjustments. 5240 */ 5241 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) 5242 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res)); 5243 else 5244 max_len = len; 5245 5246 if (!(ifp->if_flags & XFS_IFEXTENTS) && 5247 (error = xfs_iread_extents(tp, ip, whichfork))) 5248 return error; 5249 if (xfs_iext_count(ifp) == 0) { 5250 *rlen = 0; 5251 return 0; 5252 } 5253 XFS_STATS_INC(mp, xs_blk_unmap); 5254 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5255 end = start + len; 5256 5257 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) { 5258 *rlen = 0; 5259 return 0; 5260 } 5261 end--; 5262 5263 logflags = 0; 5264 if (ifp->if_flags & XFS_IFBROOT) { 5265 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); 5266 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5267 cur->bc_private.b.flags = 0; 5268 } else 5269 cur = NULL; 5270 5271 if (isrt) { 5272 /* 5273 * Synchronize by locking the bitmap inode. 5274 */ 5275 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5276 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5277 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5278 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5279 } 5280 5281 extno = 0; 5282 while (end != (xfs_fileoff_t)-1 && end >= start && 5283 (nexts == 0 || extno < nexts) && max_len > 0) { 5284 /* 5285 * Is the found extent after a hole in which end lives? 5286 * Just back up to the previous extent, if so. 
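 * The unmap loop walks the fork backwards, from the end of the range
 * towards start, so landing in a hole simply means stepping to the
 * previous extent record.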
5287 */ 5288 if (got.br_startoff > end && 5289 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5290 done = true; 5291 break; 5292 } 5293 /* 5294 * Is the last block of this extent before the range 5295 * we're supposed to delete? If so, we're done. 5296 */ 5297 end = XFS_FILEOFF_MIN(end, 5298 got.br_startoff + got.br_blockcount - 1); 5299 if (end < start) 5300 break; 5301 /* 5302 * Then deal with the (possibly delayed) allocated space 5303 * we found. 5304 */ 5305 del = got; 5306 wasdel = isnullstartblock(del.br_startblock); 5307 5308 /* 5309 * Make sure we don't touch multiple AGF headers out of order 5310 * in a single transaction, as that could cause AB-BA deadlocks. 5311 */ 5312 if (!wasdel) { 5313 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock); 5314 if (prev_agno != NULLAGNUMBER && prev_agno > agno) 5315 break; 5316 prev_agno = agno; 5317 } 5318 if (got.br_startoff < start) { 5319 del.br_startoff = start; 5320 del.br_blockcount -= start - got.br_startoff; 5321 if (!wasdel) 5322 del.br_startblock += start - got.br_startoff; 5323 } 5324 if (del.br_startoff + del.br_blockcount > end + 1) 5325 del.br_blockcount = end + 1 - del.br_startoff; 5326 5327 /* How much can we safely unmap? */ 5328 if (max_len < del.br_blockcount) { 5329 del.br_startoff += del.br_blockcount - max_len; 5330 if (!wasdel) 5331 del.br_startblock += del.br_blockcount - max_len; 5332 del.br_blockcount = max_len; 5333 } 5334 5335 if (!isrt) 5336 goto delete; 5337 5338 sum = del.br_startblock + del.br_blockcount; 5339 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod); 5340 if (mod) { 5341 /* 5342 * Realtime extent not lined up at the end. 5343 * The extent could have been split into written 5344 * and unwritten pieces, or we could just be 5345 * unmapping part of it. But we can't really 5346 * get rid of part of a realtime extent. 5347 */ 5348 if (del.br_state == XFS_EXT_UNWRITTEN) { 5349 /* 5350 * This piece is unwritten, or we're not 5351 * using unwritten extents. Skip over it. 5352 */ 5353 ASSERT(end >= mod); 5354 end -= mod > del.br_blockcount ? 5355 del.br_blockcount : mod; 5356 if (end < got.br_startoff && 5357 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5358 done = true; 5359 break; 5360 } 5361 continue; 5362 } 5363 /* 5364 * It's written, turn it unwritten. 5365 * This is better than zeroing it. 5366 */ 5367 ASSERT(del.br_state == XFS_EXT_NORM); 5368 ASSERT(tp->t_blk_res > 0); 5369 /* 5370 * If this spans a realtime extent boundary, 5371 * chop it back to the start of the one we end at. 5372 */ 5373 if (del.br_blockcount > mod) { 5374 del.br_startoff += del.br_blockcount - mod; 5375 del.br_startblock += del.br_blockcount - mod; 5376 del.br_blockcount = mod; 5377 } 5378 del.br_state = XFS_EXT_UNWRITTEN; 5379 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5380 whichfork, &icur, &cur, &del, 5381 &logflags); 5382 if (error) 5383 goto error0; 5384 goto nodelete; 5385 } 5386 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); 5387 if (mod) { 5388 /* 5389 * Realtime extent is lined up at the end but not 5390 * at the front. We'll get rid of full extents if 5391 * we can. 5392 */ 5393 mod = mp->m_sb.sb_rextsize - mod; 5394 if (del.br_blockcount > mod) { 5395 del.br_blockcount -= mod; 5396 del.br_startoff += mod; 5397 del.br_startblock += mod; 5398 } else if (del.br_startoff == start && 5399 (del.br_state == XFS_EXT_UNWRITTEN || 5400 tp->t_blk_res == 0)) { 5401 /* 5402 * Can't make it unwritten. There isn't 5403 * a full extent here so just skip it. 
5404 */ 5405 ASSERT(end >= del.br_blockcount); 5406 end -= del.br_blockcount; 5407 if (got.br_startoff > end && 5408 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5409 done = true; 5410 break; 5411 } 5412 continue; 5413 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5414 struct xfs_bmbt_irec prev; 5415 5416 /* 5417 * This one is already unwritten. 5418 * It must have a written left neighbor. 5419 * Unwrite the killed part of that one and 5420 * try again. 5421 */ 5422 if (!xfs_iext_prev_extent(ifp, &icur, &prev)) 5423 ASSERT(0); 5424 ASSERT(prev.br_state == XFS_EXT_NORM); 5425 ASSERT(!isnullstartblock(prev.br_startblock)); 5426 ASSERT(del.br_startblock == 5427 prev.br_startblock + prev.br_blockcount); 5428 if (prev.br_startoff < start) { 5429 mod = start - prev.br_startoff; 5430 prev.br_blockcount -= mod; 5431 prev.br_startblock += mod; 5432 prev.br_startoff = start; 5433 } 5434 prev.br_state = XFS_EXT_UNWRITTEN; 5435 error = xfs_bmap_add_extent_unwritten_real(tp, 5436 ip, whichfork, &icur, &cur, 5437 &prev, &logflags); 5438 if (error) 5439 goto error0; 5440 goto nodelete; 5441 } else { 5442 ASSERT(del.br_state == XFS_EXT_NORM); 5443 del.br_state = XFS_EXT_UNWRITTEN; 5444 error = xfs_bmap_add_extent_unwritten_real(tp, 5445 ip, whichfork, &icur, &cur, 5446 &del, &logflags); 5447 if (error) 5448 goto error0; 5449 goto nodelete; 5450 } 5451 } 5452 5453 delete: 5454 if (wasdel) { 5455 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur, 5456 &got, &del); 5457 } else { 5458 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur, 5459 &del, &tmp_logflags, whichfork, 5460 flags); 5461 logflags |= tmp_logflags; 5462 } 5463 5464 if (error) 5465 goto error0; 5466 5467 max_len -= del.br_blockcount; 5468 end = del.br_startoff - 1; 5469 nodelete: 5470 /* 5471 * If not done go on to the next (previous) record. 5472 */ 5473 if (end != (xfs_fileoff_t)-1 && end >= start) { 5474 if (!xfs_iext_get_extent(ifp, &icur, &got) || 5475 (got.br_startoff > end && 5476 !xfs_iext_prev_extent(ifp, &icur, &got))) { 5477 done = true; 5478 break; 5479 } 5480 extno++; 5481 } 5482 } 5483 if (done || end == (xfs_fileoff_t)-1 || end < start) 5484 *rlen = 0; 5485 else 5486 *rlen = end - start + 1; 5487 5488 /* 5489 * Convert to a btree if necessary. 5490 */ 5491 if (xfs_bmap_needs_btree(ip, whichfork)) { 5492 ASSERT(cur == NULL); 5493 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 5494 &tmp_logflags, whichfork); 5495 logflags |= tmp_logflags; 5496 } else { 5497 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, 5498 whichfork); 5499 } 5500 5501 error0: 5502 /* 5503 * Log everything. Do this after conversion, there's no point in 5504 * logging the extent records if we've converted to btree format. 5505 */ 5506 if ((logflags & xfs_ilog_fext(whichfork)) && 5507 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 5508 logflags &= ~xfs_ilog_fext(whichfork); 5509 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5510 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) 5511 logflags &= ~xfs_ilog_fbroot(whichfork); 5512 /* 5513 * Log inode even in the error case, if the transaction 5514 * is dirty we'll need to shut down the filesystem. 5515 */ 5516 if (logflags) 5517 xfs_trans_log_inode(tp, ip, logflags); 5518 if (cur) { 5519 if (!error) 5520 cur->bc_private.b.allocated = 0; 5521 xfs_btree_del_cursor(cur, error); 5522 } 5523 return error; 5524 } 5525 5526 /* Unmap a range of a file. 
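A thin wrapper around __xfs_bunmapi() for callers that only need a
done/not-done answer rather than the residual length.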
*/ 5527 int 5528 xfs_bunmapi( 5529 xfs_trans_t *tp, 5530 struct xfs_inode *ip, 5531 xfs_fileoff_t bno, 5532 xfs_filblks_t len, 5533 int flags, 5534 xfs_extnum_t nexts, 5535 int *done) 5536 { 5537 int error; 5538 5539 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts); 5540 *done = (len == 0); 5541 return error; 5542 } 5543 5544 /* 5545 * Determine whether an extent shift can be accomplished by a merge with the 5546 * extent that precedes the target hole of the shift. 5547 */ 5548 STATIC bool 5549 xfs_bmse_can_merge( 5550 struct xfs_bmbt_irec *left, /* preceding extent */ 5551 struct xfs_bmbt_irec *got, /* current extent to shift */ 5552 xfs_fileoff_t shift) /* shift fsb */ 5553 { 5554 xfs_fileoff_t startoff; 5555 5556 startoff = got->br_startoff - shift; 5557 5558 /* 5559 * The extent, once shifted, must be adjacent in-file and on-disk with 5560 * the preceding extent. 5561 */ 5562 if ((left->br_startoff + left->br_blockcount != startoff) || 5563 (left->br_startblock + left->br_blockcount != got->br_startblock) || 5564 (left->br_state != got->br_state) || 5565 (left->br_blockcount + got->br_blockcount > MAXEXTLEN)) 5566 return false; 5567 5568 return true; 5569 } 5570 5571 /* 5572 * A bmap extent shift adjusts the file offset of an extent to fill a preceding 5573 * hole in the file. If an extent shift would result in the extent being fully 5574 * adjacent to the extent that currently precedes the hole, we can merge with 5575 * the preceding extent rather than do the shift. 5576 * 5577 * This function assumes the caller has verified a shift-by-merge is possible 5578 * with the provided extents via xfs_bmse_can_merge(). 5579 */ 5580 STATIC int 5581 xfs_bmse_merge( 5582 struct xfs_trans *tp, 5583 struct xfs_inode *ip, 5584 int whichfork, 5585 xfs_fileoff_t shift, /* shift fsb */ 5586 struct xfs_iext_cursor *icur, 5587 struct xfs_bmbt_irec *got, /* extent to shift */ 5588 struct xfs_bmbt_irec *left, /* preceding extent */ 5589 struct xfs_btree_cur *cur, 5590 int *logflags) /* output */ 5591 { 5592 struct xfs_bmbt_irec new; 5593 xfs_filblks_t blockcount; 5594 int error, i; 5595 struct xfs_mount *mp = ip->i_mount; 5596 5597 blockcount = left->br_blockcount + got->br_blockcount; 5598 5599 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 5600 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5601 ASSERT(xfs_bmse_can_merge(left, got, shift)); 5602 5603 new = *left; 5604 new.br_blockcount = blockcount; 5605 5606 /* 5607 * Update the on-disk extent count, the btree if necessary and log the 5608 * inode. 5609 */ 5610 XFS_IFORK_NEXT_SET(ip, whichfork, 5611 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 5612 *logflags |= XFS_ILOG_CORE; 5613 if (!cur) { 5614 *logflags |= XFS_ILOG_DEXT; 5615 goto done; 5616 } 5617 5618 /* lookup and remove the extent to merge */ 5619 error = xfs_bmbt_lookup_eq(cur, got, &i); 5620 if (error) 5621 return error; 5622 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5623 5624 error = xfs_btree_delete(cur, &i); 5625 if (error) 5626 return error; 5627 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5628 5629 /* lookup and update size of the previous extent */ 5630 error = xfs_bmbt_lookup_eq(cur, left, &i); 5631 if (error) 5632 return error; 5633 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5634 5635 error = xfs_bmbt_update(cur, &new); 5636 if (error) 5637 return error; 5638 5639 done: 5640 xfs_iext_remove(ip, icur, 0); 5641 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur); 5642 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5643 &new); 5644 5645 /* update reverse mapping. 
rmap functions merge the rmaps for us */ 5646 error = xfs_rmap_unmap_extent(tp, ip, whichfork, got); 5647 if (error) 5648 return error; 5649 memcpy(&new, got, sizeof(new)); 5650 new.br_startoff = left->br_startoff + left->br_blockcount; 5651 return xfs_rmap_map_extent(tp, ip, whichfork, &new); 5652 } 5653 5654 static int 5655 xfs_bmap_shift_update_extent( 5656 struct xfs_trans *tp, 5657 struct xfs_inode *ip, 5658 int whichfork, 5659 struct xfs_iext_cursor *icur, 5660 struct xfs_bmbt_irec *got, 5661 struct xfs_btree_cur *cur, 5662 int *logflags, 5663 xfs_fileoff_t startoff) 5664 { 5665 struct xfs_mount *mp = ip->i_mount; 5666 struct xfs_bmbt_irec prev = *got; 5667 int error, i; 5668 5669 *logflags |= XFS_ILOG_CORE; 5670 5671 got->br_startoff = startoff; 5672 5673 if (cur) { 5674 error = xfs_bmbt_lookup_eq(cur, &prev, &i); 5675 if (error) 5676 return error; 5677 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 5678 5679 error = xfs_bmbt_update(cur, got); 5680 if (error) 5681 return error; 5682 } else { 5683 *logflags |= XFS_ILOG_DEXT; 5684 } 5685 5686 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur, 5687 got); 5688 5689 /* update reverse mapping */ 5690 error = xfs_rmap_unmap_extent(tp, ip, whichfork, &prev); 5691 if (error) 5692 return error; 5693 return xfs_rmap_map_extent(tp, ip, whichfork, got); 5694 } 5695 5696 int 5697 xfs_bmap_collapse_extents( 5698 struct xfs_trans *tp, 5699 struct xfs_inode *ip, 5700 xfs_fileoff_t *next_fsb, 5701 xfs_fileoff_t offset_shift_fsb, 5702 bool *done) 5703 { 5704 int whichfork = XFS_DATA_FORK; 5705 struct xfs_mount *mp = ip->i_mount; 5706 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); 5707 struct xfs_btree_cur *cur = NULL; 5708 struct xfs_bmbt_irec got, prev; 5709 struct xfs_iext_cursor icur; 5710 xfs_fileoff_t new_startoff; 5711 int error = 0; 5712 int logflags = 0; 5713 5714 if (unlikely(XFS_TEST_ERROR( 5715 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 5716 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 5717 mp, XFS_ERRTAG_BMAPIFORMAT))) { 5718 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 5719 return -EFSCORRUPTED; 5720 } 5721 5722 if (XFS_FORCED_SHUTDOWN(mp)) 5723 return -EIO; 5724 5725 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL)); 5726 5727 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 5728 error = xfs_iread_extents(tp, ip, whichfork); 5729 if (error) 5730 return error; 5731 } 5732 5733 if (ifp->if_flags & XFS_IFBROOT) { 5734 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5735 cur->bc_private.b.flags = 0; 5736 } 5737 5738 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) { 5739 *done = true; 5740 goto del_cursor; 5741 } 5742 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock), 5743 del_cursor); 5744 5745 new_startoff = got.br_startoff - offset_shift_fsb; 5746 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) { 5747 if (new_startoff < prev.br_startoff + prev.br_blockcount) { 5748 error = -EINVAL; 5749 goto del_cursor; 5750 } 5751 5752 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) { 5753 error = xfs_bmse_merge(tp, ip, whichfork, 5754 offset_shift_fsb, &icur, &got, &prev, 5755 cur, &logflags); 5756 if (error) 5757 goto del_cursor; 5758 goto done; 5759 } 5760 } else { 5761 if (got.br_startoff < offset_shift_fsb) { 5762 error = -EINVAL; 5763 goto del_cursor; 5764 } 5765 } 5766 5767 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got, 5768 cur, &logflags, new_startoff); 5769 if (error) 5770 goto del_cursor; 5771 5772 done: 5773 if 
/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
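
/*
 * Worked example (assuming the bmbt's 54-bit startoff field, i.e.
 * BMBT_STARTOFF_MASK == (1ULL << 54) - 1, with illustrative offsets): if
 * the file's last extent starts at offset 0x3ffffffffffff0 and the
 * requested right shift is 0x20 blocks, the sum wraps the mask, the masked
 * result (0x10) compares below the original startoff, and the function
 * returns -EINVAL rather than allow a shift that would overflow the
 * on-disk extent format.
 */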
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
				del_cursor);

	if (stop_fsb >= got.br_startoff + got.br_blockcount) {
		error = -EIO;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way. We should
		 * never find mergeable extents in this scenario. Check anyway
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
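
/*
 * Usage sketch (illustrative only): a right shift walks backwards from the
 * last extent towards @stop_fsb, so the first call passes
 * *next_fsb == NULLFSBLOCK and later calls reuse the value filled in here:
 *
 *	next_fsb = NULLFSBLOCK;
 *	while (!done) {
 *		error = xfs_bmap_insert_extents(tp, ip, &next_fsb,
 *				offset_shift_fsb, &done, stop_fsb);
 *		if (error)
 *			break;
 *	}
 *
 * As with the collapse case, the actual callers typically manage
 * transaction allocation and commit around each call.
 */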
/*
 * Split an extent into two extents at split_fsb block such that split_fsb
 * becomes the first block of the new extent.  @split_fsb is the file offset
 * of the block at which the extent is split.  If split_fsb lies in a hole
 * or at the first block of an extent, there is nothing to do and we
 * return 0.
 */
STATIC int
xfs_bmap_split_extent_at(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new; /* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	xfs_fsblock_t		gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) + 1);

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

int
xfs_bmap_split_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
	if (error)
		goto out;

	return xfs_trans_commit(tp);

out:
	xfs_trans_cancel(tp);
	return error;
}
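
/*
 * Example with illustrative numbers: if the data fork holds a single extent
 * { br_startoff 90, br_startblock 1000, br_blockcount 20 }, then
 *
 *	error = xfs_bmap_split_extent(ip, 100);
 *
 * leaves two records, { 90, 1000, 10 } and { 100, 1010, 10 }, which still
 * describe the same blocks on disk; only the incore extent list (and the
 * bmap btree, if one is in use) changes.
 */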
/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return  bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
int
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
int
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return 0;

	return __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}

/*
 * Process one of the deferred bmap operations.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error = 0;

	ASSERT(tp->t_firstblock == NULLFSBLOCK);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
			ip->i_ino, whichfork, startoff, *blockcount, state);

	if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
				startblock, 0);
		*blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, ip, startoff, blockcount,
				XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}
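
/*
 * Deferred-op sketch (illustrative only): a caller that wants to remap an
 * already-allocated extent @irec into the data fork of @ip logs an intent
 * now and lets the deferred-ops machinery do the work later:
 *
 *	error = xfs_bmap_map_extent(tp, ip, &irec);
 *
 * When the intent item is processed, xfs_bmap_finish_one() performs the
 * actual mapping via xfs_bmapi_remap() (or __xfs_bunmapi() with
 * XFS_BMAPI_REMAP for an unmap intent).
 */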
/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		endfsb;
	bool			isrt;

	isrt = XFS_IS_REALTIME_INODE(ip);
	endfsb = irec->br_startblock + irec->br_blockcount - 1;
	if (isrt) {
		if (!xfs_verify_rtbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_rtbno(mp, endfsb))
			return __this_address;
	} else {
		if (!xfs_verify_fsbno(mp, irec->br_startblock))
			return __this_address;
		if (!xfs_verify_fsbno(mp, endfsb))
			return __this_address;
		if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
		    XFS_FSB_TO_AGNO(mp, endfsb))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}
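
/*
 * Example: an extent whose br_startblock lands in AG 3 but whose last block
 * would land in AG 4 fails the XFS_FSB_TO_AGNO() comparison above, and the
 * caller receives the failure address for its corruption report.  Likewise,
 * an unwritten extent is only considered valid in the data fork.
 */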