// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtbitmap.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"

struct kmem_cache		*xfs_bmap_intent_cache;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	uint64_t	maxblocks;	/* max blocks at this level */
	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
	int		level;		/* btree level */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a fork, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's
	 * fixed but probably at various positions. Therefore, for both ATTR1
	 * and ATTR2 we have to assume the worst case scenario of a minimum
	 * size available.
	 */
	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
				whichfork);
	if (whichfork == XFS_DATA_FORK)
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	else
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);

	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = howmany_64(maxleafents, minleafrecs);
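	/*
	 * Work upward from the leaf level: each interior level must index at
	 * least minnoderecs blocks from the level below, so repeatedly
	 * dividing the worst-case block count bounds the height.  A level
	 * small enough to fit in the inode root (maxrootrecs) ends the loop.
	 */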
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = howmany_64(maxblocks, minnoderecs);
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}

unsigned int
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
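	/*
	 * Sum the per-level cost of indexing "len" blocks: each level needs
	 * at most howmany(len, maxrecs) blocks to point at the level below,
	 * and once a level collapses to a single block, one more block per
	 * remaining level is all that can possibly be required.
	 */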
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}

/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
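		 * The quadratic scan over the remaining pointers is
		 * tolerable here because this check only runs in DEBUG
		 * builds.
		 */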
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */
		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */
		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
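 * Also check that successive mappings are contiguous in file offset and
 * that no delalloc or hole sentinel startblocks leak back to the caller.
 */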
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
		return -EFSCORRUPTED;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
			XFS_AG_RESV_NONE);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
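	/*
	 * The child block is gone; shrink the incore root away as well so
	 * the fork can hold the extent list directly.
	 */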
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int				/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	**curp,	/* cursor returned to caller */
	int			wasdel,	/* converting a delayed alloc */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_btree_block	*ablock;  /* allocated (child) bt block */
	struct xfs_buf		*abp;	  /* buffer for ablock */
	struct xfs_alloc_arg	args;	  /* allocation arguments */
	struct xfs_bmbt_rec	*arp;	  /* child record pointer */
	struct xfs_btree_block	*block;	  /* btree root block */
	struct xfs_btree_cur	*cur;	  /* bmap btree cursor */
	int			error;	  /* error return value */
	struct xfs_ifork	*ifp;	  /* inode fork pointer */
	struct xfs_bmbt_key	*kp;	  /* root block key pointer */
	struct xfs_mount	*mp;	  /* mount structure */
	xfs_bmbt_ptr_t		*pp;	  /* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_ino.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
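	 * Copy every real extent from the incore extent tree into the new
	 * leaf; delalloc extents have no on-disk form yet and are skipped.
	 */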
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
				 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);

	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(args.mp, ip->i_ino));
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much
	 * to log here. Note that init_fn must also set the buffer log item
	 * type correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int				/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			*flags)	/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;	/* btree cursor */
	int			error;	/* error return value */
	xfs_mount_t		*mp;	/* file system mount struct */
	int			stat;	/* newroot status */

	mp = ip->i_mount;

	if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
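 * If the extent list still fits inline in the data fork there is nothing to
 * do; otherwise the data fork is converted to btree format to make room for
 * the new attribute fork.
 */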
STATIC int				/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	int			*flags)	/* inode logging flags */
{
	struct xfs_btree_cur	*cur;	/* bmap btree cursor */
	int			error;	/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    xfs_inode_data_fork_size(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int				/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	int			*flags)	/* inode logging flags */
{
	struct xfs_da_args	dargs;	/* args for dir/attr code */

	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		else if (xfs_has_attr2(ip->i_mount) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
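 * Both the transaction and the inode lock are taken here and released again
 * before returning.
 */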
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(xfs_inode_has_attr_fork(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd, &tp);
	if (error)
		return error;
	if (xfs_inode_has_attr_fork(ip))
		goto trans_cancel;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_has_attr(mp) ||
	    (!xfs_has_attr2(mp) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			log_sb = true;
		}
		if (!xfs_has_attr2(mp) && version == 2) {
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};

int
xfs_bmap_complain_bad_rec(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_failaddr_t		fa,
	const struct xfs_bmbt_irec *irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	const char		*forkname;

	switch (whichfork) {
	case XFS_DATA_FORK:	forkname = "data"; break;
	case XFS_ATTR_FORK:	forkname = "attr"; break;
	case XFS_COW_FORK:	forkname = "CoW"; break;
	default:		forkname = "???"; break;
	}

	xfs_warn(mp,
		"Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
		ip->i_ino, forkname, fa);
	xfs_warn(mp,
		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
		irec->br_state);

	return -EFSCORRUPTED;
}

/*
 * Stuff every bmbt record from this block into the incore extent map.
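 * Called back from xfs_btree_visit_blocks for each leaf block of the tree;
 * every record is validated before it is inserted into the incore tree.
 */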
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
					&new);
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_need_iread_extents(ifp))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	/*
	 * Use release semantics so that we can use acquire semantics in
	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
	 * after that load.
	 */
	smp_store_release(&ifp->if_needextents, 0);
	return 0;
out:
	xfs_iext_destroy(ifp);
	return error;
}

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
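 * The scan below tracks the highest file offset seen so far ("max") and
 * stops at the first extent whose start leaves a gap of at least "len"
 * blocks behind it.
 */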
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
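 * An allocation landing inside the trailing delayed allocation extent also
 * counts as at-EOF, since those blocks become the EOF blocks once written.
 */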
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
		return -EFSCORRUPTED;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new;	/* new count del alloc blocks used */
	xfs_filblks_t		da_old;	/* old count del alloc blocks used */
	xfs_filblks_t		temp = 0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
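	 * Together with the CONTIG bits computed below, these four state
	 * bits describe the overlap completely, so the switch further down
	 * can enumerate every legal combination exactly once.
	 */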
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take
		 * care with delay -> unwritten extent allocation here because
		 * the delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		ASSERT(da_new <= da_old);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		ASSERT(da_new <= da_old);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *			      new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
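		 * A neighbour can only be contiguous when the matching
		 * FILLING bit is also set; otherwise part of the delalloc
		 * extent still separates the new allocation from that
		 * neighbour.
		 */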
1914 */ 1915 ASSERT(0); 1916 } 1917 1918 /* add reverse mapping unless caller opted out */ 1919 if (!(bma->flags & XFS_BMAPI_NORMAP)) 1920 xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new); 1921 1922 /* convert to a btree if necessary */ 1923 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { 1924 int tmp_logflags; /* partial log flag return val */ 1925 1926 ASSERT(bma->cur == NULL); 1927 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, 1928 &bma->cur, da_old > 0, &tmp_logflags, 1929 whichfork); 1930 bma->logflags |= tmp_logflags; 1931 if (error) 1932 goto done; 1933 } 1934 1935 if (da_new != da_old) 1936 xfs_mod_delalloc(mp, (int64_t)da_new - da_old); 1937 1938 if (bma->cur) { 1939 da_new += bma->cur->bc_ino.allocated; 1940 bma->cur->bc_ino.allocated = 0; 1941 } 1942 1943 /* adjust for changes in reserved delayed indirect blocks */ 1944 if (da_new != da_old) 1945 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), 1946 true); 1947 1948 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); 1949 done: 1950 if (whichfork != XFS_COW_FORK) 1951 bma->logflags |= rval; 1952 return error; 1953 #undef LEFT 1954 #undef RIGHT 1955 #undef PREV 1956 } 1957 1958 /* 1959 * Convert an unwritten allocation to a real allocation or vice versa. 1960 */ 1961 int /* error */ 1962 xfs_bmap_add_extent_unwritten_real( 1963 struct xfs_trans *tp, 1964 xfs_inode_t *ip, /* incore inode pointer */ 1965 int whichfork, 1966 struct xfs_iext_cursor *icur, 1967 struct xfs_btree_cur **curp, /* if *curp is null, not a btree */ 1968 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 1969 int *logflagsp) /* inode logging flags */ 1970 { 1971 struct xfs_btree_cur *cur; /* btree cursor */ 1972 int error; /* error return value */ 1973 int i; /* temp state */ 1974 struct xfs_ifork *ifp; /* inode fork pointer */ 1975 xfs_fileoff_t new_endoff; /* end offset of new entry */ 1976 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 1977 /* left is 0, right is 1, prev is 2 */ 1978 int rval=0; /* return value (logging flags) */ 1979 uint32_t state = xfs_bmap_fork_to_state(whichfork); 1980 struct xfs_mount *mp = ip->i_mount; 1981 struct xfs_bmbt_irec old; 1982 1983 *logflagsp = 0; 1984 1985 cur = *curp; 1986 ifp = xfs_ifork_ptr(ip, whichfork); 1987 1988 ASSERT(!isnullstartblock(new->br_startblock)); 1989 1990 XFS_STATS_INC(mp, xs_add_exlist); 1991 1992 #define LEFT r[0] 1993 #define RIGHT r[1] 1994 #define PREV r[2] 1995 1996 /* 1997 * Set up a bunch of variables to make the tests simpler. 1998 */ 1999 error = 0; 2000 xfs_iext_get_extent(ifp, icur, &PREV); 2001 ASSERT(new->br_state != PREV.br_state); 2002 new_endoff = new->br_startoff + new->br_blockcount; 2003 ASSERT(PREV.br_startoff <= new->br_startoff); 2004 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 2005 2006 /* 2007 * Set flags determining what part of the previous oldext allocation 2008 * extent is being replaced by a newext allocation. 2009 */ 2010 if (PREV.br_startoff == new->br_startoff) 2011 state |= BMAP_LEFT_FILLING; 2012 if (PREV.br_startoff + PREV.br_blockcount == new_endoff) 2013 state |= BMAP_RIGHT_FILLING; 2014 2015 /* 2016 * Check and set flags if this segment has a left neighbor. 2017 * Don't set contiguous if the combined extent would be too large. 
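 * (Too large means the merged length would exceed XFS_MAX_BMBT_EXTLEN,
 * the largest block count a single extent record can describe.)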
2018 */ 2019 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) { 2020 state |= BMAP_LEFT_VALID; 2021 if (isnullstartblock(LEFT.br_startblock)) 2022 state |= BMAP_LEFT_DELAY; 2023 } 2024 2025 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2026 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 2027 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 2028 LEFT.br_state == new->br_state && 2029 LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) 2030 state |= BMAP_LEFT_CONTIG; 2031 2032 /* 2033 * Check and set flags if this segment has a right neighbor. 2034 * Don't set contiguous if the combined extent would be too large. 2035 * Also check for all-three-contiguous being too large. 2036 */ 2037 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) { 2038 state |= BMAP_RIGHT_VALID; 2039 if (isnullstartblock(RIGHT.br_startblock)) 2040 state |= BMAP_RIGHT_DELAY; 2041 } 2042 2043 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2044 new_endoff == RIGHT.br_startoff && 2045 new->br_startblock + new->br_blockcount == RIGHT.br_startblock && 2046 new->br_state == RIGHT.br_state && 2047 new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN && 2048 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2049 BMAP_RIGHT_FILLING)) != 2050 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING | 2051 BMAP_RIGHT_FILLING) || 2052 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 2053 <= XFS_MAX_BMBT_EXTLEN)) 2054 state |= BMAP_RIGHT_CONTIG; 2055 2056 /* 2057 * Switch out based on the FILLING and CONTIG state bits. 2058 */ 2059 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2060 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) { 2061 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | 2062 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2063 /* 2064 * Setting all of a previous oldext extent to newext. 2065 * The left and right neighbors are both contiguous with new. 2066 */ 2067 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount; 2068 2069 xfs_iext_remove(ip, icur, state); 2070 xfs_iext_remove(ip, icur, state); 2071 xfs_iext_prev(ifp, icur); 2072 xfs_iext_update_extent(ip, state, icur, &LEFT); 2073 ifp->if_nextents -= 2; 2074 if (cur == NULL) 2075 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2076 else { 2077 rval = XFS_ILOG_CORE; 2078 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2079 if (error) 2080 goto done; 2081 if (XFS_IS_CORRUPT(mp, i != 1)) { 2082 error = -EFSCORRUPTED; 2083 goto done; 2084 } 2085 if ((error = xfs_btree_delete(cur, &i))) 2086 goto done; 2087 if (XFS_IS_CORRUPT(mp, i != 1)) { 2088 error = -EFSCORRUPTED; 2089 goto done; 2090 } 2091 if ((error = xfs_btree_decrement(cur, 0, &i))) 2092 goto done; 2093 if (XFS_IS_CORRUPT(mp, i != 1)) { 2094 error = -EFSCORRUPTED; 2095 goto done; 2096 } 2097 if ((error = xfs_btree_delete(cur, &i))) 2098 goto done; 2099 if (XFS_IS_CORRUPT(mp, i != 1)) { 2100 error = -EFSCORRUPTED; 2101 goto done; 2102 } 2103 if ((error = xfs_btree_decrement(cur, 0, &i))) 2104 goto done; 2105 if (XFS_IS_CORRUPT(mp, i != 1)) { 2106 error = -EFSCORRUPTED; 2107 goto done; 2108 } 2109 error = xfs_bmbt_update(cur, &LEFT); 2110 if (error) 2111 goto done; 2112 } 2113 break; 2114 2115 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2116 /* 2117 * Setting all of a previous oldext extent to newext. 2118 * The left neighbor is contiguous, the right is not. 
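 * PREV is absorbed into LEFT: LEFT grows by PREV's length and one record
 * disappears from the incore list (and from the btree when one exists).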
2119 */ 2120 LEFT.br_blockcount += PREV.br_blockcount; 2121 2122 xfs_iext_remove(ip, icur, state); 2123 xfs_iext_prev(ifp, icur); 2124 xfs_iext_update_extent(ip, state, icur, &LEFT); 2125 ifp->if_nextents--; 2126 if (cur == NULL) 2127 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2128 else { 2129 rval = XFS_ILOG_CORE; 2130 error = xfs_bmbt_lookup_eq(cur, &PREV, &i); 2131 if (error) 2132 goto done; 2133 if (XFS_IS_CORRUPT(mp, i != 1)) { 2134 error = -EFSCORRUPTED; 2135 goto done; 2136 } 2137 if ((error = xfs_btree_delete(cur, &i))) 2138 goto done; 2139 if (XFS_IS_CORRUPT(mp, i != 1)) { 2140 error = -EFSCORRUPTED; 2141 goto done; 2142 } 2143 if ((error = xfs_btree_decrement(cur, 0, &i))) 2144 goto done; 2145 if (XFS_IS_CORRUPT(mp, i != 1)) { 2146 error = -EFSCORRUPTED; 2147 goto done; 2148 } 2149 error = xfs_bmbt_update(cur, &LEFT); 2150 if (error) 2151 goto done; 2152 } 2153 break; 2154 2155 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2156 /* 2157 * Setting all of a previous oldext extent to newext. 2158 * The right neighbor is contiguous, the left is not. 2159 */ 2160 PREV.br_blockcount += RIGHT.br_blockcount; 2161 PREV.br_state = new->br_state; 2162 2163 xfs_iext_next(ifp, icur); 2164 xfs_iext_remove(ip, icur, state); 2165 xfs_iext_prev(ifp, icur); 2166 xfs_iext_update_extent(ip, state, icur, &PREV); 2167 ifp->if_nextents--; 2168 2169 if (cur == NULL) 2170 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2171 else { 2172 rval = XFS_ILOG_CORE; 2173 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i); 2174 if (error) 2175 goto done; 2176 if (XFS_IS_CORRUPT(mp, i != 1)) { 2177 error = -EFSCORRUPTED; 2178 goto done; 2179 } 2180 if ((error = xfs_btree_delete(cur, &i))) 2181 goto done; 2182 if (XFS_IS_CORRUPT(mp, i != 1)) { 2183 error = -EFSCORRUPTED; 2184 goto done; 2185 } 2186 if ((error = xfs_btree_decrement(cur, 0, &i))) 2187 goto done; 2188 if (XFS_IS_CORRUPT(mp, i != 1)) { 2189 error = -EFSCORRUPTED; 2190 goto done; 2191 } 2192 error = xfs_bmbt_update(cur, &PREV); 2193 if (error) 2194 goto done; 2195 } 2196 break; 2197 2198 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 2199 /* 2200 * Setting all of a previous oldext extent to newext. 2201 * Neither the left nor right neighbors are contiguous with 2202 * the new one. 2203 */ 2204 PREV.br_state = new->br_state; 2205 xfs_iext_update_extent(ip, state, icur, &PREV); 2206 2207 if (cur == NULL) 2208 rval = XFS_ILOG_DEXT; 2209 else { 2210 rval = 0; 2211 error = xfs_bmbt_lookup_eq(cur, new, &i); 2212 if (error) 2213 goto done; 2214 if (XFS_IS_CORRUPT(mp, i != 1)) { 2215 error = -EFSCORRUPTED; 2216 goto done; 2217 } 2218 error = xfs_bmbt_update(cur, &PREV); 2219 if (error) 2220 goto done; 2221 } 2222 break; 2223 2224 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 2225 /* 2226 * Setting the first part of a previous oldext extent to newext. 2227 * The left neighbor is contiguous. 
2228 */ 2229 LEFT.br_blockcount += new->br_blockcount; 2230 2231 old = PREV; 2232 PREV.br_startoff += new->br_blockcount; 2233 PREV.br_startblock += new->br_blockcount; 2234 PREV.br_blockcount -= new->br_blockcount; 2235 2236 xfs_iext_update_extent(ip, state, icur, &PREV); 2237 xfs_iext_prev(ifp, icur); 2238 xfs_iext_update_extent(ip, state, icur, &LEFT); 2239 2240 if (cur == NULL) 2241 rval = XFS_ILOG_DEXT; 2242 else { 2243 rval = 0; 2244 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2245 if (error) 2246 goto done; 2247 if (XFS_IS_CORRUPT(mp, i != 1)) { 2248 error = -EFSCORRUPTED; 2249 goto done; 2250 } 2251 error = xfs_bmbt_update(cur, &PREV); 2252 if (error) 2253 goto done; 2254 error = xfs_btree_decrement(cur, 0, &i); 2255 if (error) 2256 goto done; 2257 error = xfs_bmbt_update(cur, &LEFT); 2258 if (error) 2259 goto done; 2260 } 2261 break; 2262 2263 case BMAP_LEFT_FILLING: 2264 /* 2265 * Setting the first part of a previous oldext extent to newext. 2266 * The left neighbor is not contiguous. 2267 */ 2268 old = PREV; 2269 PREV.br_startoff += new->br_blockcount; 2270 PREV.br_startblock += new->br_blockcount; 2271 PREV.br_blockcount -= new->br_blockcount; 2272 2273 xfs_iext_update_extent(ip, state, icur, &PREV); 2274 xfs_iext_insert(ip, icur, new, state); 2275 ifp->if_nextents++; 2276 2277 if (cur == NULL) 2278 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2279 else { 2280 rval = XFS_ILOG_CORE; 2281 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2282 if (error) 2283 goto done; 2284 if (XFS_IS_CORRUPT(mp, i != 1)) { 2285 error = -EFSCORRUPTED; 2286 goto done; 2287 } 2288 error = xfs_bmbt_update(cur, &PREV); 2289 if (error) 2290 goto done; 2291 cur->bc_rec.b = *new; 2292 if ((error = xfs_btree_insert(cur, &i))) 2293 goto done; 2294 if (XFS_IS_CORRUPT(mp, i != 1)) { 2295 error = -EFSCORRUPTED; 2296 goto done; 2297 } 2298 } 2299 break; 2300 2301 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 2302 /* 2303 * Setting the last part of a previous oldext extent to newext. 2304 * The right neighbor is contiguous with the new allocation. 2305 */ 2306 old = PREV; 2307 PREV.br_blockcount -= new->br_blockcount; 2308 2309 RIGHT.br_startoff = new->br_startoff; 2310 RIGHT.br_startblock = new->br_startblock; 2311 RIGHT.br_blockcount += new->br_blockcount; 2312 2313 xfs_iext_update_extent(ip, state, icur, &PREV); 2314 xfs_iext_next(ifp, icur); 2315 xfs_iext_update_extent(ip, state, icur, &RIGHT); 2316 2317 if (cur == NULL) 2318 rval = XFS_ILOG_DEXT; 2319 else { 2320 rval = 0; 2321 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2322 if (error) 2323 goto done; 2324 if (XFS_IS_CORRUPT(mp, i != 1)) { 2325 error = -EFSCORRUPTED; 2326 goto done; 2327 } 2328 error = xfs_bmbt_update(cur, &PREV); 2329 if (error) 2330 goto done; 2331 error = xfs_btree_increment(cur, 0, &i); 2332 if (error) 2333 goto done; 2334 error = xfs_bmbt_update(cur, &RIGHT); 2335 if (error) 2336 goto done; 2337 } 2338 break; 2339 2340 case BMAP_RIGHT_FILLING: 2341 /* 2342 * Setting the last part of a previous oldext extent to newext. 2343 * The right neighbor is not contiguous. 
2344 */ 2345 old = PREV; 2346 PREV.br_blockcount -= new->br_blockcount; 2347 2348 xfs_iext_update_extent(ip, state, icur, &PREV); 2349 xfs_iext_next(ifp, icur); 2350 xfs_iext_insert(ip, icur, new, state); 2351 ifp->if_nextents++; 2352 2353 if (cur == NULL) 2354 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2355 else { 2356 rval = XFS_ILOG_CORE; 2357 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2358 if (error) 2359 goto done; 2360 if (XFS_IS_CORRUPT(mp, i != 1)) { 2361 error = -EFSCORRUPTED; 2362 goto done; 2363 } 2364 error = xfs_bmbt_update(cur, &PREV); 2365 if (error) 2366 goto done; 2367 error = xfs_bmbt_lookup_eq(cur, new, &i); 2368 if (error) 2369 goto done; 2370 if (XFS_IS_CORRUPT(mp, i != 0)) { 2371 error = -EFSCORRUPTED; 2372 goto done; 2373 } 2374 if ((error = xfs_btree_insert(cur, &i))) 2375 goto done; 2376 if (XFS_IS_CORRUPT(mp, i != 1)) { 2377 error = -EFSCORRUPTED; 2378 goto done; 2379 } 2380 } 2381 break; 2382 2383 case 0: 2384 /* 2385 * Setting the middle part of a previous oldext extent to 2386 * newext. Contiguity is impossible here. 2387 * One extent becomes three extents. 2388 */ 2389 old = PREV; 2390 PREV.br_blockcount = new->br_startoff - PREV.br_startoff; 2391 2392 r[0] = *new; 2393 r[1].br_startoff = new_endoff; 2394 r[1].br_blockcount = 2395 old.br_startoff + old.br_blockcount - new_endoff; 2396 r[1].br_startblock = new->br_startblock + new->br_blockcount; 2397 r[1].br_state = PREV.br_state; 2398 2399 xfs_iext_update_extent(ip, state, icur, &PREV); 2400 xfs_iext_next(ifp, icur); 2401 xfs_iext_insert(ip, icur, &r[1], state); 2402 xfs_iext_insert(ip, icur, &r[0], state); 2403 ifp->if_nextents += 2; 2404 2405 if (cur == NULL) 2406 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 2407 else { 2408 rval = XFS_ILOG_CORE; 2409 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2410 if (error) 2411 goto done; 2412 if (XFS_IS_CORRUPT(mp, i != 1)) { 2413 error = -EFSCORRUPTED; 2414 goto done; 2415 } 2416 /* new right extent - oldext */ 2417 error = xfs_bmbt_update(cur, &r[1]); 2418 if (error) 2419 goto done; 2420 /* new left extent - oldext */ 2421 cur->bc_rec.b = PREV; 2422 if ((error = xfs_btree_insert(cur, &i))) 2423 goto done; 2424 if (XFS_IS_CORRUPT(mp, i != 1)) { 2425 error = -EFSCORRUPTED; 2426 goto done; 2427 } 2428 /* 2429 * Reset the cursor to the position of the new extent 2430 * we are about to insert as we can't trust it after 2431 * the previous insert. 2432 */ 2433 error = xfs_bmbt_lookup_eq(cur, new, &i); 2434 if (error) 2435 goto done; 2436 if (XFS_IS_CORRUPT(mp, i != 0)) { 2437 error = -EFSCORRUPTED; 2438 goto done; 2439 } 2440 /* new middle extent - newext */ 2441 if ((error = xfs_btree_insert(cur, &i))) 2442 goto done; 2443 if (XFS_IS_CORRUPT(mp, i != 1)) { 2444 error = -EFSCORRUPTED; 2445 goto done; 2446 } 2447 } 2448 break; 2449 2450 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2451 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2452 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG: 2453 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 2454 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2455 case BMAP_LEFT_CONTIG: 2456 case BMAP_RIGHT_CONTIG: 2457 /* 2458 * These cases are all impossible. 
2459 */ 2460 ASSERT(0); 2461 } 2462 2463 /* update reverse mappings */ 2464 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new); 2465 2466 /* convert to a btree if necessary */ 2467 if (xfs_bmap_needs_btree(ip, whichfork)) { 2468 int tmp_logflags; /* partial log flag return val */ 2469 2470 ASSERT(cur == NULL); 2471 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 2472 &tmp_logflags, whichfork); 2473 *logflagsp |= tmp_logflags; 2474 if (error) 2475 goto done; 2476 } 2477 2478 /* clear out the allocated field, done with it now in any case. */ 2479 if (cur) { 2480 cur->bc_ino.allocated = 0; 2481 *curp = cur; 2482 } 2483 2484 xfs_bmap_check_leaf_extents(*curp, ip, whichfork); 2485 done: 2486 *logflagsp |= rval; 2487 return error; 2488 #undef LEFT 2489 #undef RIGHT 2490 #undef PREV 2491 } 2492 2493 /* 2494 * Convert a hole to a delayed allocation. 2495 */ 2496 STATIC void 2497 xfs_bmap_add_extent_hole_delay( 2498 xfs_inode_t *ip, /* incore inode pointer */ 2499 int whichfork, 2500 struct xfs_iext_cursor *icur, 2501 xfs_bmbt_irec_t *new) /* new data to add to file extents */ 2502 { 2503 struct xfs_ifork *ifp; /* inode fork pointer */ 2504 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2505 xfs_filblks_t newlen=0; /* new indirect size */ 2506 xfs_filblks_t oldlen=0; /* old indirect size */ 2507 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2508 uint32_t state = xfs_bmap_fork_to_state(whichfork); 2509 xfs_filblks_t temp; /* temp for indirect calculations */ 2510 2511 ifp = xfs_ifork_ptr(ip, whichfork); 2512 ASSERT(isnullstartblock(new->br_startblock)); 2513 2514 /* 2515 * Check and set flags if this segment has a left neighbor 2516 */ 2517 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2518 state |= BMAP_LEFT_VALID; 2519 if (isnullstartblock(left.br_startblock)) 2520 state |= BMAP_LEFT_DELAY; 2521 } 2522 2523 /* 2524 * Check and set flags if the current (right) segment exists. 2525 * If it doesn't exist, we're converting the hole at end-of-file. 2526 */ 2527 if (xfs_iext_get_extent(ifp, icur, &right)) { 2528 state |= BMAP_RIGHT_VALID; 2529 if (isnullstartblock(right.br_startblock)) 2530 state |= BMAP_RIGHT_DELAY; 2531 } 2532 2533 /* 2534 * Set contiguity flags on the left and right neighbors. 2535 * Don't let extents get too large, even if the pieces are contiguous. 2536 */ 2537 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && 2538 left.br_startoff + left.br_blockcount == new->br_startoff && 2539 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) 2540 state |= BMAP_LEFT_CONTIG; 2541 2542 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && 2543 new->br_startoff + new->br_blockcount == right.br_startoff && 2544 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && 2545 (!(state & BMAP_LEFT_CONTIG) || 2546 (left.br_blockcount + new->br_blockcount + 2547 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))) 2548 state |= BMAP_RIGHT_CONTIG; 2549 2550 /* 2551 * Switch out based on the contiguity flags. 2552 */ 2553 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2554 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2555 /* 2556 * New allocation is contiguous with delayed allocations 2557 * on the left and on the right. 2558 * Merge all three into a single extent record. 
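 * The worst-case indirect block reservation is recomputed for the merged
 * length and capped at the sum of the three old reservations; any excess
 * is returned to the free block count at the end of this function.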
2559 */ 2560 temp = left.br_blockcount + new->br_blockcount + 2561 right.br_blockcount; 2562 2563 oldlen = startblockval(left.br_startblock) + 2564 startblockval(new->br_startblock) + 2565 startblockval(right.br_startblock); 2566 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2567 oldlen); 2568 left.br_startblock = nullstartblock(newlen); 2569 left.br_blockcount = temp; 2570 2571 xfs_iext_remove(ip, icur, state); 2572 xfs_iext_prev(ifp, icur); 2573 xfs_iext_update_extent(ip, state, icur, &left); 2574 break; 2575 2576 case BMAP_LEFT_CONTIG: 2577 /* 2578 * New allocation is contiguous with a delayed allocation 2579 * on the left. 2580 * Merge the new allocation with the left neighbor. 2581 */ 2582 temp = left.br_blockcount + new->br_blockcount; 2583 2584 oldlen = startblockval(left.br_startblock) + 2585 startblockval(new->br_startblock); 2586 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2587 oldlen); 2588 left.br_blockcount = temp; 2589 left.br_startblock = nullstartblock(newlen); 2590 2591 xfs_iext_prev(ifp, icur); 2592 xfs_iext_update_extent(ip, state, icur, &left); 2593 break; 2594 2595 case BMAP_RIGHT_CONTIG: 2596 /* 2597 * New allocation is contiguous with a delayed allocation 2598 * on the right. 2599 * Merge the new allocation with the right neighbor. 2600 */ 2601 temp = new->br_blockcount + right.br_blockcount; 2602 oldlen = startblockval(new->br_startblock) + 2603 startblockval(right.br_startblock); 2604 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2605 oldlen); 2606 right.br_startoff = new->br_startoff; 2607 right.br_startblock = nullstartblock(newlen); 2608 right.br_blockcount = temp; 2609 xfs_iext_update_extent(ip, state, icur, &right); 2610 break; 2611 2612 case 0: 2613 /* 2614 * New allocation is not contiguous with another 2615 * delayed allocation. 2616 * Insert a new entry. 2617 */ 2618 oldlen = newlen = 0; 2619 xfs_iext_insert(ip, icur, new, state); 2620 break; 2621 } 2622 if (oldlen != newlen) { 2623 ASSERT(oldlen > newlen); 2624 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), 2625 false); 2626 /* 2627 * Nothing to do for disk quota accounting here. 2628 */ 2629 xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen); 2630 } 2631 } 2632 2633 /* 2634 * Convert a hole to a real allocation. 2635 */ 2636 STATIC int /* error */ 2637 xfs_bmap_add_extent_hole_real( 2638 struct xfs_trans *tp, 2639 struct xfs_inode *ip, 2640 int whichfork, 2641 struct xfs_iext_cursor *icur, 2642 struct xfs_btree_cur **curp, 2643 struct xfs_bmbt_irec *new, 2644 int *logflagsp, 2645 uint32_t flags) 2646 { 2647 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 2648 struct xfs_mount *mp = ip->i_mount; 2649 struct xfs_btree_cur *cur = *curp; 2650 int error; /* error return value */ 2651 int i; /* temp state */ 2652 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 2653 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 2654 int rval=0; /* return value (logging flags) */ 2655 uint32_t state = xfs_bmap_fork_to_state(whichfork); 2656 struct xfs_bmbt_irec old; 2657 2658 ASSERT(!isnullstartblock(new->br_startblock)); 2659 ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL)); 2660 2661 XFS_STATS_INC(mp, xs_add_exlist); 2662 2663 /* 2664 * Check and set flags if this segment has a left neighbor. 
2665 */ 2666 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 2667 state |= BMAP_LEFT_VALID; 2668 if (isnullstartblock(left.br_startblock)) 2669 state |= BMAP_LEFT_DELAY; 2670 } 2671 2672 /* 2673 * Check and set flags if this segment has a current value. 2674 * Not true if we're inserting into the "hole" at eof. 2675 */ 2676 if (xfs_iext_get_extent(ifp, icur, &right)) { 2677 state |= BMAP_RIGHT_VALID; 2678 if (isnullstartblock(right.br_startblock)) 2679 state |= BMAP_RIGHT_DELAY; 2680 } 2681 2682 /* 2683 * We're inserting a real allocation between "left" and "right". 2684 * Set the contiguity flags. Don't let extents get too large. 2685 */ 2686 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 2687 left.br_startoff + left.br_blockcount == new->br_startoff && 2688 left.br_startblock + left.br_blockcount == new->br_startblock && 2689 left.br_state == new->br_state && 2690 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) 2691 state |= BMAP_LEFT_CONTIG; 2692 2693 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 2694 new->br_startoff + new->br_blockcount == right.br_startoff && 2695 new->br_startblock + new->br_blockcount == right.br_startblock && 2696 new->br_state == right.br_state && 2697 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && 2698 (!(state & BMAP_LEFT_CONTIG) || 2699 left.br_blockcount + new->br_blockcount + 2700 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)) 2701 state |= BMAP_RIGHT_CONTIG; 2702 2703 error = 0; 2704 /* 2705 * Select which case we're in here, and implement it. 2706 */ 2707 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 2708 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 2709 /* 2710 * New allocation is contiguous with real allocations on the 2711 * left and on the right. 2712 * Merge all three into a single extent record. 2713 */ 2714 left.br_blockcount += new->br_blockcount + right.br_blockcount; 2715 2716 xfs_iext_remove(ip, icur, state); 2717 xfs_iext_prev(ifp, icur); 2718 xfs_iext_update_extent(ip, state, icur, &left); 2719 ifp->if_nextents--; 2720 2721 if (cur == NULL) { 2722 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2723 } else { 2724 rval = XFS_ILOG_CORE; 2725 error = xfs_bmbt_lookup_eq(cur, &right, &i); 2726 if (error) 2727 goto done; 2728 if (XFS_IS_CORRUPT(mp, i != 1)) { 2729 error = -EFSCORRUPTED; 2730 goto done; 2731 } 2732 error = xfs_btree_delete(cur, &i); 2733 if (error) 2734 goto done; 2735 if (XFS_IS_CORRUPT(mp, i != 1)) { 2736 error = -EFSCORRUPTED; 2737 goto done; 2738 } 2739 error = xfs_btree_decrement(cur, 0, &i); 2740 if (error) 2741 goto done; 2742 if (XFS_IS_CORRUPT(mp, i != 1)) { 2743 error = -EFSCORRUPTED; 2744 goto done; 2745 } 2746 error = xfs_bmbt_update(cur, &left); 2747 if (error) 2748 goto done; 2749 } 2750 break; 2751 2752 case BMAP_LEFT_CONTIG: 2753 /* 2754 * New allocation is contiguous with a real allocation 2755 * on the left. 2756 * Merge the new allocation with the left neighbor. 
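 * Only LEFT's length changes, so the extent count stays the same and,
 * when a btree exists, its record is updated in place.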
2757 */ 2758 old = left; 2759 left.br_blockcount += new->br_blockcount; 2760 2761 xfs_iext_prev(ifp, icur); 2762 xfs_iext_update_extent(ip, state, icur, &left); 2763 2764 if (cur == NULL) { 2765 rval = xfs_ilog_fext(whichfork); 2766 } else { 2767 rval = 0; 2768 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2769 if (error) 2770 goto done; 2771 if (XFS_IS_CORRUPT(mp, i != 1)) { 2772 error = -EFSCORRUPTED; 2773 goto done; 2774 } 2775 error = xfs_bmbt_update(cur, &left); 2776 if (error) 2777 goto done; 2778 } 2779 break; 2780 2781 case BMAP_RIGHT_CONTIG: 2782 /* 2783 * New allocation is contiguous with a real allocation 2784 * on the right. 2785 * Merge the new allocation with the right neighbor. 2786 */ 2787 old = right; 2788 2789 right.br_startoff = new->br_startoff; 2790 right.br_startblock = new->br_startblock; 2791 right.br_blockcount += new->br_blockcount; 2792 xfs_iext_update_extent(ip, state, icur, &right); 2793 2794 if (cur == NULL) { 2795 rval = xfs_ilog_fext(whichfork); 2796 } else { 2797 rval = 0; 2798 error = xfs_bmbt_lookup_eq(cur, &old, &i); 2799 if (error) 2800 goto done; 2801 if (XFS_IS_CORRUPT(mp, i != 1)) { 2802 error = -EFSCORRUPTED; 2803 goto done; 2804 } 2805 error = xfs_bmbt_update(cur, &right); 2806 if (error) 2807 goto done; 2808 } 2809 break; 2810 2811 case 0: 2812 /* 2813 * New allocation is not contiguous with another 2814 * real allocation. 2815 * Insert a new entry. 2816 */ 2817 xfs_iext_insert(ip, icur, new, state); 2818 ifp->if_nextents++; 2819 2820 if (cur == NULL) { 2821 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 2822 } else { 2823 rval = XFS_ILOG_CORE; 2824 error = xfs_bmbt_lookup_eq(cur, new, &i); 2825 if (error) 2826 goto done; 2827 if (XFS_IS_CORRUPT(mp, i != 0)) { 2828 error = -EFSCORRUPTED; 2829 goto done; 2830 } 2831 error = xfs_btree_insert(cur, &i); 2832 if (error) 2833 goto done; 2834 if (XFS_IS_CORRUPT(mp, i != 1)) { 2835 error = -EFSCORRUPTED; 2836 goto done; 2837 } 2838 } 2839 break; 2840 } 2841 2842 /* add reverse mapping unless caller opted out */ 2843 if (!(flags & XFS_BMAPI_NORMAP)) 2844 xfs_rmap_map_extent(tp, ip, whichfork, new); 2845 2846 /* convert to a btree if necessary */ 2847 if (xfs_bmap_needs_btree(ip, whichfork)) { 2848 int tmp_logflags; /* partial log flag return val */ 2849 2850 ASSERT(cur == NULL); 2851 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, 2852 &tmp_logflags, whichfork); 2853 *logflagsp |= tmp_logflags; 2854 cur = *curp; 2855 if (error) 2856 goto done; 2857 } 2858 2859 /* clear out the allocated field, done with it now in any case. */ 2860 if (cur) 2861 cur->bc_ino.allocated = 0; 2862 2863 xfs_bmap_check_leaf_extents(cur, ip, whichfork); 2864 done: 2865 *logflagsp |= rval; 2866 return error; 2867 } 2868 2869 /* 2870 * Functions used in the extent read, allocate and remove paths 2871 */ 2872 2873 /* 2874 * Adjust the size of the new extent based on i_extsize and rt extsize. 2875 */ 2876 int 2877 xfs_bmap_extsize_align( 2878 xfs_mount_t *mp, 2879 xfs_bmbt_irec_t *gotp, /* next extent pointer */ 2880 xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 2881 xfs_extlen_t extsz, /* align to this extent size */ 2882 int rt, /* is this a realtime inode? */ 2883 int eof, /* is extent at end-of-file? */ 2884 int delay, /* creating delalloc extent? */ 2885 int convert, /* overwriting unwritten extent? 
*/
2886 xfs_fileoff_t *offp, /* in/out: aligned offset */
2887 xfs_extlen_t *lenp) /* in/out: aligned length */
2888 {
2889 xfs_fileoff_t orig_off; /* original offset */
2890 xfs_extlen_t orig_alen; /* original length */
2891 xfs_fileoff_t orig_end; /* original off+len */
2892 xfs_fileoff_t nexto; /* next file offset */
2893 xfs_fileoff_t prevo; /* previous file offset */
2894 xfs_fileoff_t align_off; /* temp for offset */
2895 xfs_extlen_t align_alen; /* temp for length */
2896 xfs_extlen_t temp; /* temp for calculations */
2897
2898 if (convert)
2899 return 0;
2900
2901 orig_off = align_off = *offp;
2902 orig_alen = align_alen = *lenp;
2903 orig_end = orig_off + orig_alen;
2904
2905 /*
2906 * If this request overlaps an existing extent, then don't
2907 * attempt to perform any additional alignment.
2908 */
2909 if (!delay && !eof &&
2910 (orig_off >= gotp->br_startoff) &&
2911 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2912 return 0;
2913 }
2914
2915 /*
2916 * If the file offset is unaligned vs. the extent size
2917 * we need to align it. This will be possible unless
2918 * the file was previously written with a kernel that didn't
2919 * perform this alignment, or if a truncate shot us in the
2920 * foot.
2921 */
2922 div_u64_rem(orig_off, extsz, &temp);
2923 if (temp) {
2924 align_alen += temp;
2925 align_off -= temp;
2926 }
2927
2928 /* Same adjustment for the end of the requested area. */
2929 temp = (align_alen % extsz);
2930 if (temp)
2931 align_alen += extsz - temp;
2932
2933 /*
2934 * For large extent hint sizes, the aligned extent might be larger than
2935 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so
2936 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer
2937 * allocation loops handle short allocation just fine, so it is safe to
2938 * do this. We only want to do it when we are forced to, though, because
2939 * it means more allocation operations are required.
2940 */
2941 while (align_alen > XFS_MAX_BMBT_EXTLEN)
2942 align_alen -= extsz;
2943 ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
2944
2945 /*
2946 * If the previous block overlaps with this proposed allocation
2947 * then move the start forward without adjusting the length.
2948 */
2949 if (prevp->br_startoff != NULLFILEOFF) {
2950 if (prevp->br_startblock == HOLESTARTBLOCK)
2951 prevo = prevp->br_startoff;
2952 else
2953 prevo = prevp->br_startoff + prevp->br_blockcount;
2954 } else
2955 prevo = 0;
2956 if (align_off != orig_off && align_off < prevo)
2957 align_off = prevo;
2958 /*
2959 * If the next block overlaps with this proposed allocation
2960 * then move the start back without adjusting the length,
2961 * but not before offset 0.
2962 * This may of course make the start overlap the previous block,
2963 * and if we hit the offset 0 limit then the next block
2964 * can still overlap too.
2965 */
2966 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2967 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2968 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2969 nexto = gotp->br_startoff + gotp->br_blockcount;
2970 else
2971 nexto = gotp->br_startoff;
2972 } else
2973 nexto = NULLFILEOFF;
2974 if (!eof &&
2975 align_off + align_alen != orig_end &&
2976 align_off + align_alen > nexto)
2977 align_off = nexto > align_alen ? nexto - align_alen : 0;
2978 /*
2979 * If we're now overlapping the next or previous extent that
2980 * means we can't fit an extsz piece in this hole.
Just move
2981 * the start forward to the first valid spot and set
2982 * the length so we hit the end.
2983 */
2984 if (align_off != orig_off && align_off < prevo)
2985 align_off = prevo;
2986 if (align_off + align_alen != orig_end &&
2987 align_off + align_alen > nexto &&
2988 nexto != NULLFILEOFF) {
2989 ASSERT(nexto > prevo);
2990 align_alen = nexto - align_off;
2991 }
2992
2993 /*
2994 * If realtime, and the result isn't a multiple of the realtime
2995 * extent size, we need to remove blocks until it is.
2996 */
2997 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2998 /*
2999 * We're not covering the original request, or
3000 * we won't be able to once we fix the length.
3001 */
3002 if (orig_off < align_off ||
3003 orig_end > align_off + align_alen ||
3004 align_alen - temp < orig_alen)
3005 return -EINVAL;
3006 /*
3007 * Try to fix it by moving the start up.
3008 */
3009 if (align_off + temp <= orig_off) {
3010 align_alen -= temp;
3011 align_off += temp;
3012 }
3013 /*
3014 * Try to fix it by moving the end in.
3015 */
3016 else if (align_off + align_alen - temp >= orig_end)
3017 align_alen -= temp;
3018 /*
3019 * Set the start to the minimum then trim the length.
3020 */
3021 else {
3022 align_alen -= orig_off - align_off;
3023 align_off = orig_off;
3024 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3025 }
3026 /*
3027 * Result doesn't cover the request, fail it.
3028 */
3029 if (orig_off < align_off || orig_end > align_off + align_alen)
3030 return -EINVAL;
3031 } else {
3032 ASSERT(orig_off >= align_off);
3033 /* see XFS_MAX_BMBT_EXTLEN handling above */
3034 ASSERT(orig_end <= align_off + align_alen ||
3035 align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
3036 }
3037
3038 #ifdef DEBUG
3039 if (!eof && gotp->br_startoff != NULLFILEOFF)
3040 ASSERT(align_off + align_alen <= gotp->br_startoff);
3041 if (prevp->br_startoff != NULLFILEOFF)
3042 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3043 #endif
3044
3045 *lenp = align_alen;
3046 *offp = align_off;
3047 return 0;
3048 }
3049
3050 #define XFS_ALLOC_GAP_UNITS 4
3051
3052 void
3053 xfs_bmap_adjacent(
3054 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3055 {
3056 xfs_fsblock_t adjust; /* adjustment to block numbers */
3057 xfs_mount_t *mp; /* mount point structure */
3058 int rt; /* true if inode is realtime */
3059
3060 #define ISVALID(x,y) \
3061 (rt ? \
3062 (x) < mp->m_sb.sb_rblocks : \
3063 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3064 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3065 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3066
3067 mp = ap->ip->i_mount;
3068 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3069 (ap->datatype & XFS_ALLOC_USERDATA);
3070 /*
3071 * If allocating at eof, and there's a previous real block,
3072 * try to use its last block as our starting point.
3073 */
3074 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3075 !isnullstartblock(ap->prev.br_startblock) &&
3076 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3077 ap->prev.br_startblock)) {
3078 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3079 /*
3080 * Adjust for the gap between prevp and us.
3081 */
3082 adjust = ap->offset -
3083 (ap->prev.br_startoff + ap->prev.br_blockcount);
3084 if (adjust &&
3085 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3086 ap->blkno += adjust;
3087 }
3088 /*
3089 * If not at eof, then compare the two neighbor blocks.
3090 * Figure out whether either one gives us a good starting point,
3091 * and pick the better one.
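 * Better means the neighbour whose gap to the requested file offset is
 * smaller (prevdiff vs. gotdiff below), keeping physical locality close
 * to logical locality.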
3092 */ 3093 else if (!ap->eof) { 3094 xfs_fsblock_t gotbno; /* right side block number */ 3095 xfs_fsblock_t gotdiff=0; /* right side difference */ 3096 xfs_fsblock_t prevbno; /* left side block number */ 3097 xfs_fsblock_t prevdiff=0; /* left side difference */ 3098 3099 /* 3100 * If there's a previous (left) block, select a requested 3101 * start block based on it. 3102 */ 3103 if (ap->prev.br_startoff != NULLFILEOFF && 3104 !isnullstartblock(ap->prev.br_startblock) && 3105 (prevbno = ap->prev.br_startblock + 3106 ap->prev.br_blockcount) && 3107 ISVALID(prevbno, ap->prev.br_startblock)) { 3108 /* 3109 * Calculate gap to end of previous block. 3110 */ 3111 adjust = prevdiff = ap->offset - 3112 (ap->prev.br_startoff + 3113 ap->prev.br_blockcount); 3114 /* 3115 * Figure the startblock based on the previous block's 3116 * end and the gap size. 3117 * Heuristic! 3118 * If the gap is large relative to the piece we're 3119 * allocating, or using it gives us an invalid block 3120 * number, then just use the end of the previous block. 3121 */ 3122 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3123 ISVALID(prevbno + prevdiff, 3124 ap->prev.br_startblock)) 3125 prevbno += adjust; 3126 else 3127 prevdiff += adjust; 3128 } 3129 /* 3130 * No previous block or can't follow it, just default. 3131 */ 3132 else 3133 prevbno = NULLFSBLOCK; 3134 /* 3135 * If there's a following (right) block, select a requested 3136 * start block based on it. 3137 */ 3138 if (!isnullstartblock(ap->got.br_startblock)) { 3139 /* 3140 * Calculate gap to start of next block. 3141 */ 3142 adjust = gotdiff = ap->got.br_startoff - ap->offset; 3143 /* 3144 * Figure the startblock based on the next block's 3145 * start and the gap size. 3146 */ 3147 gotbno = ap->got.br_startblock; 3148 /* 3149 * Heuristic! 3150 * If the gap is large relative to the piece we're 3151 * allocating, or using it gives us an invalid block 3152 * number, then just use the start of the next block 3153 * offset by our length. 3154 */ 3155 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 3156 ISVALID(gotbno - gotdiff, gotbno)) 3157 gotbno -= adjust; 3158 else if (ISVALID(gotbno - ap->length, gotbno)) { 3159 gotbno -= ap->length; 3160 gotdiff += adjust - ap->length; 3161 } else 3162 gotdiff += adjust; 3163 } 3164 /* 3165 * No next block, just default. 3166 */ 3167 else 3168 gotbno = NULLFSBLOCK; 3169 /* 3170 * If both valid, pick the better one, else the only good 3171 * one, else ap->blkno is already set (to 0 or the inode block). 3172 */ 3173 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 3174 ap->blkno = prevdiff <= gotdiff ? 
prevbno : gotbno;
3175 else if (prevbno != NULLFSBLOCK)
3176 ap->blkno = prevbno;
3177 else if (gotbno != NULLFSBLOCK)
3178 ap->blkno = gotbno;
3179 }
3180 #undef ISVALID
3181 }
3182
3183 int
3184 xfs_bmap_longest_free_extent(
3185 struct xfs_perag *pag,
3186 struct xfs_trans *tp,
3187 xfs_extlen_t *blen)
3188 {
3189 xfs_extlen_t longest;
3190 int error = 0;
3191
3192 if (!xfs_perag_initialised_agf(pag)) {
3193 error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
3194 NULL);
3195 if (error)
3196 return error;
3197 }
3198
3199 longest = xfs_alloc_longest_free_extent(pag,
3200 xfs_alloc_min_freelist(pag->pag_mount, pag),
3201 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3202 if (*blen < longest)
3203 *blen = longest;
3204
3205 return 0;
3206 }
3207
3208 static xfs_extlen_t
3209 xfs_bmap_select_minlen(
3210 struct xfs_bmalloca *ap,
3211 struct xfs_alloc_arg *args,
3212 xfs_extlen_t blen)
3213 {
3214
3215 /*
3216 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), blen
3217 * may not have sampled every AG, so even when it is smaller than the
3218 * minimum there may still be enough contiguous free space for this
3219 * request.
3220 */ if (blen < ap->minlen) return ap->minlen;
3221
3222 /*
3223 * If the best seen length is less than the request length,
3224 * use the best as the minimum, otherwise we've got the maxlen we
3225 * were asked for.
3226 */
3227 if (blen < args->maxlen)
3228 return blen;
3229 return args->maxlen;
3230 }
3231
3232 static int
3233 xfs_bmap_btalloc_select_lengths(
3234 struct xfs_bmalloca *ap,
3235 struct xfs_alloc_arg *args,
3236 xfs_extlen_t *blen)
3237 {
3238 struct xfs_mount *mp = args->mp;
3239 struct xfs_perag *pag;
3240 xfs_agnumber_t agno, startag;
3241 int error = 0;
3242
3243 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3244 args->total = ap->minlen;
3245 args->minlen = ap->minlen;
3246 return 0;
3247 }
3248
3249 args->total = ap->total;
3250 startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
3251 if (startag == NULLAGNUMBER)
3252 startag = 0;
3253
3254 *blen = 0;
3255 for_each_perag_wrap(mp, startag, agno, pag) {
3256 error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
3257 if (error && error != -EAGAIN)
3258 break;
3259 error = 0;
3260 if (*blen >= args->maxlen)
3261 break;
3262 }
3263 if (pag)
3264 xfs_perag_rele(pag);
3265
3266 args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
3267 return error;
3268 }
3269
3270 /* Update all inode and quota accounting for the allocation we just did. */
3271 static void
3272 xfs_bmap_btalloc_accounting(
3273 struct xfs_bmalloca *ap,
3274 struct xfs_alloc_arg *args)
3275 {
3276 if (ap->flags & XFS_BMAPI_COWFORK) {
3277 /*
3278 * COW fork blocks are in-core only and thus are treated as
3279 * in-core quota reservation (like delalloc blocks) even when
3280 * converted to real blocks. The quota reservation is not
3281 * accounted to disk until blocks are remapped to the data
3282 * fork. So if these blocks were previously delalloc, we
3283 * already have quota reservation and there's nothing to do
3284 * yet.
3285 */
3286 if (ap->wasdel) {
3287 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3288 return;
3289 }
3290
3291 /*
3292 * Otherwise, we've allocated blocks in a hole. The transaction
3293 * has acquired in-core quota reservation for this extent.
3294 * Rather than account these as real blocks, however, we reduce
3295 * the transaction quota reservation based on the allocation.
3296 * This essentially transfers the transaction quota reservation
3297 * to that of a delalloc extent.
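 * (i_delayed_blks then carries that reservation until the COW blocks
 * are remapped into the data fork.)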
3298 */ 3299 ap->ip->i_delayed_blks += args->len; 3300 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS, 3301 -(long)args->len); 3302 return; 3303 } 3304 3305 /* data/attr fork only */ 3306 ap->ip->i_nblocks += args->len; 3307 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 3308 if (ap->wasdel) { 3309 ap->ip->i_delayed_blks -= args->len; 3310 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len); 3311 } 3312 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3313 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, 3314 args->len); 3315 } 3316 3317 static int 3318 xfs_bmap_compute_alignments( 3319 struct xfs_bmalloca *ap, 3320 struct xfs_alloc_arg *args) 3321 { 3322 struct xfs_mount *mp = args->mp; 3323 xfs_extlen_t align = 0; /* minimum allocation alignment */ 3324 int stripe_align = 0; 3325 3326 /* stripe alignment for allocation is determined by mount parameters */ 3327 if (mp->m_swidth && xfs_has_swalloc(mp)) 3328 stripe_align = mp->m_swidth; 3329 else if (mp->m_dalign) 3330 stripe_align = mp->m_dalign; 3331 3332 if (ap->flags & XFS_BMAPI_COWFORK) 3333 align = xfs_get_cowextsz_hint(ap->ip); 3334 else if (ap->datatype & XFS_ALLOC_USERDATA) 3335 align = xfs_get_extsz_hint(ap->ip); 3336 if (align) { 3337 if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0, 3338 ap->eof, 0, ap->conv, &ap->offset, 3339 &ap->length)) 3340 ASSERT(0); 3341 ASSERT(ap->length); 3342 } 3343 3344 /* apply extent size hints if obtained earlier */ 3345 if (align) { 3346 args->prod = align; 3347 div_u64_rem(ap->offset, args->prod, &args->mod); 3348 if (args->mod) 3349 args->mod = args->prod - args->mod; 3350 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { 3351 args->prod = 1; 3352 args->mod = 0; 3353 } else { 3354 args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; 3355 div_u64_rem(ap->offset, args->prod, &args->mod); 3356 if (args->mod) 3357 args->mod = args->prod - args->mod; 3358 } 3359 3360 return stripe_align; 3361 } 3362 3363 static void 3364 xfs_bmap_process_allocated_extent( 3365 struct xfs_bmalloca *ap, 3366 struct xfs_alloc_arg *args, 3367 xfs_fileoff_t orig_offset, 3368 xfs_extlen_t orig_length) 3369 { 3370 ap->blkno = args->fsbno; 3371 ap->length = args->len; 3372 /* 3373 * If the extent size hint is active, we tried to round the 3374 * caller's allocation request offset down to extsz and the 3375 * length up to another extsz boundary. If we found a free 3376 * extent we mapped it in starting at this new offset. If the 3377 * newly mapped space isn't long enough to cover any of the 3378 * range of offsets that was originally requested, move the 3379 * mapping up so that we can fill as much of the caller's 3380 * original request as possible. Free space is apparently 3381 * very fragmented so we're unlikely to be able to satisfy the 3382 * hints anyway. 
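 * Concretely: a mapping no longer than the original request is simply
 * restarted at the original offset, while a longer one that still ends
 * short of the original end is shifted up so that it ends there.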
3383 */ 3384 if (ap->length <= orig_length) 3385 ap->offset = orig_offset; 3386 else if (ap->offset + ap->length < orig_offset + orig_length) 3387 ap->offset = orig_offset + orig_length - ap->length; 3388 xfs_bmap_btalloc_accounting(ap, args); 3389 } 3390 3391 #ifdef DEBUG 3392 static int 3393 xfs_bmap_exact_minlen_extent_alloc( 3394 struct xfs_bmalloca *ap) 3395 { 3396 struct xfs_mount *mp = ap->ip->i_mount; 3397 struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp }; 3398 xfs_fileoff_t orig_offset; 3399 xfs_extlen_t orig_length; 3400 int error; 3401 3402 ASSERT(ap->length); 3403 3404 if (ap->minlen != 1) { 3405 ap->blkno = NULLFSBLOCK; 3406 ap->length = 0; 3407 return 0; 3408 } 3409 3410 orig_offset = ap->offset; 3411 orig_length = ap->length; 3412 3413 args.alloc_minlen_only = 1; 3414 3415 xfs_bmap_compute_alignments(ap, &args); 3416 3417 /* 3418 * Unlike the longest extent available in an AG, we don't track 3419 * the length of an AG's shortest extent. 3420 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and 3421 * hence we can afford to start traversing from the 0th AG since 3422 * we need not be concerned about a drop in performance in 3423 * "debug only" code paths. 3424 */ 3425 ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0); 3426 3427 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE; 3428 args.minlen = args.maxlen = ap->minlen; 3429 args.total = ap->total; 3430 3431 args.alignment = 1; 3432 args.minalignslop = 0; 3433 3434 args.minleft = ap->minleft; 3435 args.wasdel = ap->wasdel; 3436 args.resv = XFS_AG_RESV_NONE; 3437 args.datatype = ap->datatype; 3438 3439 error = xfs_alloc_vextent_first_ag(&args, ap->blkno); 3440 if (error) 3441 return error; 3442 3443 if (args.fsbno != NULLFSBLOCK) { 3444 xfs_bmap_process_allocated_extent(ap, &args, orig_offset, 3445 orig_length); 3446 } else { 3447 ap->blkno = NULLFSBLOCK; 3448 ap->length = 0; 3449 } 3450 3451 return 0; 3452 } 3453 #else 3454 3455 #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED) 3456 3457 #endif 3458 3459 /* 3460 * If we are not low on available data blocks and we are allocating at 3461 * EOF, optimise allocation for contiguous file extension and/or stripe 3462 * alignment of the new extent. 3463 * 3464 * NOTE: ap->aeof is only set if the allocation length is >= the 3465 * stripe unit and the allocation offset is at the end of file. 3466 */ 3467 static int 3468 xfs_bmap_btalloc_at_eof( 3469 struct xfs_bmalloca *ap, 3470 struct xfs_alloc_arg *args, 3471 xfs_extlen_t blen, 3472 int stripe_align, 3473 bool ag_only) 3474 { 3475 struct xfs_mount *mp = args->mp; 3476 struct xfs_perag *caller_pag = args->pag; 3477 int error; 3478 3479 /* 3480 * If there are already extents in the file, try an exact EOF block 3481 * allocation to extend the file as a contiguous extent. If that fails, 3482 * or it's the first allocation in a file, just try for a stripe aligned 3483 * allocation. 3484 */ 3485 if (ap->offset) { 3486 xfs_extlen_t nextminlen = 0; 3487 3488 /* 3489 * Compute the minlen+alignment for the next case. Set slop so 3490 * that the value of minlen+alignment+slop doesn't go up between 3491 * the calls. 
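 * The exact-bno attempt below runs with alignment 1; minalignslop holds
 * back enough extra space that the aligned retry which follows (alignment
 * = stripe_align, minlen = nextminlen) can still succeed in the same AG.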
*/
3493 args->alignment = 1;
3494 if (blen > stripe_align && blen <= args->maxlen)
3495 nextminlen = blen - stripe_align;
3496 else
3497 nextminlen = args->minlen;
3498 if (nextminlen + stripe_align > args->minlen + 1)
3499 args->minalignslop = nextminlen + stripe_align -
3500 args->minlen - 1;
3501 else
3502 args->minalignslop = 0;
3503
3504 if (!caller_pag)
3505 args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
3506 error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
3507 if (!caller_pag) {
3508 xfs_perag_put(args->pag);
3509 args->pag = NULL;
3510 }
3511 if (error)
3512 return error;
3513
3514 if (args->fsbno != NULLFSBLOCK)
3515 return 0;
3516 /*
3517 * Exact allocation failed. Reset to try an aligned allocation
3518 * according to the original allocation specification.
3519 */
3520 args->alignment = stripe_align;
3521 args->minlen = nextminlen;
3522 args->minalignslop = 0;
3523 } else {
3524 /*
3525 * Adjust minlen to try and preserve alignment if we
3526 * can't guarantee an aligned maxlen extent.
3527 */
3528 args->alignment = stripe_align;
3529 if (blen > args->alignment &&
3530 blen <= args->maxlen + args->alignment)
3531 args->minlen = blen - args->alignment;
3532 args->minalignslop = 0;
3533 }
3534
3535 if (ag_only) {
3536 error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3537 } else {
3538 args->pag = NULL;
3539 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3540 ASSERT(args->pag == NULL);
3541 args->pag = caller_pag;
3542 }
3543 if (error)
3544 return error;
3545
3546 if (args->fsbno != NULLFSBLOCK)
3547 return 0;
3548
3549 /*
3550 * Allocation failed, so return the allocation args to their
3551 * original non-aligned state so the caller can proceed on allocation
3552 * failure as if this function was never called.
3553 */
3554 args->alignment = 1;
3555 return 0;
3556 }
3557
3558 /*
3559 * We have failed multiple allocation attempts, so we are now in a low space
3560 * allocation situation. Try a locality-first, full-filesystem, minimum-length
3561 * allocation whilst still maintaining the necessary total block reservation
3562 * requirements.
3563 *
3564 * If that fails, we are now critically low on space, so perform a last-resort
3565 * allocation attempt: no reserve, no locality, blocking, minimum length, full
3566 * filesystem free space scan. We also indicate to future allocations in this
3567 * transaction that we are critically low on space so they don't waste time on
3568 * allocation modes that are unlikely to succeed.
3569 */
3570 int
3571 xfs_bmap_btalloc_low_space(
3572 struct xfs_bmalloca *ap,
3573 struct xfs_alloc_arg *args)
3574 {
3575 int error;
3576
3577 if (args->minlen > ap->minlen) {
3578 args->minlen = ap->minlen;
3579 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3580 if (error || args->fsbno != NULLFSBLOCK)
3581 return error;
3582 }
3583
3584 /* Last-ditch attempt before failure is declared.
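 * Whether or not it finds space, the transaction is flagged
 * XFS_TRANS_LOWMODE below so that later allocations in it skip the
 * expensive optimal-placement paths.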
*/
3585 args->total = ap->minlen;
3586 error = xfs_alloc_vextent_first_ag(args, 0);
3587 if (error)
3588 return error;
3589 ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3590 return 0;
3591 }
3592
3593 static int
3594 xfs_bmap_btalloc_filestreams(
3595 struct xfs_bmalloca *ap,
3596 struct xfs_alloc_arg *args,
3597 int stripe_align)
3598 {
3599 xfs_extlen_t blen = 0;
3600 int error = 0;
3601
3602
3603 error = xfs_filestream_select_ag(ap, args, &blen);
3604 if (error)
3605 return error;
3606 ASSERT(args->pag);
3607
3608 /*
3609 * If we are in low space mode, then optimal allocation will fail so
3610 * prepare for minimal allocation and jump to the low space algorithm
3611 * immediately.
3612 */
3613 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3614 args->minlen = ap->minlen;
3615 ASSERT(args->fsbno == NULLFSBLOCK);
3616 goto out_low_space;
3617 }
3618
3619 args->minlen = xfs_bmap_select_minlen(ap, args, blen);
3620 if (ap->aeof)
3621 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3622 true);
3623
3624 if (!error && args->fsbno == NULLFSBLOCK)
3625 error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3626
3627 out_low_space:
3628 /*
3629 * We are now done with the perag reference for the filestreams
3630 * association provided by xfs_filestream_select_ag(). Release it now as
3631 * we've either succeeded, had a fatal error, or we are out of space and
3632 * need to do a full filesystem scan for free space which will take its
3633 * own references.
3634 */
3635 xfs_perag_rele(args->pag);
3636 args->pag = NULL;
3637 if (error || args->fsbno != NULLFSBLOCK)
3638 return error;
3639
3640 return xfs_bmap_btalloc_low_space(ap, args);
3641 }
3642
3643 static int
3644 xfs_bmap_btalloc_best_length(
3645 struct xfs_bmalloca *ap,
3646 struct xfs_alloc_arg *args,
3647 int stripe_align)
3648 {
3649 xfs_extlen_t blen = 0;
3650 int error;
3651
3652 ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
3653 xfs_bmap_adjacent(ap);
3654
3655 /*
3656 * Search for an allocation group with a single extent large enough for
3657 * the request. If one isn't found, then adjust the minimum allocation
3658 * size to the largest space found.
3659 */
3660 error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
3661 if (error)
3662 return error;
3663
3664 /*
3665 * Don't attempt optimal EOF allocation if previous allocations barely
3666 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
3667 * optimal or even aligned allocations in this case, so don't waste time
3668 * trying.
3669 */ 3670 if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) { 3671 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align, 3672 false); 3673 if (error || args->fsbno != NULLFSBLOCK) 3674 return error; 3675 } 3676 3677 error = xfs_alloc_vextent_start_ag(args, ap->blkno); 3678 if (error || args->fsbno != NULLFSBLOCK) 3679 return error; 3680 3681 return xfs_bmap_btalloc_low_space(ap, args); 3682 } 3683 3684 static int 3685 xfs_bmap_btalloc( 3686 struct xfs_bmalloca *ap) 3687 { 3688 struct xfs_mount *mp = ap->ip->i_mount; 3689 struct xfs_alloc_arg args = { 3690 .tp = ap->tp, 3691 .mp = mp, 3692 .fsbno = NULLFSBLOCK, 3693 .oinfo = XFS_RMAP_OINFO_SKIP_UPDATE, 3694 .minleft = ap->minleft, 3695 .wasdel = ap->wasdel, 3696 .resv = XFS_AG_RESV_NONE, 3697 .datatype = ap->datatype, 3698 .alignment = 1, 3699 .minalignslop = 0, 3700 }; 3701 xfs_fileoff_t orig_offset; 3702 xfs_extlen_t orig_length; 3703 int error; 3704 int stripe_align; 3705 3706 ASSERT(ap->length); 3707 orig_offset = ap->offset; 3708 orig_length = ap->length; 3709 3710 stripe_align = xfs_bmap_compute_alignments(ap, &args); 3711 3712 /* Trim the allocation back to the maximum an AG can fit. */ 3713 args.maxlen = min(ap->length, mp->m_ag_max_usable); 3714 3715 if ((ap->datatype & XFS_ALLOC_USERDATA) && 3716 xfs_inode_is_filestream(ap->ip)) 3717 error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align); 3718 else 3719 error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align); 3720 if (error) 3721 return error; 3722 3723 if (args.fsbno != NULLFSBLOCK) { 3724 xfs_bmap_process_allocated_extent(ap, &args, orig_offset, 3725 orig_length); 3726 } else { 3727 ap->blkno = NULLFSBLOCK; 3728 ap->length = 0; 3729 } 3730 return 0; 3731 } 3732 3733 /* Trim extent to fit a logical block range. 
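 * Clamps irec to [bno, bno + len); an extent lying entirely outside
 * that range has its block count zeroed.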
*/
3734 void
3735 xfs_trim_extent(
3736 struct xfs_bmbt_irec *irec,
3737 xfs_fileoff_t bno,
3738 xfs_filblks_t len)
3739 {
3740 xfs_fileoff_t distance;
3741 xfs_fileoff_t end = bno + len;
3742
3743 if (irec->br_startoff + irec->br_blockcount <= bno ||
3744 irec->br_startoff >= end) {
3745 irec->br_blockcount = 0;
3746 return;
3747 }
3748
3749 if (irec->br_startoff < bno) {
3750 distance = bno - irec->br_startoff;
3751 if (isnullstartblock(irec->br_startblock))
3752 irec->br_startblock = DELAYSTARTBLOCK;
3753 if (irec->br_startblock != DELAYSTARTBLOCK &&
3754 irec->br_startblock != HOLESTARTBLOCK)
3755 irec->br_startblock += distance;
3756 irec->br_startoff += distance;
3757 irec->br_blockcount -= distance;
3758 }
3759
3760 if (end < irec->br_startoff + irec->br_blockcount) {
3761 distance = irec->br_startoff + irec->br_blockcount - end;
3762 irec->br_blockcount -= distance;
3763 }
3764 }
3765
3766 /*
3767 * Trim the returned map to the required bounds.
3768 */
3769 STATIC void
3770 xfs_bmapi_trim_map(
3771 struct xfs_bmbt_irec *mval,
3772 struct xfs_bmbt_irec *got,
3773 xfs_fileoff_t *bno,
3774 xfs_filblks_t len,
3775 xfs_fileoff_t obno,
3776 xfs_fileoff_t end,
3777 int n,
3778 uint32_t flags)
3779 {
3780 if ((flags & XFS_BMAPI_ENTIRE) ||
3781 got->br_startoff + got->br_blockcount <= obno) {
3782 *mval = *got;
3783 if (isnullstartblock(got->br_startblock))
3784 mval->br_startblock = DELAYSTARTBLOCK;
3785 return;
3786 }
3787
3788 if (obno > *bno)
3789 *bno = obno;
3790 ASSERT((*bno >= obno) || (n == 0));
3791 ASSERT(*bno < end);
3792 mval->br_startoff = *bno;
3793 if (isnullstartblock(got->br_startblock))
3794 mval->br_startblock = DELAYSTARTBLOCK;
3795 else
3796 mval->br_startblock = got->br_startblock +
3797 (*bno - got->br_startoff);
3798 /*
3799 * Return the minimum of what we got and what we asked for, for
3800 * the length. We can check against len here because the caller
3801 * keeps it updated between iterations of its mapping loop, so if
3802 * an earlier part of the allocation didn't overlap what was asked
3803 * for, len already reflects only the remainder of the request.
3804 */ 3805 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno, 3806 got->br_blockcount - (*bno - got->br_startoff)); 3807 mval->br_state = got->br_state; 3808 ASSERT(mval->br_blockcount <= len); 3809 return; 3810 } 3811 3812 /* 3813 * Update and validate the extent map to return 3814 */ 3815 STATIC void 3816 xfs_bmapi_update_map( 3817 struct xfs_bmbt_irec **map, 3818 xfs_fileoff_t *bno, 3819 xfs_filblks_t *len, 3820 xfs_fileoff_t obno, 3821 xfs_fileoff_t end, 3822 int *n, 3823 uint32_t flags) 3824 { 3825 xfs_bmbt_irec_t *mval = *map; 3826 3827 ASSERT((flags & XFS_BMAPI_ENTIRE) || 3828 ((mval->br_startoff + mval->br_blockcount) <= end)); 3829 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || 3830 (mval->br_startoff < obno)); 3831 3832 *bno = mval->br_startoff + mval->br_blockcount; 3833 *len = end - *bno; 3834 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 3835 /* update previous map with new information */ 3836 ASSERT(mval->br_startblock == mval[-1].br_startblock); 3837 ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 3838 ASSERT(mval->br_state == mval[-1].br_state); 3839 mval[-1].br_blockcount = mval->br_blockcount; 3840 mval[-1].br_state = mval->br_state; 3841 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 3842 mval[-1].br_startblock != DELAYSTARTBLOCK && 3843 mval[-1].br_startblock != HOLESTARTBLOCK && 3844 mval->br_startblock == mval[-1].br_startblock + 3845 mval[-1].br_blockcount && 3846 mval[-1].br_state == mval->br_state) { 3847 ASSERT(mval->br_startoff == 3848 mval[-1].br_startoff + mval[-1].br_blockcount); 3849 mval[-1].br_blockcount += mval->br_blockcount; 3850 } else if (*n > 0 && 3851 mval->br_startblock == DELAYSTARTBLOCK && 3852 mval[-1].br_startblock == DELAYSTARTBLOCK && 3853 mval->br_startoff == 3854 mval[-1].br_startoff + mval[-1].br_blockcount) { 3855 mval[-1].br_blockcount += mval->br_blockcount; 3856 mval[-1].br_state = mval->br_state; 3857 } else if (!((*n == 0) && 3858 ((mval->br_startoff + mval->br_blockcount) <= 3859 obno))) { 3860 mval++; 3861 (*n)++; 3862 } 3863 *map = mval; 3864 } 3865 3866 /* 3867 * Map file blocks to filesystem blocks without allocation. 3868 */ 3869 int 3870 xfs_bmapi_read( 3871 struct xfs_inode *ip, 3872 xfs_fileoff_t bno, 3873 xfs_filblks_t len, 3874 struct xfs_bmbt_irec *mval, 3875 int *nmap, 3876 uint32_t flags) 3877 { 3878 struct xfs_mount *mp = ip->i_mount; 3879 int whichfork = xfs_bmapi_whichfork(flags); 3880 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 3881 struct xfs_bmbt_irec got; 3882 xfs_fileoff_t obno; 3883 xfs_fileoff_t end; 3884 struct xfs_iext_cursor icur; 3885 int error; 3886 bool eof = false; 3887 int n = 0; 3888 3889 ASSERT(*nmap >= 1); 3890 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE))); 3891 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); 3892 3893 if (WARN_ON_ONCE(!ifp)) 3894 return -EFSCORRUPTED; 3895 3896 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || 3897 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) 3898 return -EFSCORRUPTED; 3899 3900 if (xfs_is_shutdown(mp)) 3901 return -EIO; 3902 3903 XFS_STATS_INC(mp, xs_blk_mapr); 3904 3905 error = xfs_iread_extents(NULL, ip, whichfork); 3906 if (error) 3907 return error; 3908 3909 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) 3910 eof = true; 3911 end = bno + len; 3912 obno = bno; 3913 3914 while (bno < end && n < *nmap) { 3915 /* Reading past eof, act as though there's a hole up to end. 
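 * Setting got.br_startoff to end below steers us into the hole branch,
 * which hands back a HOLESTARTBLOCK mapping covering the rest of the
 * requested range.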
*/
3916 if (eof)
3917 got.br_startoff = end;
3918 if (got.br_startoff > bno) {
3919 /* Reading in a hole. */
3920 mval->br_startoff = bno;
3921 mval->br_startblock = HOLESTARTBLOCK;
3922 mval->br_blockcount =
3923 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3924 mval->br_state = XFS_EXT_NORM;
3925 bno += mval->br_blockcount;
3926 len -= mval->br_blockcount;
3927 mval++;
3928 n++;
3929 continue;
3930 }
3931
3932 /* set up the extent map to return. */
3933 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3934 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3935
3936 /* If we're done, stop now. */
3937 if (bno >= end || n >= *nmap)
3938 break;
3939
3940 /* Else go on to the next record. */
3941 if (!xfs_iext_next_extent(ifp, &icur, &got))
3942 eof = true;
3943 }
3944 *nmap = n;
3945 return 0;
3946 }
3947
3948 /*
3949 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3950 * global pool and the extent inserted into the inode in-core extent tree.
3951 *
3952 * On entry, got refers to the first extent beyond the offset of the extent to
3953 * allocate or eof is specified if no such extent exists. On return, got refers
3954 * to the extent record that was inserted to the inode fork.
3955 *
3956 * Note that the allocated extent may have been merged with contiguous extents
3957 * during insertion into the inode fork. Thus, got does not reflect the current
3958 * state of the inode fork on return. If necessary, the caller can use lastx to
3959 * look up the updated record in the inode fork.
3960 */
3961 int
3962 xfs_bmapi_reserve_delalloc(
3963 struct xfs_inode *ip,
3964 int whichfork,
3965 xfs_fileoff_t off,
3966 xfs_filblks_t len,
3967 xfs_filblks_t prealloc,
3968 struct xfs_bmbt_irec *got,
3969 struct xfs_iext_cursor *icur,
3970 int eof)
3971 {
3972 struct xfs_mount *mp = ip->i_mount;
3973 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
3974 xfs_extlen_t alen;
3975 xfs_extlen_t indlen;
3976 int error;
3977 xfs_fileoff_t aoff;
3978 bool use_cowextszhint =
3979 whichfork == XFS_COW_FORK && !prealloc;
3980
3981 retry:
3982 /*
3983 * Cap the alloc length. Keep track of prealloc so we know whether to
3984 * tag the inode before we return.
3985 */
3986 aoff = off;
3987 alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
3988 if (!eof)
3989 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3990 if (prealloc && alen >= len)
3991 prealloc = alen - len;
3992
3993 /*
3994 * If we're targeting the COW fork but aren't creating a speculative
3995 * posteof preallocation, try to expand the reservation to align with
3996 * the COW extent size hint if there's sufficient free space.
3997 *
3998 * Unlike the data fork, the CoW cancellation functions will free all
3999 * the reservations at inactivation, so we don't require that every
4000 * delalloc reservation have a dirty pagecache.
4001 */
4002 if (use_cowextszhint) {
4003 struct xfs_bmbt_irec prev;
4004 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
4005
4006 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
4007 prev.br_startoff = NULLFILEOFF;
4008
4009 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
4010 1, 0, &aoff, &alen);
4011 ASSERT(!error);
4012 }
4013
4014 /*
4015 * Make a transaction-less quota reservation for delayed allocation
4016 * blocks. This number gets adjusted later. We return if we haven't
4017 * allocated blocks already inside this loop.
4018 */
4019 error = xfs_quota_reserve_blkres(ip, alen);
4020 if (error)
4021 goto out;
4022
4023 /*
4024 * Split the superblock update between alen and indlen, since they
4025 * could be coming from different places.
4026 */
4027 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4028 ASSERT(indlen > 0);
4029
4030 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4031 if (error)
4032 goto out_unreserve_quota;
4033
4034 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4035 if (error)
4036 goto out_unreserve_blocks;
4037
4038
4039 ip->i_delayed_blks += alen;
4040 xfs_mod_delalloc(ip->i_mount, alen + indlen);
4041
4042 got->br_startoff = aoff;
4043 got->br_startblock = nullstartblock(indlen);
4044 got->br_blockcount = alen;
4045 got->br_state = XFS_EXT_NORM;
4046
4047 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4048
4049 /*
4050 * Tag the inode if blocks were preallocated. Note that COW fork
4051 * preallocation can occur at the start or end of the extent, even when
4052 * prealloc == 0, so we must also check the aligned offset and length.
4053 */
4054 if (whichfork == XFS_DATA_FORK && prealloc)
4055 xfs_inode_set_eofblocks_tag(ip);
4056 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4057 xfs_inode_set_cowblocks_tag(ip);
4058
4059 return 0;
4060
4061 out_unreserve_blocks:
4062 xfs_mod_fdblocks(mp, alen, false);
4063 out_unreserve_quota:
4064 if (XFS_IS_QUOTA_ON(mp))
4065 xfs_quota_unreserve_blkres(ip, alen);
4066 out:
4067 if (error == -ENOSPC || error == -EDQUOT) {
4068 trace_xfs_delalloc_enospc(ip, off, len);
4069
4070 if (prealloc || use_cowextszhint) {
4071 /* retry without any preallocation */
4072 use_cowextszhint = false;
4073 prealloc = 0;
4074 goto retry;
4075 }
4076 }
4077 return error;
4078 }
4079
4080 static int
4081 xfs_bmap_alloc_userdata(
4082 struct xfs_bmalloca *bma)
4083 {
4084 struct xfs_mount *mp = bma->ip->i_mount;
4085 int whichfork = xfs_bmapi_whichfork(bma->flags);
4086 int error;
4087
4088 /*
4089 * Set the data type being allocated. For the data fork, the first data
4090 * in the file is treated differently to all other allocations. For the
4091 * attribute fork, we only need to ensure the allocated range is not on
4092 * the busy list.
4093 */
4094 bma->datatype = XFS_ALLOC_NOBUSY;
4095 if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
4096 bma->datatype |= XFS_ALLOC_USERDATA;
4097 if (bma->offset == 0)
4098 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4099
4100 if (mp->m_dalign && bma->length >= mp->m_dalign) {
4101 error = xfs_bmap_isaeof(bma, whichfork);
4102 if (error)
4103 return error;
4104 }
4105
4106 if (XFS_IS_REALTIME_INODE(bma->ip))
4107 return xfs_bmap_rtalloc(bma);
4108 }
4109
4110 if (unlikely(XFS_TEST_ERROR(false, mp,
4111 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4112 return xfs_bmap_exact_minlen_extent_alloc(bma);
4113
4114 return xfs_bmap_btalloc(bma);
4115 }
4116
4117 static int
4118 xfs_bmapi_allocate(
4119 struct xfs_bmalloca *bma)
4120 {
4121 struct xfs_mount *mp = bma->ip->i_mount;
4122 int whichfork = xfs_bmapi_whichfork(bma->flags);
4123 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
4124 int tmp_logflags = 0;
4125 int error;
4126
4127 ASSERT(bma->length > 0);
4128
4129 /*
4130 * For the wasdelay case, we could also just allocate the stuff asked
4131 * for in this bmap call but that wouldn't be as good.
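 * Instead, note below that bma->length and bma->offset are taken from
 * bma->got, so the whole delalloc extent backing the request is
 * allocated, not just the caller's range.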
4132 */ 4133 if (bma->wasdel) { 4134 bma->length = (xfs_extlen_t)bma->got.br_blockcount; 4135 bma->offset = bma->got.br_startoff; 4136 if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev)) 4137 bma->prev.br_startoff = NULLFILEOFF; 4138 } else { 4139 bma->length = XFS_FILBLKS_MIN(bma->length, XFS_MAX_BMBT_EXTLEN); 4140 if (!bma->eof) 4141 bma->length = XFS_FILBLKS_MIN(bma->length, 4142 bma->got.br_startoff - bma->offset); 4143 } 4144 4145 if (bma->flags & XFS_BMAPI_CONTIG) 4146 bma->minlen = bma->length; 4147 else 4148 bma->minlen = 1; 4149 4150 if (bma->flags & XFS_BMAPI_METADATA) { 4151 if (unlikely(XFS_TEST_ERROR(false, mp, 4152 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT))) 4153 error = xfs_bmap_exact_minlen_extent_alloc(bma); 4154 else 4155 error = xfs_bmap_btalloc(bma); 4156 } else { 4157 error = xfs_bmap_alloc_userdata(bma); 4158 } 4159 if (error) 4160 return error; 4161 if (bma->blkno == NULLFSBLOCK) 4162 return -ENOSPC; 4163 4164 if (bma->flags & XFS_BMAPI_ZERO) { 4165 error = xfs_zero_extent(bma->ip, bma->blkno, bma->length); 4166 if (error) 4167 return error; 4168 } 4169 4170 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) 4171 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); 4172 /* 4173 * Bump the number of extents we've allocated 4174 * in this call. 4175 */ 4176 bma->nallocs++; 4177 4178 if (bma->cur) 4179 bma->cur->bc_ino.flags = 4180 bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0; 4181 4182 bma->got.br_startoff = bma->offset; 4183 bma->got.br_startblock = bma->blkno; 4184 bma->got.br_blockcount = bma->length; 4185 bma->got.br_state = XFS_EXT_NORM; 4186 4187 if (bma->flags & XFS_BMAPI_PREALLOC) 4188 bma->got.br_state = XFS_EXT_UNWRITTEN; 4189 4190 if (bma->wasdel) 4191 error = xfs_bmap_add_extent_delay_real(bma, whichfork); 4192 else 4193 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip, 4194 whichfork, &bma->icur, &bma->cur, &bma->got, 4195 &bma->logflags, bma->flags); 4196 4197 bma->logflags |= tmp_logflags; 4198 if (error) 4199 return error; 4200 4201 /* 4202 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real 4203 * or xfs_bmap_add_extent_hole_real might have merged it into one of 4204 * the neighbouring ones. 4205 */ 4206 xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 4207 4208 ASSERT(bma->got.br_startoff <= bma->offset); 4209 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >= 4210 bma->offset + bma->length); 4211 ASSERT(bma->got.br_state == XFS_EXT_NORM || 4212 bma->got.br_state == XFS_EXT_UNWRITTEN); 4213 return 0; 4214 } 4215 4216 STATIC int 4217 xfs_bmapi_convert_unwritten( 4218 struct xfs_bmalloca *bma, 4219 struct xfs_bmbt_irec *mval, 4220 xfs_filblks_t len, 4221 uint32_t flags) 4222 { 4223 int whichfork = xfs_bmapi_whichfork(flags); 4224 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork); 4225 int tmp_logflags = 0; 4226 int error; 4227 4228 /* check if we need to do unwritten->real conversion */ 4229 if (mval->br_state == XFS_EXT_UNWRITTEN && 4230 (flags & XFS_BMAPI_PREALLOC)) 4231 return 0; 4232 4233 /* check if we need to do real->unwritten conversion */ 4234 if (mval->br_state == XFS_EXT_NORM && 4235 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) != 4236 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) 4237 return 0; 4238 4239 /* 4240 * Modify (by adding) the state flag, if writing. 
4241 */
4242 ASSERT(mval->br_blockcount <= len);
4243 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4244 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4245 bma->ip, whichfork);
4246 }
4247 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4248 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4249
4250 /*
4251 * Before insertion into the bmbt, zero the range being converted
4252 * if required.
4253 */
4254 if (flags & XFS_BMAPI_ZERO) {
4255 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4256 mval->br_blockcount);
4257 if (error)
4258 return error;
4259 }
4260
4261 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4262 &bma->icur, &bma->cur, mval, &tmp_logflags);
4263 /*
4264 * Log the inode core unconditionally in the unwritten extent conversion
4265 * path because the conversion might not have done so (e.g., if the
4266 * extent count hasn't changed). We need to make sure the inode is dirty
4267 * in the transaction for the sake of fsync(), even if nothing has
4268 * changed, because fsync() will not force the log for this transaction
4269 * unless it sees the inode pinned.
4270 *
4271 * Note: If we're only converting cow fork extents, there aren't
4272 * any on-disk updates to make, so we don't need to log anything.
4273 */
4274 if (whichfork != XFS_COW_FORK)
4275 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4276 if (error)
4277 return error;
4278
4279 /*
4280 * Update our extent pointer, given that
4281 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4282 * of the neighbouring ones.
4283 */
4284 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4285
4286 /*
4287 * We may have combined previously unwritten space with written space,
4288 * so generate another request.
4289 */
4290 if (mval->br_blockcount < len)
4291 return -EAGAIN;
4292 return 0;
4293 }
4294
4295 xfs_extlen_t
4296 xfs_bmapi_minleft(
4297 struct xfs_trans *tp,
4298 struct xfs_inode *ip,
4299 int fork)
4300 {
4301 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, fork);
4302
4303 if (tp && tp->t_highest_agno != NULLAGNUMBER)
4304 return 0;
4305 if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4306 return 1;
4307 return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4308 }
4309
4310 /*
4311 * Log whatever the flags say, even if error. Otherwise we might miss detecting
4312 * a case where the data is changed, there's an error, and it's not logged so we
4313 * don't shut down when we should. Don't bother logging extents/btree changes if
4314 * we converted to the other format.
4315 */
4316 static void
4317 xfs_bmapi_finish(
4318 struct xfs_bmalloca *bma,
4319 int whichfork,
4320 int error)
4321 {
4322 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
4323
4324 if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4325 ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4326 bma->logflags &= ~xfs_ilog_fext(whichfork);
4327 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4328 ifp->if_format != XFS_DINODE_FMT_BTREE)
4329 bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4330
4331 if (bma->logflags)
4332 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4333 if (bma->cur)
4334 xfs_btree_del_cursor(bma->cur, error);
4335 }
4336
4337 /*
4338 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4339 * extent state if necessary. Detailed behaviour is controlled by the flags
4340 * parameter. Only allocates blocks from a single allocation group, to avoid
4341 * locking problems.
4342 *
4343 * Returns 0 on success and places the extent mappings in mval. nmaps is used
4344 * as an input/output parameter where the caller specifies the maximum number
4345 * of mappings that may be returned and xfs_bmapi_write passes back the number
4346 * of mappings (including existing mappings) it found.
4347 *
4348 * Returns a negative error code on failure, including -ENOSPC when it could not
4349 * allocate any blocks and -ENOSR when it did allocate blocks to convert a
4350 * delalloc range, but those blocks were before the passed in range.
4351 */
4352 int
4353 xfs_bmapi_write(
4354 struct xfs_trans *tp, /* transaction pointer */
4355 struct xfs_inode *ip, /* incore inode */
4356 xfs_fileoff_t bno, /* starting file offs. mapped */
4357 xfs_filblks_t len, /* length to map in file */
4358 uint32_t flags, /* XFS_BMAPI_... */
4359 xfs_extlen_t total, /* total blocks needed */
4360 struct xfs_bmbt_irec *mval, /* output: map values */
4361 int *nmap) /* i/o: mval size/count */
4362 {
4363 struct xfs_bmalloca bma = {
4364 .tp = tp,
4365 .ip = ip,
4366 .total = total,
4367 };
4368 struct xfs_mount *mp = ip->i_mount;
4369 int whichfork = xfs_bmapi_whichfork(flags);
4370 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4371 xfs_fileoff_t end; /* end of mapped file region */
4372 bool eof = false; /* after the end of extents */
4373 int error; /* error return */
4374 int n; /* current extent index */
4375 xfs_fileoff_t obno; /* old block number (offset) */
4376
4377 #ifdef DEBUG
4378 xfs_fileoff_t orig_bno; /* original block number value */
4379 int orig_flags; /* original flags arg value */
4380 xfs_filblks_t orig_len; /* original value of len arg */
4381 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4382 int orig_nmap; /* original value of *nmap */
4383
4384 orig_bno = bno;
4385 orig_len = len;
4386 orig_flags = flags;
4387 orig_mval = mval;
4388 orig_nmap = *nmap;
4389 #endif
4390
4391 ASSERT(*nmap >= 1);
4392 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4393 ASSERT(tp != NULL);
4394 ASSERT(len > 0);
4395 ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4396 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4397 ASSERT(!(flags & XFS_BMAPI_REMAP));
4398
4399 /* zeroing is currently only for data extents, not metadata */
4400 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4401 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4402 /*
4403 * we can allocate unwritten extents or pre-zero allocated blocks,
4404 * but it makes no sense to do both at once. This would result in
4405 * zeroing the unwritten extent twice while it still remains an
4406 * unwritten extent....
4407 */
4408 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4409 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4410
4411 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4412 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4413 return -EFSCORRUPTED;
4414 }
4415
4416 if (xfs_is_shutdown(mp))
4417 return -EIO;
4418
4419 XFS_STATS_INC(mp, xs_blk_mapw);
4420
4421 error = xfs_iread_extents(tp, ip, whichfork);
4422 if (error)
4423 goto error0;
4424
4425 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4426 eof = true;
4427 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4428 bma.prev.br_startoff = NULLFILEOFF;
4429 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4430
4431 n = 0;
4432 end = bno + len;
4433 obno = bno;
4434 while (bno < end && n < *nmap) {
4435 bool need_alloc = false, wasdelay = false;
4436
4437 /* in hole or beyond EOF?
*/
4438 if (eof || bma.got.br_startoff > bno) {
4439 /*
4440 * CoW fork conversions should /never/ hit EOF or
4441 * holes. There should always be something for us
4442 * to work on.
4443 */
4444 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4445 (flags & XFS_BMAPI_COWFORK)));
4446
4447 need_alloc = true;
4448 } else if (isnullstartblock(bma.got.br_startblock)) {
4449 wasdelay = true;
4450 }
4451
4452 /*
4453 * First, deal with the hole before the allocated space
4454 * that we found, if any.
4455 */
4456 if (need_alloc || wasdelay) {
4457 bma.eof = eof;
4458 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4459 bma.wasdel = wasdelay;
4460 bma.offset = bno;
4461 bma.flags = flags;
4462
4463 /*
4464 * There's a 32/64 bit type mismatch between the
4465 * allocation length request (which can be 64 bits in
4466 * length) and the bma length request, which is
4467 * xfs_extlen_t and therefore 32 bits. Hence we have to
4468 * check for 32-bit overflows and handle them here.
4469 */
4470 if (len > (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN)
4471 bma.length = XFS_MAX_BMBT_EXTLEN;
4472 else
4473 bma.length = len;
4474
4475 ASSERT(len > 0);
4476 ASSERT(bma.length > 0);
4477 error = xfs_bmapi_allocate(&bma);
4478 if (error) {
4479 /*
4480 * If we already allocated space in a previous
4481 * iteration, return what we got so far when
4482 * running out of space.
4483 */
4484 if (error == -ENOSPC && bma.nallocs)
4485 break;
4486 goto error0;
4487 }
4488
4489 /*
4490 * If this is a CoW allocation, record the data in
4491 * the refcount btree for orphan recovery.
4492 */
4493 if (whichfork == XFS_COW_FORK)
4494 xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4495 bma.length);
4496 }
4497
4498 /* Deal with the allocated space we found. */
4499 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4500 end, n, flags);
4501
4502 /* Execute unwritten extent conversion if necessary */
4503 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4504 if (error == -EAGAIN)
4505 continue;
4506 if (error)
4507 goto error0;
4508
4509 /* update the extent map to return */
4510 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4511
4512 /*
4513 * If we're done, stop now. Stop when we've allocated
4514 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4515 * the transaction may get too big.
4516 */
4517 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4518 break;
4519
4520 /* Else go on to the next record. */
4521 bma.prev = bma.got;
4522 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4523 eof = true;
4524 }
4525
4526 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4527 whichfork);
4528 if (error)
4529 goto error0;
4530
4531 ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4532 ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4533 xfs_bmapi_finish(&bma, whichfork, 0);
4534 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4535 orig_nmap, n);
4536
4537 /*
4538 * When converting delayed allocations, xfs_bmapi_allocate ignores
4539 * the passed in bno and always converts from the start of the found
4540 * delalloc extent.
4541 *
4542 * To avoid a successful return with *nmap set to 0, return the magic
4543 * -ENOSR error code for this particular case so that the caller can
4544 * handle it.
4545 */
4546 if (!n) {
4547 ASSERT(bma.nallocs >= *nmap);
4548 return -ENOSR;
4549 }
4550 *nmap = n;
4551 return 0;
4552 error0:
4553 xfs_bmapi_finish(&bma, whichfork, error);
4554 return error;
4555 }
4556
4557 /*
4558 * Convert an existing delalloc extent to real blocks based on file offset.
This 4559 * attempts to allocate the entire delalloc extent and may require multiple 4560 * invocations to allocate the target offset if a large enough physical extent 4561 * is not available. 4562 */ 4563 static int 4564 xfs_bmapi_convert_one_delalloc( 4565 struct xfs_inode *ip, 4566 int whichfork, 4567 xfs_off_t offset, 4568 struct iomap *iomap, 4569 unsigned int *seq) 4570 { 4571 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 4572 struct xfs_mount *mp = ip->i_mount; 4573 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 4574 struct xfs_bmalloca bma = { NULL }; 4575 uint16_t flags = 0; 4576 struct xfs_trans *tp; 4577 int error; 4578 4579 if (whichfork == XFS_COW_FORK) 4580 flags |= IOMAP_F_SHARED; 4581 4582 /* 4583 * Space for the extent and indirect blocks was reserved when the 4584 * delalloc extent was created so there's no need to do so here. 4585 */ 4586 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 4587 XFS_TRANS_RESERVE, &tp); 4588 if (error) 4589 return error; 4590 4591 xfs_ilock(ip, XFS_ILOCK_EXCL); 4592 xfs_trans_ijoin(tp, ip, 0); 4593 4594 error = xfs_iext_count_may_overflow(ip, whichfork, 4595 XFS_IEXT_ADD_NOSPLIT_CNT); 4596 if (error == -EFBIG) 4597 error = xfs_iext_count_upgrade(tp, ip, 4598 XFS_IEXT_ADD_NOSPLIT_CNT); 4599 if (error) 4600 goto out_trans_cancel; 4601 4602 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) || 4603 bma.got.br_startoff > offset_fsb) { 4604 /* 4605 * No extent found in the range we are trying to convert. This 4606 * should only happen for the COW fork, where another thread 4607 * might have moved the extent to the data fork in the meantime. 4608 */ 4609 WARN_ON_ONCE(whichfork != XFS_COW_FORK); 4610 error = -EAGAIN; 4611 goto out_trans_cancel; 4612 } 4613 4614 /* 4615 * If we find a real extent here we raced with another thread converting 4616 * the extent. Just return the real extent at this offset. 4617 */ 4618 if (!isnullstartblock(bma.got.br_startblock)) { 4619 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags, 4620 xfs_iomap_inode_sequence(ip, flags)); 4621 if (seq) 4622 *seq = READ_ONCE(ifp->if_seq); 4623 goto out_trans_cancel; 4624 } 4625 4626 bma.tp = tp; 4627 bma.ip = ip; 4628 bma.wasdel = true; 4629 bma.offset = bma.got.br_startoff; 4630 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, 4631 XFS_MAX_BMBT_EXTLEN); 4632 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4633 4634 /* 4635 * When we're converting the delalloc reservations backing dirty pages 4636 * in the page cache, we must be careful about how we create the new 4637 * extents: 4638 * 4639 * New CoW fork extents are created unwritten, turned into real extents 4640 * when we're about to write the data to disk, and mapped into the data 4641 * fork after the write finishes. End of story. 4642 * 4643 * New data fork extents must be mapped in as unwritten and converted 4644 * to real extents after the write succeeds to avoid exposing stale 4645 * disk contents if we crash. 
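 * Either way, the allocation below is made with XFS_BMAPI_PREALLOC so
 * that the new extent comes back in the unwritten state.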
4646 */
4647 bma.flags = XFS_BMAPI_PREALLOC;
4648 if (whichfork == XFS_COW_FORK)
4649 bma.flags |= XFS_BMAPI_COWFORK;
4650
4651 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4652 bma.prev.br_startoff = NULLFILEOFF;
4653
4654 error = xfs_bmapi_allocate(&bma);
4655 if (error)
4656 goto out_finish;
4657
4658 error = -EFSCORRUPTED;
4659 if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
4660 goto out_finish;
4661
4662 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4663 XFS_STATS_INC(mp, xs_xstrat_quick);
4664
4665 ASSERT(!isnullstartblock(bma.got.br_startblock));
4666 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4667 xfs_iomap_inode_sequence(ip, flags));
4668 if (seq)
4669 *seq = READ_ONCE(ifp->if_seq);
4670
4671 if (whichfork == XFS_COW_FORK)
4672 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4673
4674 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4675 whichfork);
4676 if (error)
4677 goto out_finish;
4678
4679 xfs_bmapi_finish(&bma, whichfork, 0);
4680 error = xfs_trans_commit(tp);
4681 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4682 return error;
4683
4684 out_finish:
4685 xfs_bmapi_finish(&bma, whichfork, error);
4686 out_trans_cancel:
4687 xfs_trans_cancel(tp);
4688 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4689 return error;
4690 }
4691
4692 /*
4693 * Pass in a delalloc extent and convert it to real extents; return the real
4694 * extent that maps offset_fsb in iomap.
4695 */
4696 int
4697 xfs_bmapi_convert_delalloc(
4698 struct xfs_inode *ip,
4699 int whichfork,
4700 loff_t offset,
4701 struct iomap *iomap,
4702 unsigned int *seq)
4703 {
4704 int error;
4705
4706 /*
4707 * Attempt to allocate whatever delalloc extent currently backs offset
4708 * and put the result into iomap. Allocate in a loop because it may
4709 * take several attempts to allocate real blocks for a contiguous
4710 * delalloc extent if free space is sufficiently fragmented.
4711 */
4712 do {
4713 error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
4714 iomap, seq);
4715 if (error)
4716 return error;
4717 } while (iomap->offset + iomap->length <= offset);
4718
4719 return 0;
4720 }
4721
4722 int
4723 xfs_bmapi_remap(
4724 struct xfs_trans *tp,
4725 struct xfs_inode *ip,
4726 xfs_fileoff_t bno,
4727 xfs_filblks_t len,
4728 xfs_fsblock_t startblock,
4729 uint32_t flags)
4730 {
4731 struct xfs_mount *mp = ip->i_mount;
4732 struct xfs_ifork *ifp;
4733 struct xfs_btree_cur *cur = NULL;
4734 struct xfs_bmbt_irec got;
4735 struct xfs_iext_cursor icur;
4736 int whichfork = xfs_bmapi_whichfork(flags);
4737 int logflags = 0, error;
4738
4739 ifp = xfs_ifork_ptr(ip, whichfork);
4740 ASSERT(len > 0);
4741 ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
4742 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4743 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4744 XFS_BMAPI_NORMAP)));
4745 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4746 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4747
4748 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4749 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4750 return -EFSCORRUPTED;
4751 }
4752
4753 if (xfs_is_shutdown(mp))
4754 return -EIO;
4755
4756 error = xfs_iread_extents(tp, ip, whichfork);
4757 if (error)
4758 return error;
4759
4760 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4761 /* make sure we only reflink into a hole.
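 * In other words, the found extent must start beyond bno and the gap
 * before it must hold all len blocks; e.g. (hypothetical values)
 * bno = 100 and len = 8 require got.br_startoff >= 108.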
*/ 4762 ASSERT(got.br_startoff > bno); 4763 ASSERT(got.br_startoff - bno >= len); 4764 } 4765 4766 ip->i_nblocks += len; 4767 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 4768 4769 if (ifp->if_format == XFS_DINODE_FMT_BTREE) { 4770 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 4771 cur->bc_ino.flags = 0; 4772 } 4773 4774 got.br_startoff = bno; 4775 got.br_startblock = startblock; 4776 got.br_blockcount = len; 4777 if (flags & XFS_BMAPI_PREALLOC) 4778 got.br_state = XFS_EXT_UNWRITTEN; 4779 else 4780 got.br_state = XFS_EXT_NORM; 4781 4782 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur, 4783 &cur, &got, &logflags, flags); 4784 if (error) 4785 goto error0; 4786 4787 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork); 4788 4789 error0: 4790 if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS) 4791 logflags &= ~XFS_ILOG_DEXT; 4792 else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE) 4793 logflags &= ~XFS_ILOG_DBROOT; 4794 4795 if (logflags) 4796 xfs_trans_log_inode(tp, ip, logflags); 4797 if (cur) 4798 xfs_btree_del_cursor(cur, error); 4799 return error; 4800 } 4801 4802 /* 4803 * When a delalloc extent is split (e.g., due to a hole punch), the original 4804 * indlen reservation must be shared across the two new extents that are left 4805 * behind. 4806 * 4807 * Given the original reservation and the worst case indlen for the two new 4808 * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4809 * reservation fairly across the two new extents. If necessary, steal available 4810 * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4811 * ores == 1). The number of stolen blocks is returned. The availability and 4812 * subsequent accounting of stolen blocks is the responsibility of the caller. 4813 */ 4814 static xfs_filblks_t 4815 xfs_bmap_split_indlen( 4816 xfs_filblks_t ores, /* original res. */ 4817 xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4818 xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4819 xfs_filblks_t avail) /* stealable blocks */ 4820 { 4821 xfs_filblks_t len1 = *indlen1; 4822 xfs_filblks_t len2 = *indlen2; 4823 xfs_filblks_t nres = len1 + len2; /* new total res. */ 4824 xfs_filblks_t stolen = 0; 4825 xfs_filblks_t resfactor; 4826 4827 /* 4828 * Steal as many blocks as we can to try and satisfy the worst case 4829 * indlen for both new extents. 4830 */ 4831 if (ores < nres && avail) 4832 stolen = XFS_FILBLKS_MIN(nres - ores, avail); 4833 ores += stolen; 4834 4835 /* nothing else to do if we've satisfied the new reservation */ 4836 if (ores >= nres) 4837 return stolen; 4838 4839 /* 4840 * We can't meet the total required reservation for the two extents. 4841 * Calculate the percent of the overall shortage between both extents 4842 * and apply this percentage to each of the requested indlen values. 4843 * This distributes the shortage fairly and reduces the chances that one 4844 * of the two extents is left with nothing when extents are repeatedly 4845 * split. 4846 */ 4847 resfactor = (ores * 100); 4848 do_div(resfactor, nres); 4849 len1 *= resfactor; 4850 do_div(len1, 100); 4851 len2 *= resfactor; 4852 do_div(len2, 100); 4853 ASSERT(len1 + len2 <= ores); 4854 ASSERT(len1 < *indlen1 && len2 < *indlen2); 4855 4856 /* 4857 * Hand out the remainder to each extent. If one of the two reservations 4858 * is zero, we want to make sure that one gets a block first. The loop 4859 * below starts with len1, so hand len2 a block right off the bat if it 4860 * is zero. 
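 *
 * A hypothetical example: ores = 3, *indlen1 = 20, *indlen2 = 1 scales
 * down to len1 = 2, len2 = 0 with one block of ores left over; that
 * block goes to len2 first, so the final split is 2/1 rather than 3/0.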
4861 */ 4862 ores -= (len1 + len2); 4863 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 4864 if (ores && !len2 && *indlen2) { 4865 len2++; 4866 ores--; 4867 } 4868 while (ores) { 4869 if (len1 < *indlen1) { 4870 len1++; 4871 ores--; 4872 } 4873 if (!ores) 4874 break; 4875 if (len2 < *indlen2) { 4876 len2++; 4877 ores--; 4878 } 4879 } 4880 4881 *indlen1 = len1; 4882 *indlen2 = len2; 4883 4884 return stolen; 4885 } 4886 4887 int 4888 xfs_bmap_del_extent_delay( 4889 struct xfs_inode *ip, 4890 int whichfork, 4891 struct xfs_iext_cursor *icur, 4892 struct xfs_bmbt_irec *got, 4893 struct xfs_bmbt_irec *del) 4894 { 4895 struct xfs_mount *mp = ip->i_mount; 4896 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 4897 struct xfs_bmbt_irec new; 4898 int64_t da_old, da_new, da_diff = 0; 4899 xfs_fileoff_t del_endoff, got_endoff; 4900 xfs_filblks_t got_indlen, new_indlen, stolen; 4901 uint32_t state = xfs_bmap_fork_to_state(whichfork); 4902 int error = 0; 4903 bool isrt; 4904 4905 XFS_STATS_INC(mp, xs_del_exlist); 4906 4907 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4908 del_endoff = del->br_startoff + del->br_blockcount; 4909 got_endoff = got->br_startoff + got->br_blockcount; 4910 da_old = startblockval(got->br_startblock); 4911 da_new = 0; 4912 4913 ASSERT(del->br_blockcount > 0); 4914 ASSERT(got->br_startoff <= del->br_startoff); 4915 ASSERT(got_endoff >= del_endoff); 4916 4917 if (isrt) { 4918 uint64_t rtexts = del->br_blockcount; 4919 4920 do_div(rtexts, mp->m_sb.sb_rextsize); 4921 xfs_mod_frextents(mp, rtexts); 4922 } 4923 4924 /* 4925 * Update the inode delalloc counter now and wait to update the 4926 * sb counters as we might have to borrow some blocks for the 4927 * indirect block accounting. 4928 */ 4929 ASSERT(!isrt); 4930 error = xfs_quota_unreserve_blkres(ip, del->br_blockcount); 4931 if (error) 4932 return error; 4933 ip->i_delayed_blks -= del->br_blockcount; 4934 4935 if (got->br_startoff == del->br_startoff) 4936 state |= BMAP_LEFT_FILLING; 4937 if (got_endoff == del_endoff) 4938 state |= BMAP_RIGHT_FILLING; 4939 4940 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 4941 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4942 /* 4943 * Matches the whole extent. Delete the entry. 4944 */ 4945 xfs_iext_remove(ip, icur, state); 4946 xfs_iext_prev(ifp, icur); 4947 break; 4948 case BMAP_LEFT_FILLING: 4949 /* 4950 * Deleting the first part of the extent. 4951 */ 4952 got->br_startoff = del_endoff; 4953 got->br_blockcount -= del->br_blockcount; 4954 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4955 got->br_blockcount), da_old); 4956 got->br_startblock = nullstartblock((int)da_new); 4957 xfs_iext_update_extent(ip, state, icur, got); 4958 break; 4959 case BMAP_RIGHT_FILLING: 4960 /* 4961 * Deleting the last part of the extent. 4962 */ 4963 got->br_blockcount = got->br_blockcount - del->br_blockcount; 4964 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4965 got->br_blockcount), da_old); 4966 got->br_startblock = nullstartblock((int)da_new); 4967 xfs_iext_update_extent(ip, state, icur, got); 4968 break; 4969 case 0: 4970 /* 4971 * Deleting the middle of the extent. 4972 * 4973 * Distribute the original indlen reservation across the two new 4974 * extents. Steal blocks from the deleted extent if necessary. 4975 * Stealing blocks simply fudges the fdblocks accounting below. 4976 * Warn if either of the new indlen reservations is zero as this 4977 * can lead to delalloc problems. 
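 *
 * E.g. (hypothetical): punching blocks [20, 30) out of a delalloc
 * extent covering [10, 50) leaves [10, 20) in got and [30, 50) in new;
 * da_old is split between them via xfs_bmap_split_indlen(), stealing
 * up to del->br_blockcount blocks if the reservation falls short.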
4978 */ 4979 got->br_blockcount = del->br_startoff - got->br_startoff; 4980 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4981 4982 new.br_blockcount = got_endoff - del_endoff; 4983 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4984 4985 WARN_ON_ONCE(!got_indlen || !new_indlen); 4986 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4987 del->br_blockcount); 4988 4989 got->br_startblock = nullstartblock((int)got_indlen); 4990 4991 new.br_startoff = del_endoff; 4992 new.br_state = got->br_state; 4993 new.br_startblock = nullstartblock((int)new_indlen); 4994 4995 xfs_iext_update_extent(ip, state, icur, got); 4996 xfs_iext_next(ifp, icur); 4997 xfs_iext_insert(ip, icur, &new, state); 4998 4999 da_new = got_indlen + new_indlen - stolen; 5000 del->br_blockcount -= stolen; 5001 break; 5002 } 5003 5004 ASSERT(da_old >= da_new); 5005 da_diff = da_old - da_new; 5006 if (!isrt) 5007 da_diff += del->br_blockcount; 5008 if (da_diff) { 5009 xfs_mod_fdblocks(mp, da_diff, false); 5010 xfs_mod_delalloc(mp, -da_diff); 5011 } 5012 return error; 5013 } 5014 5015 void 5016 xfs_bmap_del_extent_cow( 5017 struct xfs_inode *ip, 5018 struct xfs_iext_cursor *icur, 5019 struct xfs_bmbt_irec *got, 5020 struct xfs_bmbt_irec *del) 5021 { 5022 struct xfs_mount *mp = ip->i_mount; 5023 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK); 5024 struct xfs_bmbt_irec new; 5025 xfs_fileoff_t del_endoff, got_endoff; 5026 uint32_t state = BMAP_COWFORK; 5027 5028 XFS_STATS_INC(mp, xs_del_exlist); 5029 5030 del_endoff = del->br_startoff + del->br_blockcount; 5031 got_endoff = got->br_startoff + got->br_blockcount; 5032 5033 ASSERT(del->br_blockcount > 0); 5034 ASSERT(got->br_startoff <= del->br_startoff); 5035 ASSERT(got_endoff >= del_endoff); 5036 ASSERT(!isnullstartblock(got->br_startblock)); 5037 5038 if (got->br_startoff == del->br_startoff) 5039 state |= BMAP_LEFT_FILLING; 5040 if (got_endoff == del_endoff) 5041 state |= BMAP_RIGHT_FILLING; 5042 5043 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 5044 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 5045 /* 5046 * Matches the whole extent. Delete the entry. 5047 */ 5048 xfs_iext_remove(ip, icur, state); 5049 xfs_iext_prev(ifp, icur); 5050 break; 5051 case BMAP_LEFT_FILLING: 5052 /* 5053 * Deleting the first part of the extent. 5054 */ 5055 got->br_startoff = del_endoff; 5056 got->br_blockcount -= del->br_blockcount; 5057 got->br_startblock = del->br_startblock + del->br_blockcount; 5058 xfs_iext_update_extent(ip, state, icur, got); 5059 break; 5060 case BMAP_RIGHT_FILLING: 5061 /* 5062 * Deleting the last part of the extent. 5063 */ 5064 got->br_blockcount -= del->br_blockcount; 5065 xfs_iext_update_extent(ip, state, icur, got); 5066 break; 5067 case 0: 5068 /* 5069 * Deleting the middle of the extent. 5070 */ 5071 got->br_blockcount = del->br_startoff - got->br_startoff; 5072 5073 new.br_startoff = del_endoff; 5074 new.br_blockcount = got_endoff - del_endoff; 5075 new.br_state = got->br_state; 5076 new.br_startblock = del->br_startblock + del->br_blockcount; 5077 5078 xfs_iext_update_extent(ip, state, icur, got); 5079 xfs_iext_next(ifp, icur); 5080 xfs_iext_insert(ip, icur, &new, state); 5081 break; 5082 } 5083 ip->i_delayed_blks -= del->br_blockcount; 5084 } 5085 5086 /* 5087 * Called by xfs_bmapi to update file extent records and the btree 5088 * after removing space. 
5089 */ 5090 STATIC int /* error */ 5091 xfs_bmap_del_extent_real( 5092 xfs_inode_t *ip, /* incore inode pointer */ 5093 xfs_trans_t *tp, /* current transaction pointer */ 5094 struct xfs_iext_cursor *icur, 5095 struct xfs_btree_cur *cur, /* if null, not a btree */ 5096 xfs_bmbt_irec_t *del, /* data to remove from extents */ 5097 int *logflagsp, /* inode logging flags */ 5098 int whichfork, /* data or attr fork */ 5099 uint32_t bflags) /* bmapi flags */ 5100 { 5101 xfs_fsblock_t del_endblock=0; /* first block past del */ 5102 xfs_fileoff_t del_endoff; /* first offset past del */ 5103 int do_fx; /* free extent at end of routine */ 5104 int error; /* error return value */ 5105 struct xfs_bmbt_irec got; /* current extent entry */ 5106 xfs_fileoff_t got_endoff; /* first offset past got */ 5107 int i; /* temp state */ 5108 struct xfs_ifork *ifp; /* inode fork pointer */ 5109 xfs_mount_t *mp; /* mount structure */ 5110 xfs_filblks_t nblks; /* quota/sb block count */ 5111 xfs_bmbt_irec_t new; /* new record to be inserted */ 5112 /* REFERENCED */ 5113 uint qfield; /* quota field to update */ 5114 uint32_t state = xfs_bmap_fork_to_state(whichfork); 5115 struct xfs_bmbt_irec old; 5116 5117 *logflagsp = 0; 5118 5119 mp = ip->i_mount; 5120 XFS_STATS_INC(mp, xs_del_exlist); 5121 5122 ifp = xfs_ifork_ptr(ip, whichfork); 5123 ASSERT(del->br_blockcount > 0); 5124 xfs_iext_get_extent(ifp, icur, &got); 5125 ASSERT(got.br_startoff <= del->br_startoff); 5126 del_endoff = del->br_startoff + del->br_blockcount; 5127 got_endoff = got.br_startoff + got.br_blockcount; 5128 ASSERT(got_endoff >= del_endoff); 5129 ASSERT(!isnullstartblock(got.br_startblock)); 5130 qfield = 0; 5131 5132 /* 5133 * If it's the case where the directory code is running with no block 5134 * reservation, and the deleted block is in the middle of its extent, 5135 * and the resulting insert of an extent would cause transformation to 5136 * btree format, then reject it. The calling code will then swap blocks 5137 * around instead. We have to do this now, rather than waiting for the 5138 * conversion to btree format, since the transaction will be dirty then. 5139 */ 5140 if (tp->t_blk_res == 0 && 5141 ifp->if_format == XFS_DINODE_FMT_EXTENTS && 5142 ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) && 5143 del->br_startoff > got.br_startoff && del_endoff < got_endoff) 5144 return -ENOSPC; 5145 5146 *logflagsp = XFS_ILOG_CORE; 5147 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 5148 if (!(bflags & XFS_BMAPI_REMAP)) { 5149 error = xfs_rtfree_blocks(tp, del->br_startblock, 5150 del->br_blockcount); 5151 if (error) 5152 return error; 5153 } 5154 5155 do_fx = 0; 5156 qfield = XFS_TRANS_DQ_RTBCOUNT; 5157 } else { 5158 do_fx = 1; 5159 qfield = XFS_TRANS_DQ_BCOUNT; 5160 } 5161 nblks = del->br_blockcount; 5162 5163 del_endblock = del->br_startblock + del->br_blockcount; 5164 if (cur) { 5165 error = xfs_bmbt_lookup_eq(cur, &got, &i); 5166 if (error) 5167 return error; 5168 if (XFS_IS_CORRUPT(mp, i != 1)) 5169 return -EFSCORRUPTED; 5170 } 5171 5172 if (got.br_startoff == del->br_startoff) 5173 state |= BMAP_LEFT_FILLING; 5174 if (got_endoff == del_endoff) 5175 state |= BMAP_RIGHT_FILLING; 5176 5177 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 5178 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 5179 /* 5180 * Matches the whole extent. Delete the entry. 
5181 */
5182 xfs_iext_remove(ip, icur, state);
5183 xfs_iext_prev(ifp, icur);
5184 ifp->if_nextents--;
5185
5186 *logflagsp |= XFS_ILOG_CORE;
5187 if (!cur) {
5188 *logflagsp |= xfs_ilog_fext(whichfork);
5189 break;
5190 }
5191 if ((error = xfs_btree_delete(cur, &i)))
5192 return error;
5193 if (XFS_IS_CORRUPT(mp, i != 1))
5194 return -EFSCORRUPTED;
5195 break;
5196 case BMAP_LEFT_FILLING:
5197 /*
5198 * Deleting the first part of the extent.
5199 */
5200 got.br_startoff = del_endoff;
5201 got.br_startblock = del_endblock;
5202 got.br_blockcount -= del->br_blockcount;
5203 xfs_iext_update_extent(ip, state, icur, &got);
5204 if (!cur) {
5205 *logflagsp |= xfs_ilog_fext(whichfork);
5206 break;
5207 }
5208 error = xfs_bmbt_update(cur, &got);
5209 if (error)
5210 return error;
5211 break;
5212 case BMAP_RIGHT_FILLING:
5213 /*
5214 * Deleting the last part of the extent.
5215 */
5216 got.br_blockcount -= del->br_blockcount;
5217 xfs_iext_update_extent(ip, state, icur, &got);
5218 if (!cur) {
5219 *logflagsp |= xfs_ilog_fext(whichfork);
5220 break;
5221 }
5222 error = xfs_bmbt_update(cur, &got);
5223 if (error)
5224 return error;
5225 break;
5226 case 0:
5227 /*
5228 * Deleting the middle of the extent.
5229 */
5230
5231 old = got;
5232
5233 got.br_blockcount = del->br_startoff - got.br_startoff;
5234 xfs_iext_update_extent(ip, state, icur, &got);
5235
5236 new.br_startoff = del_endoff;
5237 new.br_blockcount = got_endoff - del_endoff;
5238 new.br_state = got.br_state;
5239 new.br_startblock = del_endblock;
5240
5241 *logflagsp |= XFS_ILOG_CORE;
5242 if (cur) {
5243 error = xfs_bmbt_update(cur, &got);
5244 if (error)
5245 return error;
5246 error = xfs_btree_increment(cur, 0, &i);
5247 if (error)
5248 return error;
5249 cur->bc_rec.b = new;
5250 error = xfs_btree_insert(cur, &i);
5251 if (error && error != -ENOSPC)
5252 return error;
5253 /*
5254 * If we get no-space back from the btree insert, it
5255 * tried a split, and we have a zero block reservation.
5256 * Fix up our state and return the error.
5257 */
5258 if (error == -ENOSPC) {
5259 /*
5260 * Reset the cursor, don't trust it after any
5261 * insert operation.
5262 */
5263 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5264 if (error)
5265 return error;
5266 if (XFS_IS_CORRUPT(mp, i != 1))
5267 return -EFSCORRUPTED;
5268 /*
5269 * Update the btree record back
5270 * to the original value.
5271 */
5272 error = xfs_bmbt_update(cur, &old);
5273 if (error)
5274 return error;
5275 /*
5276 * Reset the extent record back
5277 * to the original value.
5278 */
5279 xfs_iext_update_extent(ip, state, icur, &old);
5280 *logflagsp = 0;
5281 return -ENOSPC;
5282 }
5283 if (XFS_IS_CORRUPT(mp, i != 1))
5284 return -EFSCORRUPTED;
5285 } else
5286 *logflagsp |= xfs_ilog_fext(whichfork);
5287
5288 ifp->if_nextents++;
5289 xfs_iext_next(ifp, icur);
5290 xfs_iext_insert(ip, icur, &new, state);
5291 break;
5292 }
5293
5294 /* remove reverse mapping */
5295 xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5296
5297 /*
5298 * If we need to, add to list of extents to delete.
5299 */
5300 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5301 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5302 xfs_refcount_decrease_extent(tp, del);
5303 } else {
5304 error = __xfs_free_extent_later(tp, del->br_startblock,
5305 del->br_blockcount, NULL,
5306 XFS_AG_RESV_NONE,
5307 ((bflags & XFS_BMAPI_NODISCARD) ||
5308 del->br_state == XFS_EXT_UNWRITTEN));
5309 if (error)
5310 return error;
5311 }
5312 }
5313
5314 /*
5315 * Adjust inode # blocks in the file.
5316 */ 5317 if (nblks) 5318 ip->i_nblocks -= nblks; 5319 /* 5320 * Adjust quota data. 5321 */ 5322 if (qfield && !(bflags & XFS_BMAPI_REMAP)) 5323 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); 5324 5325 return 0; 5326 } 5327 5328 /* 5329 * Unmap (remove) blocks from a file. 5330 * If nexts is nonzero then the number of extents to remove is limited to 5331 * that value. If not all extents in the block range can be removed then 5332 * *done is set. 5333 */ 5334 int /* error */ 5335 __xfs_bunmapi( 5336 struct xfs_trans *tp, /* transaction pointer */ 5337 struct xfs_inode *ip, /* incore inode */ 5338 xfs_fileoff_t start, /* first file offset deleted */ 5339 xfs_filblks_t *rlen, /* i/o: amount remaining */ 5340 uint32_t flags, /* misc flags */ 5341 xfs_extnum_t nexts) /* number of extents max */ 5342 { 5343 struct xfs_btree_cur *cur; /* bmap btree cursor */ 5344 struct xfs_bmbt_irec del; /* extent being deleted */ 5345 int error; /* error return value */ 5346 xfs_extnum_t extno; /* extent number in list */ 5347 struct xfs_bmbt_irec got; /* current extent record */ 5348 struct xfs_ifork *ifp; /* inode fork pointer */ 5349 int isrt; /* freeing in rt area */ 5350 int logflags; /* transaction logging flags */ 5351 xfs_extlen_t mod; /* rt extent offset */ 5352 struct xfs_mount *mp = ip->i_mount; 5353 int tmp_logflags; /* partial logging flags */ 5354 int wasdel; /* was a delayed alloc extent */ 5355 int whichfork; /* data or attribute fork */ 5356 xfs_fsblock_t sum; 5357 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5358 xfs_fileoff_t end; 5359 struct xfs_iext_cursor icur; 5360 bool done = false; 5361 5362 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_); 5363 5364 whichfork = xfs_bmapi_whichfork(flags); 5365 ASSERT(whichfork != XFS_COW_FORK); 5366 ifp = xfs_ifork_ptr(ip, whichfork); 5367 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) 5368 return -EFSCORRUPTED; 5369 if (xfs_is_shutdown(mp)) 5370 return -EIO; 5371 5372 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5373 ASSERT(len > 0); 5374 ASSERT(nexts >= 0); 5375 5376 error = xfs_iread_extents(tp, ip, whichfork); 5377 if (error) 5378 return error; 5379 5380 if (xfs_iext_count(ifp) == 0) { 5381 *rlen = 0; 5382 return 0; 5383 } 5384 XFS_STATS_INC(mp, xs_blk_unmap); 5385 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5386 end = start + len; 5387 5388 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) { 5389 *rlen = 0; 5390 return 0; 5391 } 5392 end--; 5393 5394 logflags = 0; 5395 if (ifp->if_format == XFS_DINODE_FMT_BTREE) { 5396 ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE); 5397 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 5398 cur->bc_ino.flags = 0; 5399 } else 5400 cur = NULL; 5401 5402 if (isrt) { 5403 /* 5404 * Synchronize by locking the bitmap inode. 5405 */ 5406 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP); 5407 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 5408 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM); 5409 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 5410 } 5411 5412 extno = 0; 5413 while (end != (xfs_fileoff_t)-1 && end >= start && 5414 (nexts == 0 || extno < nexts)) { 5415 /* 5416 * Is the found extent after a hole in which end lives? 5417 * Just back up to the previous extent, if so. 5418 */ 5419 if (got.br_startoff > end && 5420 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5421 done = true; 5422 break; 5423 } 5424 /* 5425 * Is the last block of this extent before the range 5426 * we're supposed to delete? If so, we're done. 
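 * Clamping end to the last block of got below means end < start
 * exactly when the whole extent sits below the range being unmapped.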
5427 */ 5428 end = XFS_FILEOFF_MIN(end, 5429 got.br_startoff + got.br_blockcount - 1); 5430 if (end < start) 5431 break; 5432 /* 5433 * Then deal with the (possibly delayed) allocated space 5434 * we found. 5435 */ 5436 del = got; 5437 wasdel = isnullstartblock(del.br_startblock); 5438 5439 if (got.br_startoff < start) { 5440 del.br_startoff = start; 5441 del.br_blockcount -= start - got.br_startoff; 5442 if (!wasdel) 5443 del.br_startblock += start - got.br_startoff; 5444 } 5445 if (del.br_startoff + del.br_blockcount > end + 1) 5446 del.br_blockcount = end + 1 - del.br_startoff; 5447 5448 if (!isrt) 5449 goto delete; 5450 5451 sum = del.br_startblock + del.br_blockcount; 5452 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod); 5453 if (mod) { 5454 /* 5455 * Realtime extent not lined up at the end. 5456 * The extent could have been split into written 5457 * and unwritten pieces, or we could just be 5458 * unmapping part of it. But we can't really 5459 * get rid of part of a realtime extent. 5460 */ 5461 if (del.br_state == XFS_EXT_UNWRITTEN) { 5462 /* 5463 * This piece is unwritten, or we're not 5464 * using unwritten extents. Skip over it. 5465 */ 5466 ASSERT(end >= mod); 5467 end -= mod > del.br_blockcount ? 5468 del.br_blockcount : mod; 5469 if (end < got.br_startoff && 5470 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5471 done = true; 5472 break; 5473 } 5474 continue; 5475 } 5476 /* 5477 * It's written, turn it unwritten. 5478 * This is better than zeroing it. 5479 */ 5480 ASSERT(del.br_state == XFS_EXT_NORM); 5481 ASSERT(tp->t_blk_res > 0); 5482 /* 5483 * If this spans a realtime extent boundary, 5484 * chop it back to the start of the one we end at. 5485 */ 5486 if (del.br_blockcount > mod) { 5487 del.br_startoff += del.br_blockcount - mod; 5488 del.br_startblock += del.br_blockcount - mod; 5489 del.br_blockcount = mod; 5490 } 5491 del.br_state = XFS_EXT_UNWRITTEN; 5492 error = xfs_bmap_add_extent_unwritten_real(tp, ip, 5493 whichfork, &icur, &cur, &del, 5494 &logflags); 5495 if (error) 5496 goto error0; 5497 goto nodelete; 5498 } 5499 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); 5500 if (mod) { 5501 xfs_extlen_t off = mp->m_sb.sb_rextsize - mod; 5502 5503 /* 5504 * Realtime extent is lined up at the end but not 5505 * at the front. We'll get rid of full extents if 5506 * we can. 5507 */ 5508 if (del.br_blockcount > off) { 5509 del.br_blockcount -= off; 5510 del.br_startoff += off; 5511 del.br_startblock += off; 5512 } else if (del.br_startoff == start && 5513 (del.br_state == XFS_EXT_UNWRITTEN || 5514 tp->t_blk_res == 0)) { 5515 /* 5516 * Can't make it unwritten. There isn't 5517 * a full extent here so just skip it. 5518 */ 5519 ASSERT(end >= del.br_blockcount); 5520 end -= del.br_blockcount; 5521 if (got.br_startoff > end && 5522 !xfs_iext_prev_extent(ifp, &icur, &got)) { 5523 done = true; 5524 break; 5525 } 5526 continue; 5527 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5528 struct xfs_bmbt_irec prev; 5529 xfs_fileoff_t unwrite_start; 5530 5531 /* 5532 * This one is already unwritten. 5533 * It must have a written left neighbor. 5534 * Unwrite the killed part of that one and 5535 * try again. 
5536 */ 5537 if (!xfs_iext_prev_extent(ifp, &icur, &prev)) 5538 ASSERT(0); 5539 ASSERT(prev.br_state == XFS_EXT_NORM); 5540 ASSERT(!isnullstartblock(prev.br_startblock)); 5541 ASSERT(del.br_startblock == 5542 prev.br_startblock + prev.br_blockcount); 5543 unwrite_start = max3(start, 5544 del.br_startoff - mod, 5545 prev.br_startoff); 5546 mod = unwrite_start - prev.br_startoff; 5547 prev.br_startoff = unwrite_start; 5548 prev.br_startblock += mod; 5549 prev.br_blockcount -= mod; 5550 prev.br_state = XFS_EXT_UNWRITTEN; 5551 error = xfs_bmap_add_extent_unwritten_real(tp, 5552 ip, whichfork, &icur, &cur, 5553 &prev, &logflags); 5554 if (error) 5555 goto error0; 5556 goto nodelete; 5557 } else { 5558 ASSERT(del.br_state == XFS_EXT_NORM); 5559 del.br_state = XFS_EXT_UNWRITTEN; 5560 error = xfs_bmap_add_extent_unwritten_real(tp, 5561 ip, whichfork, &icur, &cur, 5562 &del, &logflags); 5563 if (error) 5564 goto error0; 5565 goto nodelete; 5566 } 5567 } 5568 5569 delete: 5570 if (wasdel) { 5571 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur, 5572 &got, &del); 5573 } else { 5574 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur, 5575 &del, &tmp_logflags, whichfork, 5576 flags); 5577 logflags |= tmp_logflags; 5578 } 5579 5580 if (error) 5581 goto error0; 5582 5583 end = del.br_startoff - 1; 5584 nodelete: 5585 /* 5586 * If not done go on to the next (previous) record. 5587 */ 5588 if (end != (xfs_fileoff_t)-1 && end >= start) { 5589 if (!xfs_iext_get_extent(ifp, &icur, &got) || 5590 (got.br_startoff > end && 5591 !xfs_iext_prev_extent(ifp, &icur, &got))) { 5592 done = true; 5593 break; 5594 } 5595 extno++; 5596 } 5597 } 5598 if (done || end == (xfs_fileoff_t)-1 || end < start) 5599 *rlen = 0; 5600 else 5601 *rlen = end - start + 1; 5602 5603 /* 5604 * Convert to a btree if necessary. 5605 */ 5606 if (xfs_bmap_needs_btree(ip, whichfork)) { 5607 ASSERT(cur == NULL); 5608 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, 5609 &tmp_logflags, whichfork); 5610 logflags |= tmp_logflags; 5611 } else { 5612 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, 5613 whichfork); 5614 } 5615 5616 error0: 5617 /* 5618 * Log everything. Do this after conversion, there's no point in 5619 * logging the extent records if we've converted to btree format. 5620 */ 5621 if ((logflags & xfs_ilog_fext(whichfork)) && 5622 ifp->if_format != XFS_DINODE_FMT_EXTENTS) 5623 logflags &= ~xfs_ilog_fext(whichfork); 5624 else if ((logflags & xfs_ilog_fbroot(whichfork)) && 5625 ifp->if_format != XFS_DINODE_FMT_BTREE) 5626 logflags &= ~xfs_ilog_fbroot(whichfork); 5627 /* 5628 * Log inode even in the error case, if the transaction 5629 * is dirty we'll need to shut down the filesystem. 5630 */ 5631 if (logflags) 5632 xfs_trans_log_inode(tp, ip, logflags); 5633 if (cur) { 5634 if (!error) 5635 cur->bc_ino.allocated = 0; 5636 xfs_btree_del_cursor(cur, error); 5637 } 5638 return error; 5639 } 5640 5641 /* Unmap a range of a file. */ 5642 int 5643 xfs_bunmapi( 5644 xfs_trans_t *tp, 5645 struct xfs_inode *ip, 5646 xfs_fileoff_t bno, 5647 xfs_filblks_t len, 5648 uint32_t flags, 5649 xfs_extnum_t nexts, 5650 int *done) 5651 { 5652 int error; 5653 5654 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts); 5655 *done = (len == 0); 5656 return error; 5657 } 5658 5659 /* 5660 * Determine whether an extent shift can be accomplished by a merge with the 5661 * extent that precedes the target hole of the shift. 
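 *
 * For example (hypothetical values, matching extent states assumed):
 * left = { 0, 100, 10 } and got = { 15, 110, 5 } can merge under a
 * shift of 5, since 15 - 5 == 0 + 10 and 100 + 10 == 110.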
/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
STATIC bool
xfs_bmse_can_merge(
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN))
		return false;

	return true;
}
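/*
 * Worked example (illustrative sketch with hypothetical values): take
 * left = { br_startoff 0, br_startblock 100, br_blockcount 10 } and
 * got = { br_startoff 15, br_startblock 110, br_blockcount 5 } with
 * shift = 5. The shifted startoff is 15 - 5 = 10, which lines up with left
 * both in the file (0 + 10 = 10) and on disk (100 + 10 = 110), the states
 * match, and the combined length of 15 blocks is well under
 * XFS_MAX_BMBT_EXTLEN, so the shift can be done as a merge into
 * { br_startoff 0, br_startblock 100, br_blockcount 15 }.
 */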
/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			shift,		/* shift fsb */
	struct xfs_iext_cursor		*icur,
	struct xfs_bmbt_irec		*got,		/* extent to shift */
	struct xfs_bmbt_irec		*left,		/* preceding extent */
	struct xfs_btree_cur		*cur,
	int				*logflags)	/* output */
{
	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec		new;
	xfs_filblks_t			blockcount;
	int				error, i;
	struct xfs_mount		*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(left, got, shift));

	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	ifp->if_nextents--;
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

	/* change to extent format if required after extent removal */
	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
	if (error)
		return error;

done:
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(ifp, icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	xfs_rmap_map_extent(tp, ip, whichfork, &new);
	return 0;
}

static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	xfs_rmap_map_extent(tp, ip, whichfork, got);
	return 0;
}

int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
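/*
 * Usage sketch (illustrative, not a call site in this file; start_fsb is a
 * hypothetical name and the transaction setup is elided): the function
 * shifts at most one extent per call, so the collapse-range path drives it
 * in a loop, one transaction per iteration, until *done is set:
 *
 *	xfs_fileoff_t	next_fsb = start_fsb;
 *	bool		done = false;
 *
 *	while (!done) {
 *		... allocate transaction tp and take the ILOCK ...
 *		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
 *				offset_shift_fsb, &done);
 *		if (error)
 *			break;
 *		... commit tp ...
 *	}
 */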
/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
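/*
 * Worked example for the overflow check above (illustrative sketch with
 * hypothetical values): the on-disk bmbt startoff field is 54 bits wide, so
 * BMBT_STARTOFF_MASK is (1ULL << 54) - 1. If got.br_startoff were
 * BMBT_STARTOFF_MASK - 10 and shift were 20, the masked sum would wrap
 * around to 9, which is smaller than the original startoff, and the shift is
 * refused with -EINVAL rather than silently truncating the offset.
 */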
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way. We should
		 * never find mergeable extents in this scenario. Check anyway
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
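/*
 * Usage sketch (illustrative, not a call site in this file; transaction
 * setup is elided): the insert-range path walks extents from the end of the
 * file back toward stop_fsb, one shift per call, and passing NULLFSBLOCK as
 * the initial *next_fsb selects the last extent in the fork:
 *
 *	xfs_fileoff_t	next_fsb = NULLFSBLOCK;
 *	bool		done = false;
 *
 *	while (!done) {
 *		... allocate transaction tp and take the ILOCK ...
 *		error = xfs_bmap_insert_extents(tp, ip, &next_fsb,
 *				offset_shift_fsb, &done, stop_fsb);
 *		if (error)
 *			break;
 *		... commit tp ...
 *	}
 */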
/*
 * Splits an extent into two extents at split_fsb block such that split_fsb
 * becomes the first block of the new extent. If split_fsb lies in a hole or
 * at the first block of an extent, just return 0.
 */
int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new; /* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Read in all the extents */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	ifp->if_nextents++;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return	bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_bmap_update_get_group(tp->t_mountp, bi);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
void
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
void
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}
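/*
 * Usage sketch for the two hooks above (illustrative; src_ip, dst_ip and
 * irec are hypothetical names, and a dirty transaction tp is assumed): the
 * hooks only queue intents, and the deferred-ops machinery later calls
 * xfs_bmap_finish_one() below to apply them.
 *
 *	xfs_bmap_unmap_extent(tp, src_ip, &irec);
 *	xfs_bmap_map_extent(tp, dst_ip, &irec);
 *	error = xfs_defer_finish(&tp);
 */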
/*
 * Process one of the deferred bmap operations. We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_bmap_intent		*bi)
{
	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
	int				error = 0;

	ASSERT(tp->t_highest_agno == NULLAGNUMBER);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			bi->bi_type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			bi->bi_owner->i_ino, bi->bi_whichfork,
			bmap->br_startoff, bmap->br_blockcount,
			bmap->br_state);

	if (WARN_ON_ONCE(bi->bi_whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
				bmap->br_blockcount, bmap->br_startblock, 0);
		bmap->br_blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
				&bmap->br_blockcount, XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}

/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		return __this_address;

	if (XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK) {
		if (!xfs_verify_rtext(mp, irec->br_startblock,
				      irec->br_blockcount))
			return __this_address;
	} else {
		if (!xfs_verify_fsbext(mp, irec->br_startblock,
				       irec->br_blockcount))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}

int __init
xfs_bmap_intent_init_cache(void)
{
	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
			sizeof(struct xfs_bmap_intent),
			0, 0, NULL);

	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
}

void
xfs_bmap_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_bmap_intent_cache);
	xfs_bmap_intent_cache = NULL;
}
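/*
 * Usage sketch (illustrative; the real call sites live in the XFS module
 * init and teardown paths): the two cache helpers above pair up as
 *
 *	error = xfs_bmap_intent_init_cache();
 *	if (error)
 *		return error;
 *	...
 *	xfs_bmap_intent_destroy_cache();
 */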