/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_bmap.h"
#include "xfs_inode.h"

/*
 * Lookup the first record less than or equal to [bno, len, owner, offset]
 * in the btree given by cur.
 */
int
xfs_rmap_lookup_le(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags,
	int			*stat)
{
	cur->bc_rec.r.rm_startblock = bno;
	cur->bc_rec.r.rm_blockcount = len;
	cur->bc_rec.r.rm_owner = owner;
	cur->bc_rec.r.rm_offset = offset;
	cur->bc_rec.r.rm_flags = flags;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

/*
 * Lookup the record exactly matching [bno, len, owner, offset]
 * in the btree given by cur.
 */
int
xfs_rmap_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags,
	int			*stat)
{
	cur->bc_rec.r.rm_startblock = bno;
	cur->bc_rec.r.rm_blockcount = len;
	cur->bc_rec.r.rm_owner = owner;
	cur->bc_rec.r.rm_offset = offset;
	cur->bc_rec.r.rm_flags = flags;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
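/*
 * Usage sketch (illustrative only): the lookup helpers above only position
 * the btree cursor; callers that need the record contents follow up with
 * xfs_rmap_get_rec(), e.g.:
 *
 *	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags, &i);
 *	if (!error && i)
 *		error = xfs_rmap_get_rec(cur, &irec, &i);
 */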
/*
 * Update the record referred to by cur to the value given
 * by [bno, len, owner, offset].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_rmap_update(
	struct xfs_btree_cur	*cur,
	struct xfs_rmap_irec	*irec)
{
	union xfs_btree_rec	rec;
	int			error;

	trace_xfs_rmap_update(cur->bc_mp, cur->bc_private.a.agno,
			irec->rm_startblock, irec->rm_blockcount,
			irec->rm_owner, irec->rm_offset, irec->rm_flags);

	rec.rmap.rm_startblock = cpu_to_be32(irec->rm_startblock);
	rec.rmap.rm_blockcount = cpu_to_be32(irec->rm_blockcount);
	rec.rmap.rm_owner = cpu_to_be64(irec->rm_owner);
	rec.rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(irec));
	error = xfs_btree_update(cur, &rec);
	if (error)
		trace_xfs_rmap_update_error(cur->bc_mp,
				cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

int
xfs_rmap_insert(
	struct xfs_btree_cur	*rcur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags)
{
	int			i;
	int			error;

	trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_private.a.agno, agbno,
			len, owner, offset, flags);

	error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 0, done);

	rcur->bc_rec.r.rm_startblock = agbno;
	rcur->bc_rec.r.rm_blockcount = len;
	rcur->bc_rec.r.rm_owner = owner;
	rcur->bc_rec.r.rm_offset = offset;
	rcur->bc_rec.r.rm_flags = flags;
	error = xfs_btree_insert(rcur, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
done:
	if (error)
		trace_xfs_rmap_insert_error(rcur->bc_mp,
				rcur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

static int
xfs_rmap_btrec_to_irec(
	union xfs_btree_rec	*rec,
	struct xfs_rmap_irec	*irec)
{
	irec->rm_flags = 0;
	irec->rm_startblock = be32_to_cpu(rec->rmap.rm_startblock);
	irec->rm_blockcount = be32_to_cpu(rec->rmap.rm_blockcount);
	irec->rm_owner = be64_to_cpu(rec->rmap.rm_owner);
	return xfs_rmap_irec_offset_unpack(be64_to_cpu(rec->rmap.rm_offset),
			irec);
}

/*
 * Get the data from the pointed-to record.
 */
int
xfs_rmap_get_rec(
	struct xfs_btree_cur	*cur,
	struct xfs_rmap_irec	*irec,
	int			*stat)
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !*stat)
		return error;

	return xfs_rmap_btrec_to_irec(rec, irec);
}
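/*
 * Note on the on-disk encoding: record flags travel in the high bits of the
 * 64-bit rm_offset field, which is why the conversion helpers above go
 * through xfs_rmap_irec_offset_pack()/xfs_rmap_irec_offset_unpack() instead
 * of copying the offset verbatim.
 */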
/*
 * Find the extent in the rmap btree and remove it.
 *
 * The record we find should always be an exact match for the extent that we're
 * looking for, since we insert them into the btree without modification.
 *
 * Special Case #1: when growing the filesystem, we "free" an extent when
 * growing the last AG. This extent is new space and so it is not tracked as
 * used space in the btree. The growfs code will pass in an owner of
 * XFS_RMAP_OWN_NULL to indicate that it expects that there is no owner of this
 * extent. We verify this by checking that the record returned by the lookup
 * does not overlap the new extent.
 *
 * Special Case #2: EFIs do not record the owner of the extent, so when
 * recovering EFIs from the log we pass in XFS_RMAP_OWN_UNKNOWN to tell the rmap
 * btree to ignore the owner (i.e. wildcard match) so we don't trigger
 * corruption checks during log recovery.
 */
STATIC int
xfs_rmap_unmap(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			unwritten,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_rmap_irec	ltrec;
	uint64_t		ltoff;
	int			error = 0;
	int			i;
	uint64_t		owner;
	uint64_t		offset;
	unsigned int		flags;
	bool			ignore_off;

	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
	ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
			(flags & XFS_RMAP_BMBT_BLOCK);
	if (unwritten)
		flags |= XFS_RMAP_UNWRITTEN;
	trace_xfs_rmap_unmap(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);

	/*
	 * We should always have a left record because there's a static record
	 * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
	 * will not ever be removed from the tree.
	 */
	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags, &i);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);

	error = xfs_rmap_get_rec(cur, &ltrec, &i);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
	trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
			cur->bc_private.a.agno, ltrec.rm_startblock,
			ltrec.rm_blockcount, ltrec.rm_owner,
			ltrec.rm_offset, ltrec.rm_flags);
	ltoff = ltrec.rm_offset;

	/*
	 * For growfs, the incoming extent must be beyond the left record we
	 * just found as it is new space and won't be used by anyone. This is
	 * just a corruption check as we don't actually do anything with this
	 * extent.  Note that we need to use >= instead of > because it might
	 * be the case that the "left" extent goes all the way to EOFS.
	 */
	if (owner == XFS_RMAP_OWN_NULL) {
		XFS_WANT_CORRUPTED_GOTO(mp, bno >= ltrec.rm_startblock +
						ltrec.rm_blockcount, out_error);
		goto out_done;
	}

	/* Make sure the unwritten flag matches. */
	XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
			(ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);

	/* Make sure the extent we found covers the entire freeing range. */
	XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
		ltrec.rm_startblock + ltrec.rm_blockcount >=
		bno + len, out_error);

	/* Make sure the owner matches what we expect to find in the tree. */
	XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner ||
				    XFS_RMAP_NON_INODE_OWNER(owner), out_error);

	/* Check the offset, if necessary. */
	if (!XFS_RMAP_NON_INODE_OWNER(owner)) {
		if (flags & XFS_RMAP_BMBT_BLOCK) {
			XFS_WANT_CORRUPTED_GOTO(mp,
					ltrec.rm_flags & XFS_RMAP_BMBT_BLOCK,
					out_error);
		} else {
			XFS_WANT_CORRUPTED_GOTO(mp,
					ltrec.rm_offset <= offset, out_error);
			XFS_WANT_CORRUPTED_GOTO(mp,
					ltoff + ltrec.rm_blockcount >= offset + len,
					out_error);
		}
	}
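
	/*
	 * The extent being unmapped is either the whole of ltrec, its left
	 * edge, its right edge, or a chunk out of the middle; the four
	 * branches below handle those cases in that order.
	 */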
	if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
		/* exact match, simply remove the record from rmap tree */
		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
				ltrec.rm_startblock, ltrec.rm_blockcount,
				ltrec.rm_owner, ltrec.rm_offset,
				ltrec.rm_flags);
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
	} else if (ltrec.rm_startblock == bno) {
		/*
		 * overlap left hand side of extent: move the start, trim the
		 * length and update the current record.
		 *
		 *       ltbno                ltlen
		 * Orig:    |oooooooooooooooooooo|
		 * Freeing: |fffffffff|
		 * Result:            |rrrrrrrrrr|
		 *         bno       len
		 */
		ltrec.rm_startblock += len;
		ltrec.rm_blockcount -= len;
		if (!ignore_off)
			ltrec.rm_offset += len;
		error = xfs_rmap_update(cur, &ltrec);
		if (error)
			goto out_error;
	} else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
		/*
		 * overlap right hand side of extent: trim the length and update
		 * the current record.
		 *
		 *       ltbno                ltlen
		 * Orig:    |oooooooooooooooooooo|
		 * Freeing:            |fffffffff|
		 * Result:  |rrrrrrrrrr|
		 *                    bno       len
		 */
		ltrec.rm_blockcount -= len;
		error = xfs_rmap_update(cur, &ltrec);
		if (error)
			goto out_error;
	} else {

		/*
		 * overlap middle of extent: trim the length of the existing
		 * record to the length of the new left-extent size, increment
		 * the insertion position so we can insert a new record
		 * containing the remaining right-extent space.
		 *
		 *       ltbno                ltlen
		 * Orig:    |oooooooooooooooooooo|
		 * Freeing:       |fffffffff|
		 * Result:  |rrrrr|         |rrrr|
		 *               bno       len
		 */
		xfs_extlen_t	orig_len = ltrec.rm_blockcount;

		ltrec.rm_blockcount = bno - ltrec.rm_startblock;
		error = xfs_rmap_update(cur, &ltrec);
		if (error)
			goto out_error;

		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto out_error;

		cur->bc_rec.r.rm_startblock = bno + len;
		cur->bc_rec.r.rm_blockcount = orig_len - len -
						       ltrec.rm_blockcount;
		cur->bc_rec.r.rm_owner = ltrec.rm_owner;
		if (ignore_off)
			cur->bc_rec.r.rm_offset = 0;
		else
			cur->bc_rec.r.rm_offset = offset + len;
		cur->bc_rec.r.rm_flags = flags;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
				cur->bc_rec.r.rm_startblock,
				cur->bc_rec.r.rm_blockcount,
				cur->bc_rec.r.rm_owner,
				cur->bc_rec.r.rm_offset,
				cur->bc_rec.r.rm_flags);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto out_error;
	}

out_done:
	trace_xfs_rmap_unmap_done(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);
out_error:
	if (error)
		trace_xfs_rmap_unmap_error(mp, cur->bc_private.a.agno,
				error, _RET_IP_);
	return error;
}

/*
 * Remove a reference to an extent in the rmap btree.
 */
int
xfs_rmap_free(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return 0;

	cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);

	error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
	if (error)
		goto out_error;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

out_error:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
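
/*
 * Usage sketch (illustrative only): a caller freeing file blocks describes
 * the owner and lets xfs_rmap_free() drop the reverse mapping, which is a
 * no-op on filesystems without the rmapbt feature:
 *
 *	struct xfs_owner_info	oinfo;
 *
 *	xfs_rmap_ino_owner(&oinfo, ip->i_ino, XFS_DATA_FORK, fileoff);
 *	error = xfs_rmap_free(tp, agbp, agno, bno, len, &oinfo);
 */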

/*
 * A mergeable rmap must have the same owner and the same values for
 * the unwritten, attr_fork, and bmbt flags.  The startblock and
 * offset are checked separately.
 */
static bool
xfs_rmap_is_mergeable(
	struct xfs_rmap_irec	*irec,
	uint64_t		owner,
	unsigned int		flags)
{
	if (irec->rm_owner == XFS_RMAP_OWN_NULL)
		return false;
	if (irec->rm_owner != owner)
		return false;
	if ((flags & XFS_RMAP_UNWRITTEN) ^
	    (irec->rm_flags & XFS_RMAP_UNWRITTEN))
		return false;
	if ((flags & XFS_RMAP_ATTR_FORK) ^
	    (irec->rm_flags & XFS_RMAP_ATTR_FORK))
		return false;
	if ((flags & XFS_RMAP_BMBT_BLOCK) ^
	    (irec->rm_flags & XFS_RMAP_BMBT_BLOCK))
		return false;
	return true;
}

/*
 * When we allocate a new block, the first thing we do is add a reference to
 * the extent in the rmap btree. This takes the form of a [agbno, length,
 * owner, offset] record.  Flags are encoded in the high bits of the offset
 * field.
 */
STATIC int
xfs_rmap_map(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			unwritten,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_rmap_irec	ltrec;
	struct xfs_rmap_irec	gtrec;
	int			have_gt;
	int			have_lt;
	int			error = 0;
	int			i;
	uint64_t		owner;
	uint64_t		offset;
	unsigned int		flags = 0;
	bool			ignore_off;

	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
	ASSERT(owner != 0);
	ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
			(flags & XFS_RMAP_BMBT_BLOCK);
	if (unwritten)
		flags |= XFS_RMAP_UNWRITTEN;
	trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);

	/*
	 * For the initial lookup, look for an exact match or the left-adjacent
	 * record for our insertion point. This will also give us the record for
	 * start block contiguity tests.
	 */
	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags,
			&have_lt);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);

	error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
	trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
			cur->bc_private.a.agno, ltrec.rm_startblock,
			ltrec.rm_blockcount, ltrec.rm_owner,
			ltrec.rm_offset, ltrec.rm_flags);

	if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
		have_lt = 0;

	XFS_WANT_CORRUPTED_GOTO(mp,
			have_lt == 0 ||
			ltrec.rm_startblock + ltrec.rm_blockcount <= bno, out_error);

	/*
	 * Increment the cursor to see if we have a right-adjacent record to our
	 * insertion point. This will give us the record for end block
	 * contiguity tests.
	 */
	error = xfs_btree_increment(cur, 0, &have_gt);
	if (error)
		goto out_error;
	if (have_gt) {
		error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(mp, have_gt == 1, out_error);
		XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= gtrec.rm_startblock,
					out_error);
		trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
			cur->bc_private.a.agno, gtrec.rm_startblock,
			gtrec.rm_blockcount, gtrec.rm_owner,
			gtrec.rm_offset, gtrec.rm_flags);
		if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
			have_gt = 0;
	}
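
	/*
	 * At this point there are three possibilities, handled in order
	 * below: merge into the left record (possibly also swallowing the
	 * right record), merge into the right record, or insert a brand new
	 * record at the cursor position.
	 */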
	/*
	 * Note: cursor currently points one record to the right of ltrec, even
	 * if there is no record in the tree to the right.
	 */
	if (have_lt &&
	    ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
	    (ignore_off || ltrec.rm_offset + ltrec.rm_blockcount == offset)) {
		/*
		 * left edge contiguous, merge into left record.
		 *
		 *       ltbno     ltlen
		 * orig:   |ooooooooo|
		 * adding:           |aaaaaaaaa|
		 * result: |rrrrrrrrrrrrrrrrrrr|
		 *                  bno       len
		 */
		ltrec.rm_blockcount += len;
		if (have_gt &&
		    bno + len == gtrec.rm_startblock &&
		    (ignore_off || offset + len == gtrec.rm_offset) &&
		    (unsigned long)ltrec.rm_blockcount + len +
				gtrec.rm_blockcount <= XFS_RMAP_LEN_MAX) {
			/*
			 * right edge also contiguous, delete right record
			 * and merge into left record.
			 *
			 *       ltbno     ltlen    gtbno     gtlen
			 * orig:   |ooooooooo|         |ooooooooo|
			 * adding:           |aaaaaaaaa|
			 * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
			 */
			ltrec.rm_blockcount += gtrec.rm_blockcount;
			trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
					gtrec.rm_startblock,
					gtrec.rm_blockcount,
					gtrec.rm_owner,
					gtrec.rm_offset,
					gtrec.rm_flags);
			error = xfs_btree_delete(cur, &i);
			if (error)
				goto out_error;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
		}

		/* point the cursor back to the left record and update */
		error = xfs_btree_decrement(cur, 0, &have_gt);
		if (error)
			goto out_error;
		error = xfs_rmap_update(cur, &ltrec);
		if (error)
			goto out_error;
	} else if (have_gt &&
		   bno + len == gtrec.rm_startblock &&
		   (ignore_off || offset + len == gtrec.rm_offset)) {
		/*
		 * right edge contiguous, merge into right record.
		 *
		 *                 gtbno     gtlen
		 * Orig:             |ooooooooo|
		 * adding: |aaaaaaaaa|
		 * Result: |rrrrrrrrrrrrrrrrrrr|
		 *        bno       len
		 */
		gtrec.rm_startblock = bno;
		gtrec.rm_blockcount += len;
		if (!ignore_off)
			gtrec.rm_offset = offset;
		error = xfs_rmap_update(cur, &gtrec);
		if (error)
			goto out_error;
	} else {
		/*
		 * no contiguous edge with identical owner, insert
		 * new record at current cursor position.
		 */
		cur->bc_rec.r.rm_startblock = bno;
		cur->bc_rec.r.rm_blockcount = len;
		cur->bc_rec.r.rm_owner = owner;
		cur->bc_rec.r.rm_offset = offset;
		cur->bc_rec.r.rm_flags = flags;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
			owner, offset, flags);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
	}

	trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);
out_error:
	if (error)
		trace_xfs_rmap_map_error(mp, cur->bc_private.a.agno,
				error, _RET_IP_);
	return error;
}

/*
 * Add a reference to an extent in the rmap btree.
 */
int
xfs_rmap_alloc(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return 0;

	cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
	error = xfs_rmap_map(cur, bno, len, false, oinfo);
	if (error)
		goto out_error;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

out_error:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

#define RMAP_LEFT_CONTIG	(1 << 0)
#define RMAP_RIGHT_CONTIG	(1 << 1)
#define RMAP_LEFT_FILLING	(1 << 2)
#define RMAP_RIGHT_FILLING	(1 << 3)
#define RMAP_LEFT_VALID		(1 << 6)
#define RMAP_RIGHT_VALID	(1 << 7)

#define LEFT		r[0]
#define RIGHT		r[1]
#define PREV		r[2]
#define NEW		r[3]

/*
 * Convert an unwritten extent to a real extent or vice versa.
 * Does not handle overlapping extents.
 */
STATIC int
xfs_rmap_convert(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			unwritten,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_rmap_irec	r[4];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
					/* new is 3 */
	uint64_t		owner;
	uint64_t		offset;
	uint64_t		new_endoff;
	unsigned int		oldext;
	unsigned int		newext;
	unsigned int		flags = 0;
	int			i;
	int			state = 0;
	int			error;

	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
	ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
			(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
	oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
	new_endoff = offset + len;
	trace_xfs_rmap_convert(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);

	/*
	 * For the initial lookup, look for an exact match or the left-adjacent
	 * record for our insertion point. This will also give us the record for
	 * start block contiguity tests.
	 */
	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);

	error = xfs_rmap_get_rec(cur, &PREV, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
	trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
			cur->bc_private.a.agno, PREV.rm_startblock,
			PREV.rm_blockcount, PREV.rm_owner,
			PREV.rm_offset, PREV.rm_flags);

	ASSERT(PREV.rm_offset <= offset);
	ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
	ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
	newext = ~oldext & XFS_RMAP_UNWRITTEN;

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.rm_offset == offset)
		state |= RMAP_LEFT_FILLING;
	if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
		state |= RMAP_RIGHT_FILLING;
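
	/*
	 * state now holds the FILLING bits; the neighbor probes below add
	 * the VALID and CONTIG bits, and the switch statement further down
	 * dispatches on the combined FILLING/CONTIG state.
	 */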
	/*
	 * Decrement the cursor to see if we have a left-adjacent record to our
	 * insertion point. This will give us the record for end block
	 * contiguity tests.
	 */
	error = xfs_btree_decrement(cur, 0, &i);
	if (error)
		goto done;
	if (i) {
		state |= RMAP_LEFT_VALID;
		error = xfs_rmap_get_rec(cur, &LEFT, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		XFS_WANT_CORRUPTED_GOTO(mp,
				LEFT.rm_startblock + LEFT.rm_blockcount <= bno,
				done);
		trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
				cur->bc_private.a.agno, LEFT.rm_startblock,
				LEFT.rm_blockcount, LEFT.rm_owner,
				LEFT.rm_offset, LEFT.rm_flags);
		if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
		    LEFT.rm_offset + LEFT.rm_blockcount == offset &&
		    xfs_rmap_is_mergeable(&LEFT, owner, newext))
			state |= RMAP_LEFT_CONTIG;
	}

	/*
	 * Increment the cursor to see if we have a right-adjacent record to our
	 * insertion point. This will give us the record for end block
	 * contiguity tests.
	 */
	error = xfs_btree_increment(cur, 0, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
	error = xfs_btree_increment(cur, 0, &i);
	if (error)
		goto done;
	if (i) {
		state |= RMAP_RIGHT_VALID;
		error = xfs_rmap_get_rec(cur, &RIGHT, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= RIGHT.rm_startblock,
				done);
		trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
				cur->bc_private.a.agno, RIGHT.rm_startblock,
				RIGHT.rm_blockcount, RIGHT.rm_owner,
				RIGHT.rm_offset, RIGHT.rm_flags);
		if (bno + len == RIGHT.rm_startblock &&
		    offset + len == RIGHT.rm_offset &&
		    xfs_rmap_is_mergeable(&RIGHT, owner, newext))
			state |= RMAP_RIGHT_CONTIG;
	}

	/* check that left + prev + right is not too long */
	if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
			RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
			(RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
			 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
	    (unsigned long)LEFT.rm_blockcount + len +
	     RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
		state &= ~RMAP_RIGHT_CONTIG;

	trace_xfs_rmap_convert_state(mp, cur->bc_private.a.agno, state,
			_RET_IP_);

	/* reset the cursor back to PREV */
	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
	if (error)
		goto done;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
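
	/*
	 * Example (illustrative): converting all of PREV while both
	 * neighbors are mergeable yields state == RMAP_LEFT_FILLING |
	 * RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG, which
	 * the first case below handles by deleting RIGHT and PREV and
	 * extending LEFT across all three extents.
	 */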

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
			 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
	case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
	     RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
				RIGHT.rm_startblock, RIGHT.rm_blockcount,
				RIGHT.rm_owner, RIGHT.rm_offset,
				RIGHT.rm_flags);
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
				PREV.rm_startblock, PREV.rm_blockcount,
				PREV.rm_owner, PREV.rm_offset,
				PREV.rm_flags);
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		NEW = LEFT;
		NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
				PREV.rm_startblock, PREV.rm_blockcount,
				PREV.rm_owner, PREV.rm_offset,
				PREV.rm_flags);
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		NEW = LEFT;
		NEW.rm_blockcount += PREV.rm_blockcount;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
				RIGHT.rm_startblock, RIGHT.rm_blockcount,
				RIGHT.rm_owner, RIGHT.rm_offset,
				RIGHT.rm_flags);
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		NEW = PREV;
		NEW.rm_blockcount = len + RIGHT.rm_blockcount;
		NEW.rm_flags = newext;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		NEW = PREV;
		NEW.rm_flags = newext;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		NEW = PREV;
		NEW.rm_offset += len;
		NEW.rm_startblock += len;
		NEW.rm_blockcount -= len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto done;
		NEW = LEFT;
		NEW.rm_blockcount += len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		NEW = PREV;
		NEW.rm_startblock += len;
		NEW.rm_offset += len;
		NEW.rm_blockcount -= len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		NEW.rm_startblock = bno;
		NEW.rm_owner = owner;
		NEW.rm_offset = offset;
		NEW.rm_blockcount = len;
		NEW.rm_flags = newext;
		cur->bc_rec.r = NEW;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
				len, owner, offset, newext);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		break;

	case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		NEW = PREV;
		NEW.rm_blockcount -= len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto done;
		NEW = RIGHT;
		NEW.rm_offset = offset;
		NEW.rm_startblock = bno;
		NEW.rm_blockcount += len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		break;

	case RMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		NEW = PREV;
		NEW.rm_blockcount -= len;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
				oldext, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
		NEW.rm_startblock = bno;
		NEW.rm_owner = owner;
		NEW.rm_offset = offset;
		NEW.rm_blockcount = len;
		NEW.rm_flags = newext;
		cur->bc_rec.r = NEW;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
				len, owner, offset, newext);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		break;

	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
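		/*
		 *          PREV
		 * Orig:    |oooooooooooooooooooo|
		 * Convert:        |nnnnnnnn|
		 * Result:  |oooooo|nnnnnnnn|oooo|
		 *                bno      len
		 */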
		/* new right extent - oldext */
		NEW.rm_startblock = bno + len;
		NEW.rm_owner = owner;
		NEW.rm_offset = new_endoff;
		NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
				new_endoff;
		NEW.rm_flags = PREV.rm_flags;
		error = xfs_rmap_update(cur, &NEW);
		if (error)
			goto done;
		/* new left extent - oldext */
		NEW = PREV;
		NEW.rm_blockcount = offset - PREV.rm_offset;
		cur->bc_rec.r = NEW;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
				NEW.rm_startblock, NEW.rm_blockcount,
				NEW.rm_owner, NEW.rm_offset,
				NEW.rm_flags);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		/*
		 * Reset the cursor to the position of the new extent
		 * we are about to insert as we can't trust it after
		 * the previous insert.
		 */
		error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
				oldext, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
		/* new middle extent - newext */
		cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
		cur->bc_rec.r.rm_flags |= newext;
		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
				owner, offset, newext);
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		break;

	case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
	case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
	case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
	case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
	case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
	case RMAP_LEFT_CONTIG:
	case RMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	trace_xfs_rmap_convert_done(mp, cur->bc_private.a.agno, bno, len,
			unwritten, oinfo);
done:
	if (error)
		trace_xfs_rmap_convert_error(cur->bc_mp,
				cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

#undef	NEW
#undef	LEFT
#undef	RIGHT
#undef	PREV

struct xfs_rmap_query_range_info {
	xfs_rmap_query_range_fn	fn;
	void			*priv;
};

/* Format btree record and pass to our callback. */
STATIC int
xfs_rmap_query_range_helper(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	void			*priv)
{
	struct xfs_rmap_query_range_info	*query = priv;
	struct xfs_rmap_irec			irec;
	int					error;

	error = xfs_rmap_btrec_to_irec(rec, &irec);
	if (error)
		return error;
	return query->fn(cur, &irec, query->priv);
}

/* Find all rmaps between two keys. */
int
xfs_rmap_query_range(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*low_rec,
	struct xfs_rmap_irec		*high_rec,
	xfs_rmap_query_range_fn		fn,
	void				*priv)
{
	union xfs_btree_irec		low_brec;
	union xfs_btree_irec		high_brec;
	struct xfs_rmap_query_range_info	query;

	low_brec.r = *low_rec;
	high_brec.r = *high_rec;
	query.priv = priv;
	query.fn = fn;
	return xfs_btree_query_range(cur, &low_brec, &high_brec,
			xfs_rmap_query_range_helper, &query);
}
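
/*
 * Usage sketch (illustrative only): a walker supplies a callback matching
 * xfs_rmap_query_range_fn and receives one decoded record per call:
 *
 *	static int
 *	my_rmap_walk_fn(struct xfs_btree_cur *cur, struct xfs_rmap_irec *rec,
 *			void *priv)
 *	{
 *		... inspect *rec ...
 *		return 0;
 *	}
 *
 *	error = xfs_rmap_query_range(cur, &low, &high, my_rmap_walk_fn, priv);
 */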

/* Clean up after calling xfs_rmap_finish_one. */
void
xfs_rmap_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp;

	if (rcur == NULL)
		return;
	agbp = rcur->bc_private.a.agbp;
	xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	if (error)
		xfs_trans_brelse(tp, agbp);
}

/*
 * Process one of the deferred rmap operations.  We pass back the
 * btree cursor to maintain our lock on the rmapbt between calls.
 * This saves time and eliminates a buffer deadlock between the
 * superblock and the AGF because we'll always grab them in the same
 * order.
 */
int
xfs_rmap_finish_one(
	struct xfs_trans		*tp,
	enum xfs_rmap_intent_type	type,
	__uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_btree_cur		*rcur;
	struct xfs_buf			*agbp = NULL;
	int				error = 0;
	xfs_agnumber_t			agno;
	struct xfs_owner_info		oinfo;
	xfs_agblock_t			bno;
	bool				unwritten;

	agno = XFS_FSB_TO_AGNO(mp, startblock);
	ASSERT(agno != NULLAGNUMBER);
	bno = XFS_FSB_TO_AGBNO(mp, startblock);

	trace_xfs_rmap_deferred(mp, agno, type, bno, owner, whichfork,
			startoff, blockcount, state);

	if (XFS_TEST_ERROR(false, mp,
			XFS_ERRTAG_RMAP_FINISH_ONE,
			XFS_RANDOM_RMAP_FINISH_ONE))
		return -EIO;

	/*
	 * If we haven't gotten a cursor or the cursor AG doesn't match
	 * the startblock, get one now.
	 */
	rcur = *pcur;
	if (rcur != NULL && rcur->bc_private.a.agno != agno) {
		xfs_rmap_finish_one_cleanup(tp, rcur, 0);
		rcur = NULL;
		*pcur = NULL;
	}
	if (rcur == NULL) {
		/*
		 * Refresh the freelist before we start changing the
		 * rmapbt, because a shape change could cause us to
		 * allocate blocks.
		 */
		error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
		if (error)
			return error;
		if (!agbp)
			return -EFSCORRUPTED;

		rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
		if (!rcur) {
			error = -ENOMEM;
			goto out_cur;
		}
	}
	*pcur = rcur;

	xfs_rmap_ino_owner(&oinfo, owner, whichfork, startoff);
	unwritten = state == XFS_EXT_UNWRITTEN;
	bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, startblock);

	switch (type) {
	case XFS_RMAP_ALLOC:
	case XFS_RMAP_MAP:
		error = xfs_rmap_map(rcur, bno, blockcount, unwritten, &oinfo);
		break;
	case XFS_RMAP_FREE:
	case XFS_RMAP_UNMAP:
		error = xfs_rmap_unmap(rcur, bno, blockcount, unwritten,
				&oinfo);
		break;
	case XFS_RMAP_CONVERT:
		error = xfs_rmap_convert(rcur, bno, blockcount, !unwritten,
				&oinfo);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}
	return error;

out_cur:
	xfs_trans_brelse(tp, agbp);

	return error;
}

/*
 * Don't defer an rmap if we aren't an rmap filesystem.
 */
static bool
xfs_rmap_update_is_needed(
	struct xfs_mount	*mp)
{
	return xfs_sb_version_hasrmapbt(&mp->m_sb);
}
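
/*
 * The functions below do not modify the rmapbt directly; they queue an
 * xfs_rmap_intent on the deferred-ops list, and the deferred-ops machinery
 * later replays each intent through xfs_rmap_finish_one() above.  A typical
 * (illustrative) caller looks like:
 *
 *	error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, &imap);
 *	if (error)
 *		return error;
 */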

/*
 * Record a rmap intent; the list is kept sorted first by AG and then by
 * increasing age.
 */
static int
__xfs_rmap_add(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	enum xfs_rmap_intent_type	type,
	__uint64_t			owner,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_rmap_intent	*ri;

	trace_xfs_rmap_defer(mp, XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
			owner, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_SLEEP | KM_NOFS);
	INIT_LIST_HEAD(&ri->ri_list);
	ri->ri_type = type;
	ri->ri_owner = owner;
	ri->ri_whichfork = whichfork;
	ri->ri_bmap = *bmap;

	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
	return 0;
}

/* Map an extent into a file. */
int
xfs_rmap_map_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_rmap_update_is_needed(mp))
		return 0;

	return __xfs_rmap_add(mp, dfops, XFS_RMAP_MAP, ip->i_ino,
			whichfork, PREV);
}

/* Unmap an extent out of a file. */
int
xfs_rmap_unmap_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_rmap_update_is_needed(mp))
		return 0;

	return __xfs_rmap_add(mp, dfops, XFS_RMAP_UNMAP, ip->i_ino,
			whichfork, PREV);
}

/* Convert a data fork extent from unwritten to real or vice versa. */
int
xfs_rmap_convert_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_rmap_update_is_needed(mp))
		return 0;

	return __xfs_rmap_add(mp, dfops, XFS_RMAP_CONVERT, ip->i_ino,
			whichfork, PREV);
}

/* Schedule the creation of an rmap for non-file data. */
int
xfs_rmap_alloc_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	__uint64_t		owner)
{
	struct xfs_bmbt_irec	bmap;

	if (!xfs_rmap_update_is_needed(mp))
		return 0;

	bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
	bmap.br_blockcount = len;
	bmap.br_startoff = 0;
	bmap.br_state = XFS_EXT_NORM;

	return __xfs_rmap_add(mp, dfops, XFS_RMAP_ALLOC, owner,
			XFS_DATA_FORK, &bmap);
}

/* Schedule the deletion of an rmap for non-file data. */
int
xfs_rmap_free_extent(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	__uint64_t		owner)
{
	struct xfs_bmbt_irec	bmap;

	if (!xfs_rmap_update_is_needed(mp))
		return 0;

	bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
	bmap.br_blockcount = len;
	bmap.br_startoff = 0;
	bmap.br_state = XFS_EXT_NORM;

	return __xfs_rmap_add(mp, dfops, XFS_RMAP_FREE, owner,
			XFS_DATA_FORK, &bmap);
}