1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (C) 2016 Oracle. All Rights Reserved. 4 * Author: Darrick J. Wong <darrick.wong@oracle.com> 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_mount.h" 13 #include "xfs_defer.h" 14 #include "xfs_btree.h" 15 #include "xfs_bmap.h" 16 #include "xfs_refcount_btree.h" 17 #include "xfs_alloc.h" 18 #include "xfs_errortag.h" 19 #include "xfs_error.h" 20 #include "xfs_trace.h" 21 #include "xfs_trans.h" 22 #include "xfs_bit.h" 23 #include "xfs_refcount.h" 24 #include "xfs_rmap.h" 25 #include "xfs_ag.h" 26 27 struct kmem_cache *xfs_refcount_intent_cache; 28 29 /* Allowable refcount adjustment amounts. */ 30 enum xfs_refc_adjust_op { 31 XFS_REFCOUNT_ADJUST_INCREASE = 1, 32 XFS_REFCOUNT_ADJUST_DECREASE = -1, 33 XFS_REFCOUNT_ADJUST_COW_ALLOC = 0, 34 XFS_REFCOUNT_ADJUST_COW_FREE = -1, 35 }; 36 37 STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur, 38 xfs_agblock_t agbno, xfs_extlen_t aglen); 39 STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur, 40 xfs_agblock_t agbno, xfs_extlen_t aglen); 41 42 /* 43 * Look up the first record less than or equal to [bno, len] in the btree 44 * given by cur. 45 */ 46 int 47 xfs_refcount_lookup_le( 48 struct xfs_btree_cur *cur, 49 xfs_agblock_t bno, 50 int *stat) 51 { 52 trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno, 53 XFS_LOOKUP_LE); 54 cur->bc_rec.rc.rc_startblock = bno; 55 cur->bc_rec.rc.rc_blockcount = 0; 56 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat); 57 } 58 59 /* 60 * Look up the first record greater than or equal to [bno, len] in the btree 61 * given by cur. 
62 */ 63 int 64 xfs_refcount_lookup_ge( 65 struct xfs_btree_cur *cur, 66 xfs_agblock_t bno, 67 int *stat) 68 { 69 trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno, 70 XFS_LOOKUP_GE); 71 cur->bc_rec.rc.rc_startblock = bno; 72 cur->bc_rec.rc.rc_blockcount = 0; 73 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); 74 } 75 76 /* 77 * Look up the first record equal to [bno, len] in the btree 78 * given by cur. 79 */ 80 int 81 xfs_refcount_lookup_eq( 82 struct xfs_btree_cur *cur, 83 xfs_agblock_t bno, 84 int *stat) 85 { 86 trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno, 87 XFS_LOOKUP_LE); 88 cur->bc_rec.rc.rc_startblock = bno; 89 cur->bc_rec.rc.rc_blockcount = 0; 90 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); 91 } 92 93 /* Convert on-disk record to in-core format. */ 94 void 95 xfs_refcount_btrec_to_irec( 96 const union xfs_btree_rec *rec, 97 struct xfs_refcount_irec *irec) 98 { 99 irec->rc_startblock = be32_to_cpu(rec->refc.rc_startblock); 100 irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount); 101 irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount); 102 } 103 104 /* 105 * Get the data from the pointed-to record. 
106 */ 107 int 108 xfs_refcount_get_rec( 109 struct xfs_btree_cur *cur, 110 struct xfs_refcount_irec *irec, 111 int *stat) 112 { 113 struct xfs_mount *mp = cur->bc_mp; 114 struct xfs_perag *pag = cur->bc_ag.pag; 115 union xfs_btree_rec *rec; 116 int error; 117 xfs_agblock_t realstart; 118 119 error = xfs_btree_get_rec(cur, &rec, stat); 120 if (error || !*stat) 121 return error; 122 123 xfs_refcount_btrec_to_irec(rec, irec); 124 if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN) 125 goto out_bad_rec; 126 127 /* handle special COW-staging state */ 128 realstart = irec->rc_startblock; 129 if (realstart & XFS_REFC_COW_START) { 130 if (irec->rc_refcount != 1) 131 goto out_bad_rec; 132 realstart &= ~XFS_REFC_COW_START; 133 } else if (irec->rc_refcount < 2) { 134 goto out_bad_rec; 135 } 136 137 /* check for valid extent range, including overflow */ 138 if (!xfs_verify_agbext(pag, realstart, irec->rc_blockcount)) 139 goto out_bad_rec; 140 141 if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT) 142 goto out_bad_rec; 143 144 trace_xfs_refcount_get(cur->bc_mp, pag->pag_agno, irec); 145 return 0; 146 147 out_bad_rec: 148 xfs_warn(mp, 149 "Refcount BTree record corruption in AG %d detected!", 150 pag->pag_agno); 151 xfs_warn(mp, 152 "Start block 0x%x, block count 0x%x, references 0x%x", 153 irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount); 154 return -EFSCORRUPTED; 155 } 156 157 /* 158 * Update the record referred to by cur to the value given 159 * by [bno, len, refcount]. 160 * This either works (return 0) or gets an EFSCORRUPTED error. 
 */
STATIC int
xfs_refcount_update(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*irec)
{
	union xfs_btree_rec	rec;
	int			error;

	trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
	/* Convert the in-core record to on-disk (big-endian) format. */
	rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock);
	rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
	rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
	error = xfs_btree_update(cur, &rec);
	if (error)
		trace_xfs_refcount_update_error(cur->bc_mp,
				cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/*
 * Insert the record referred to by cur to the value given
 * by [bno, len, refcount].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
int
xfs_refcount_insert(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*irec,
	int				*i)
{
	int				error;

	trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
	/* Stage the new record in the cursor, then insert it. */
	cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
	cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
	cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
	error = xfs_btree_insert(cur, i);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* Success falls through here too; the trace only fires on error. */
out_error:
	if (error)
		trace_xfs_refcount_insert_error(cur->bc_mp,
				cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/*
 * Remove the record referred to by cur, then set the pointer to the spot
 * where the record could be re-inserted, in case we want to increment or
 * decrement the cursor.
 * This either works (return 0) or gets an EFSCORRUPTED error.
218 */ 219 STATIC int 220 xfs_refcount_delete( 221 struct xfs_btree_cur *cur, 222 int *i) 223 { 224 struct xfs_refcount_irec irec; 225 int found_rec; 226 int error; 227 228 error = xfs_refcount_get_rec(cur, &irec, &found_rec); 229 if (error) 230 goto out_error; 231 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) { 232 error = -EFSCORRUPTED; 233 goto out_error; 234 } 235 trace_xfs_refcount_delete(cur->bc_mp, cur->bc_ag.pag->pag_agno, &irec); 236 error = xfs_btree_delete(cur, i); 237 if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) { 238 error = -EFSCORRUPTED; 239 goto out_error; 240 } 241 if (error) 242 goto out_error; 243 error = xfs_refcount_lookup_ge(cur, irec.rc_startblock, &found_rec); 244 out_error: 245 if (error) 246 trace_xfs_refcount_delete_error(cur->bc_mp, 247 cur->bc_ag.pag->pag_agno, error, _RET_IP_); 248 return error; 249 } 250 251 /* 252 * Adjusting the Reference Count 253 * 254 * As stated elsewhere, the reference count btree (refcbt) stores 255 * >1 reference counts for extents of physical blocks. In this 256 * operation, we're either raising or lowering the reference count of 257 * some subrange stored in the tree: 258 * 259 * <------ adjustment range ------> 260 * ----+ +---+-----+ +--+--------+--------- 261 * 2 | | 3 | 4 | |17| 55 | 10 262 * ----+ +---+-----+ +--+--------+--------- 263 * X axis is physical blocks number; 264 * reference counts are the numbers inside the rectangles 265 * 266 * The first thing we need to do is to ensure that there are no 267 * refcount extents crossing either boundary of the range to be 268 * adjusted. 
For any extent that does cross a boundary, split it into 269 * two extents so that we can increment the refcount of one of the 270 * pieces later: 271 * 272 * <------ adjustment range ------> 273 * ----+ +---+-----+ +--+--------+----+---- 274 * 2 | | 3 | 2 | |17| 55 | 10 | 10 275 * ----+ +---+-----+ +--+--------+----+---- 276 * 277 * For this next step, let's assume that all the physical blocks in 278 * the adjustment range are mapped to a file and are therefore in use 279 * at least once. Therefore, we can infer that any gap in the 280 * refcount tree within the adjustment range represents a physical 281 * extent with refcount == 1: 282 * 283 * <------ adjustment range ------> 284 * ----+---+---+-----+-+--+--------+----+---- 285 * 2 |"1"| 3 | 2 |1|17| 55 | 10 | 10 286 * ----+---+---+-----+-+--+--------+----+---- 287 * ^ 288 * 289 * For each extent that falls within the interval range, figure out 290 * which extent is to the left or the right of that extent. Now we 291 * have a left, current, and right extent. If the new reference count 292 * of the center extent enables us to merge left, center, and right 293 * into one record covering all three, do so. If the center extent is 294 * at the left end of the range, abuts the left extent, and its new 295 * reference count matches the left extent's record, then merge them. 296 * If the center extent is at the right end of the range, abuts the 297 * right extent, and the reference counts match, merge those. In the 298 * example, we can left merge (assuming an increment operation): 299 * 300 * <------ adjustment range ------> 301 * --------+---+-----+-+--+--------+----+---- 302 * 2 | 3 | 2 |1|17| 55 | 10 | 10 303 * --------+---+-----+-+--+--------+----+---- 304 * ^ 305 * 306 * For all other extents within the range, adjust the reference count 307 * or delete it if the refcount falls below 2. 
If we were 308 * incrementing, the end result looks like this: 309 * 310 * <------ adjustment range ------> 311 * --------+---+-----+-+--+--------+----+---- 312 * 2 | 4 | 3 |2|18| 56 | 11 | 10 313 * --------+---+-----+-+--+--------+----+---- 314 * 315 * The result of a decrement operation looks as such: 316 * 317 * <------ adjustment range ------> 318 * ----+ +---+ +--+--------+----+---- 319 * 2 | | 2 | |16| 54 | 9 | 10 320 * ----+ +---+ +--+--------+----+---- 321 * DDDD 111111DD 322 * 323 * The blocks marked "D" are freed; the blocks marked "1" are only 324 * referenced once and therefore the record is removed from the 325 * refcount btree. 326 */ 327 328 /* Next block after this extent. */ 329 static inline xfs_agblock_t 330 xfs_refc_next( 331 struct xfs_refcount_irec *rc) 332 { 333 return rc->rc_startblock + rc->rc_blockcount; 334 } 335 336 /* 337 * Split a refcount extent that crosses agbno. 338 */ 339 STATIC int 340 xfs_refcount_split_extent( 341 struct xfs_btree_cur *cur, 342 xfs_agblock_t agbno, 343 bool *shape_changed) 344 { 345 struct xfs_refcount_irec rcext, tmp; 346 int found_rec; 347 int error; 348 349 *shape_changed = false; 350 error = xfs_refcount_lookup_le(cur, agbno, &found_rec); 351 if (error) 352 goto out_error; 353 if (!found_rec) 354 return 0; 355 356 error = xfs_refcount_get_rec(cur, &rcext, &found_rec); 357 if (error) 358 goto out_error; 359 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) { 360 error = -EFSCORRUPTED; 361 goto out_error; 362 } 363 if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno) 364 return 0; 365 366 *shape_changed = true; 367 trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno, 368 &rcext, agbno); 369 370 /* Establish the right extent. */ 371 tmp = rcext; 372 tmp.rc_startblock = agbno; 373 tmp.rc_blockcount -= (agbno - rcext.rc_startblock); 374 error = xfs_refcount_update(cur, &tmp); 375 if (error) 376 goto out_error; 377 378 /* Insert the left extent. 
 */
	tmp = rcext;
	tmp.rc_blockcount = agbno - rcext.rc_startblock;
	error = xfs_refcount_insert(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}
	return error;

out_error:
	trace_xfs_refcount_split_extent_error(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/*
 * Merge the left, center, and right extents into one record covering
 * all three.  On success, zeroes *aglen because the entire adjustment
 * range has been absorbed into the enlarged left extent.
 */
STATIC int
xfs_refcount_merge_center_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*center,
	struct xfs_refcount_irec	*right,
	unsigned long long		extlen,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_center_extents(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, left, center, right);

	/*
	 * Make sure the center and right extents are not in the btree.
	 * If the center extent was synthesized, the first delete call
	 * removes the right extent and we skip the second deletion.
	 * If center and right were in the btree, then the first delete
	 * call removes the center and the second one removes the right
	 * extent.
	 */
	error = xfs_refcount_lookup_ge(cur, center->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}

	error = xfs_refcount_delete(cur, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* refcount > 1 means the center record really was in the btree. */
	if (center->rc_refcount > 1) {
		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
	}

	/* Enlarge the left extent.
 */
	error = xfs_refcount_lookup_le(cur, left->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* extlen spans left + center + right; the caller checked overflow. */
	left->rc_blockcount = extlen;
	error = xfs_refcount_update(cur, left);
	if (error)
		goto out_error;

	/* The whole adjustment range was merged away. */
	*aglen = 0;
	return error;

out_error:
	trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/*
 * Merge with the left extent.  On success, advances *agbno and shrinks
 * *aglen past the blocks that were absorbed into the left extent.
 */
STATIC int
xfs_refcount_merge_left_extent(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*cleft,
	xfs_agblock_t			*agbno,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_left_extent(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, left, cleft);

	/* If the extent at agbno (cleft) wasn't synthesized, remove it. */
	if (cleft->rc_refcount > 1) {
		error = xfs_refcount_lookup_le(cur, cleft->rc_startblock,
				&found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}

		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
	}

	/* Enlarge the left extent.
 */
	error = xfs_refcount_lookup_le(cur, left->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}

	left->rc_blockcount += cleft->rc_blockcount;
	error = xfs_refcount_update(cur, left);
	if (error)
		goto out_error;

	/* Shrink the adjustment range from the left. */
	*agbno += cleft->rc_blockcount;
	*aglen -= cleft->rc_blockcount;
	return error;

out_error:
	trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/*
 * Merge with the right extent.  On success, shrinks *aglen past the
 * blocks that were absorbed into the (grown-backwards) right extent.
 */
STATIC int
xfs_refcount_merge_right_extent(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*right,
	struct xfs_refcount_irec	*cright,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_right_extent(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, cright, right);

	/*
	 * If the extent ending at agbno+aglen (cright) wasn't synthesized,
	 * remove it.
	 */
	if (cright->rc_refcount > 1) {
		error = xfs_refcount_lookup_le(cur, cright->rc_startblock,
				&found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}

		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
	}

	/* Enlarge the right extent.
 */
	error = xfs_refcount_lookup_le(cur, right->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* Grow the right extent backwards to absorb cright. */
	right->rc_startblock -= cright->rc_blockcount;
	right->rc_blockcount += cright->rc_blockcount;
	error = xfs_refcount_update(cur, right);
	if (error)
		goto out_error;

	/* Shrink the adjustment range from the right. */
	*aglen -= cright->rc_blockcount;
	return error;

out_error:
	trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/* Which refcount states qualify a neighbor for merging. */
#define XFS_FIND_RCEXT_SHARED	1
#define XFS_FIND_RCEXT_COW	2
/*
 * Find the left extent and the one after it (cleft).  This function assumes
 * that we've already split any extent crossing agbno.  NULLAGBLOCK in
 * rc_startblock marks an extent as "not found".
 */
STATIC int
xfs_refcount_find_left_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*cleft,
	xfs_agblock_t			agbno,
	xfs_extlen_t			aglen,
	int				flags)
{
	struct xfs_refcount_irec	tmp;
	int				error;
	int				found_rec;

	left->rc_startblock = cleft->rc_startblock = NULLAGBLOCK;
	error = xfs_refcount_lookup_le(cur, agbno - 1, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* The candidate must abut agbno and be in the state the flags want. */
	if (xfs_refc_next(&tmp) != agbno)
		return 0;
	if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
		return 0;
	if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
		return 0;
	/* We have a left extent; retrieve (or invent) the next right one */
	*left = tmp;

	error = xfs_btree_increment(cur, 0, &found_rec);
	if (error)
		goto out_error;
	if (found_rec) {
		error = xfs_refcount_get_rec(cur, &tmp,
				&found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}

		/* if tmp starts at the end of our range, just use that */
		if (tmp.rc_startblock == agbno)
			*cleft = tmp;
		else {
			/*
			 * There's a gap in the refcntbt at the start of the
			 * range we're interested in (refcount == 1) so
			 * synthesize the implied extent and pass it back.
			 * We assume here that the agbno/aglen range was
			 * passed in from a data fork extent mapping and
			 * therefore is allocated to exactly one owner.
			 */
			cleft->rc_startblock = agbno;
			cleft->rc_blockcount = min(aglen,
					tmp.rc_startblock - agbno);
			cleft->rc_refcount = 1;
		}
	} else {
		/*
		 * No extents, so pretend that there's one covering the whole
		 * range.
		 */
		cleft->rc_startblock = agbno;
		cleft->rc_blockcount = aglen;
		cleft->rc_refcount = 1;
	}
	trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			left, cleft, agbno);
	return error;

out_error:
	trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/*
 * Find the right extent and the one before it (cright).  This function
 * assumes that we've already split any extents crossing agbno + aglen.
 */
STATIC int
xfs_refcount_find_right_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*right,
	struct xfs_refcount_irec	*cright,
	xfs_agblock_t			agbno,
	xfs_extlen_t			aglen,
	int				flags)
{
	struct xfs_refcount_irec	tmp;
	int				error;
	int				found_rec;

	/* NULLAGBLOCK in rc_startblock marks an extent as "not found". */
	right->rc_startblock = cright->rc_startblock = NULLAGBLOCK;
	error = xfs_refcount_lookup_ge(cur, agbno + aglen, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* The candidate must abut the range end and match the flags' state. */
	if (tmp.rc_startblock != agbno + aglen)
		return 0;
	if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
		return 0;
	if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
		return 0;
	/* We have a right extent; retrieve (or invent) the next left one */
	*right = tmp;

	error = xfs_btree_decrement(cur, 0, &found_rec);
	if (error)
		goto out_error;
	if (found_rec) {
		error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}

		/* if tmp ends at the end of our range, just use that */
		if (xfs_refc_next(&tmp) == agbno + aglen)
			*cright = tmp;
		else {
			/*
			 * There's a gap in the refcntbt at the end of the
			 * range we're interested in (refcount == 1) so
			 * create the implied extent and pass it back.
			 * We assume here that the agbno/aglen range was
			 * passed in from a data fork extent mapping and
			 * therefore is allocated to exactly one owner.
			 */
			cright->rc_startblock = max(agbno, xfs_refc_next(&tmp));
			cright->rc_blockcount = right->rc_startblock -
					cright->rc_startblock;
			cright->rc_refcount = 1;
		}
	} else {
		/*
		 * No extents, so pretend that there's one covering the whole
		 * range.
		 */
		cright->rc_startblock = agbno;
		cright->rc_blockcount = aglen;
		cright->rc_refcount = 1;
	}
	trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			cright, right, agbno + aglen);
	return error;

out_error:
	trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/* Is this extent valid? */
static inline bool
xfs_refc_valid(
	struct xfs_refcount_irec	*rc)
{
	return rc->rc_startblock != NULLAGBLOCK;
}

/*
 * Try to merge with any extents on the boundaries of the adjustment range.
 * On success *agbno/*aglen are updated to exclude any blocks that were
 * merged away.
 */
STATIC int
xfs_refcount_merge_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		*agbno,
	xfs_extlen_t		*aglen,
	enum xfs_refc_adjust_op adjust,
	int			flags,
	bool			*shape_changed)
{
	struct xfs_refcount_irec	left = {0}, cleft = {0};
	struct xfs_refcount_irec	cright = {0}, right = {0};
	int				error;
	unsigned long long		ulen;
	bool				cequal;

	*shape_changed = false;
	/*
	 * Find the extent just below agbno [left], just above agbno [cleft],
	 * just below (agbno + aglen) [cright], and just above (agbno + aglen)
	 * [right].
	 */
	error = xfs_refcount_find_left_extents(cur, &left, &cleft, *agbno,
			*aglen, flags);
	if (error)
		return error;
	error = xfs_refcount_find_right_extents(cur, &right, &cright, *agbno,
			*aglen, flags);
	if (error)
		return error;

	/* No left or right extent to merge; exit.
 */
	if (!xfs_refc_valid(&left) && !xfs_refc_valid(&right))
		return 0;

	/* cleft and cright may describe the same extent. */
	cequal = (cleft.rc_startblock == cright.rc_startblock) &&
		 (cleft.rc_blockcount == cright.rc_blockcount);

	/* Try to merge left, cleft, and right.  cleft must == cright. */
	ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount +
			right.rc_blockcount;
	if (xfs_refc_valid(&left) && xfs_refc_valid(&right) &&
	    xfs_refc_valid(&cleft) && xfs_refc_valid(&cright) && cequal &&
	    left.rc_refcount == cleft.rc_refcount + adjust &&
	    right.rc_refcount == cleft.rc_refcount + adjust &&
	    ulen < MAXREFCEXTLEN) {
		*shape_changed = true;
		return xfs_refcount_merge_center_extents(cur, &left, &cleft,
				&right, ulen, aglen);
	}

	/* Try to merge left and cleft. */
	ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount;
	if (xfs_refc_valid(&left) && xfs_refc_valid(&cleft) &&
	    left.rc_refcount == cleft.rc_refcount + adjust &&
	    ulen < MAXREFCEXTLEN) {
		*shape_changed = true;
		error = xfs_refcount_merge_left_extent(cur, &left, &cleft,
				agbno, aglen);
		if (error)
			return error;

		/*
		 * If we just merged left + cleft and cleft == cright,
		 * we no longer have a cright to merge with right.  We're done.
		 */
		if (cequal)
			return 0;
	}

	/* Try to merge cright and right. */
	ulen = (unsigned long long)right.rc_blockcount + cright.rc_blockcount;
	if (xfs_refc_valid(&right) && xfs_refc_valid(&cright) &&
	    right.rc_refcount == cright.rc_refcount + adjust &&
	    ulen < MAXREFCEXTLEN) {
		*shape_changed = true;
		return xfs_refcount_merge_right_extent(cur, &right, &cright,
				aglen);
	}

	/* error is zero here; any failing merge returned above. */
	return error;
}

/*
 * XXX: This is a pretty hand-wavy estimate.  The penalty for guessing
 * true incorrectly is a shutdown FS; the penalty for guessing false
 * incorrectly is more transaction rolls than might be necessary.
 * Be conservative here.
 */
static bool
xfs_refcount_still_have_space(
	struct xfs_btree_cur		*cur)
{
	unsigned long			overhead;

	/*
	 * Worst case estimate: full splits of the free space and rmap btrees
	 * to handle each of the shape changes to the refcount btree.
	 */
	overhead = xfs_allocfree_block_count(cur->bc_mp,
				cur->bc_ag.refc.shape_changes);
	overhead += cur->bc_mp->m_refc_maxlevels;
	overhead *= cur->bc_mp->m_sb.sb_blocksize;

	/*
	 * Only allow 2 refcount extent updates per transaction if the
	 * refcount continue update "error" has been injected.
	 */
	if (cur->bc_ag.refc.nr_ops > 2 &&
	    XFS_TEST_ERROR(false, cur->bc_mp,
			   XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
		return false;

	/* Compare the worst-case overhead against the remaining log space. */
	if (cur->bc_ag.refc.nr_ops == 0)
		return true;
	else if (overhead > cur->bc_tp->t_log_res)
		return false;
	return cur->bc_tp->t_log_res - overhead >
			cur->bc_ag.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
}

/*
 * Adjust the refcounts of middle extents.  At this point we should have
 * split extents that crossed the adjustment range; merged with adjacent
 * extents; and updated agbno/aglen to reflect the merges.  Therefore,
 * all we have to do is update the extents inside [agbno, agbno + aglen].
 */
STATIC int
xfs_refcount_adjust_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		*agbno,
	xfs_extlen_t		*aglen,
	enum xfs_refc_adjust_op	adj)
{
	struct xfs_refcount_irec	ext, tmp;
	int				error;
	int				found_rec, found_tmp;
	xfs_fsblock_t			fsbno;

	/* Merging did all the work already. */
	if (*aglen == 0)
		return 0;

	error = xfs_refcount_lookup_ge(cur, *agbno, &found_rec);
	if (error)
		goto out_error;

	while (*aglen > 0 && xfs_refcount_still_have_space(cur)) {
		error = xfs_refcount_get_rec(cur, &ext, &found_rec);
		if (error)
			goto out_error;
		if (!found_rec) {
			/* Ran off the tree; synthesize a record past the AG. */
			ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
			ext.rc_blockcount = 0;
			ext.rc_refcount = 0;
		}

		/*
		 * Deal with a hole in the refcount tree; if a file maps to
		 * these blocks and there's no refcountbt record, pretend that
		 * there is one with refcount == 1.
		 */
		if (ext.rc_startblock != *agbno) {
			tmp.rc_startblock = *agbno;
			tmp.rc_blockcount = min(*aglen,
					ext.rc_startblock - *agbno);
			/* 1 + adj is 2 for increment, 0 for decrement. */
			tmp.rc_refcount = 1 + adj;
			trace_xfs_refcount_modify_extent(cur->bc_mp,
					cur->bc_ag.pag->pag_agno, &tmp);

			/*
			 * Either cover the hole (increment) or
			 * delete the range (decrement).
			 */
			cur->bc_ag.refc.nr_ops++;
			if (tmp.rc_refcount) {
				error = xfs_refcount_insert(cur, &tmp,
						&found_tmp);
				if (error)
					goto out_error;
				if (XFS_IS_CORRUPT(cur->bc_mp,
						   found_tmp != 1)) {
					error = -EFSCORRUPTED;
					goto out_error;
				}
			} else {
				/* Refcount dropped to zero; free the blocks. */
				fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
						cur->bc_ag.pag->pag_agno,
						tmp.rc_startblock);
				xfs_free_extent_later(cur->bc_tp, fsbno,
						tmp.rc_blockcount, NULL);
			}

			(*agbno) += tmp.rc_blockcount;
			(*aglen) -= tmp.rc_blockcount;

			/* Stop if there's nothing left to modify */
			if (*aglen == 0 || !xfs_refcount_still_have_space(cur))
				break;

			/* Move the cursor to the start of ext. */
			error = xfs_refcount_lookup_ge(cur, *agbno,
					&found_rec);
			if (error)
				goto out_error;
		}

		/*
		 * A previous step trimmed agbno/aglen such that the end of the
		 * range would not be in the middle of the record.  If this is
		 * no longer the case, something is seriously wrong with the
		 * btree.  Make sure we never feed the synthesized record into
		 * the processing loop below.
		 */
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount == 0) ||
		    XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount > *aglen)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}

		/*
		 * Adjust the reference count and either update the tree
		 * (incr) or free the blocks (decr).
		 */
		if (ext.rc_refcount == MAXREFCOUNT)
			goto skip;
		ext.rc_refcount += adj;
		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_ag.pag->pag_agno, &ext);
		cur->bc_ag.refc.nr_ops++;
		if (ext.rc_refcount > 1) {
			error = xfs_refcount_update(cur, &ext);
			if (error)
				goto out_error;
		} else if (ext.rc_refcount == 1) {
			/*
			 * Refcount fell to 1: remove the record entirely;
			 * the delete repositions the cursor for us.
			 */
			error = xfs_refcount_delete(cur, &found_rec);
			if (error)
				goto out_error;
			if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
				error = -EFSCORRUPTED;
				goto out_error;
			}
			goto advloop;
		} else {
			/* Refcount fell to zero: free the blocks. */
			fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
					cur->bc_ag.pag->pag_agno,
					ext.rc_startblock);
			xfs_free_extent_later(cur->bc_tp, fsbno,
					ext.rc_blockcount, NULL);
		}

skip:
		error = xfs_btree_increment(cur, 0, &found_rec);
		if (error)
			goto out_error;

advloop:
		(*agbno) += ext.rc_blockcount;
		(*aglen) -= ext.rc_blockcount;
	}

	return error;
out_error:
	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/* Adjust the reference count of a range of AG blocks.
 */
STATIC int
xfs_refcount_adjust(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*new_agbno,
	xfs_extlen_t		*new_aglen,
	enum xfs_refc_adjust_op	adj)
{
	bool			shape_changed;
	int			shape_changes = 0;
	int			error;

	/* Report back what the merge steps leave unprocessed. */
	*new_agbno = agbno;
	*new_aglen = aglen;
	if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
		trace_xfs_refcount_increase(cur->bc_mp, cur->bc_ag.pag->pag_agno,
				agbno, aglen);
	else
		trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_ag.pag->pag_agno,
				agbno, aglen);

	/*
	 * Ensure that no rcextents cross the boundary of the adjustment range.
	 */
	error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
	if (error)
		goto out_error;
	if (shape_changed)
		shape_changes++;

	error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
	if (error)
		goto out_error;
	if (shape_changed)
		shape_changes++;

	/*
	 * Try to merge with the left or right extents of the range.
	 */
	error = xfs_refcount_merge_extents(cur, new_agbno, new_aglen, adj,
			XFS_FIND_RCEXT_SHARED, &shape_changed);
	if (error)
		goto out_error;
	if (shape_changed)
		shape_changes++;
	/* Feed the shape-change count into the transaction-space estimate. */
	if (shape_changes)
		cur->bc_ag.refc.shape_changes++;

	/* Now that we've taken care of the ends, adjust the middle extents */
	error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen, adj);
	if (error)
		goto out_error;

	return 0;

out_error:
	trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			error, _RET_IP_);
	return error;
}

/* Clean up after calling xfs_refcount_finish_one.
 */
void
xfs_refcount_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp;

	if (rcur == NULL)
		return;
	/* Save the AGF buffer pointer; deleting the cursor frees rcur. */
	agbp = rcur->bc_ag.agbp;
	xfs_btree_del_cursor(rcur, error);
	if (error)
		xfs_trans_brelse(tp, agbp);
}

/*
 * Set up a continuation of a deferred refcount operation by updating the
 * intent.  Checks to make sure we're not going to run off the end of the AG.
 */
static inline int
xfs_refcount_continue_op(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		startblock,
	xfs_agblock_t		new_agbno,
	xfs_extlen_t		new_len,
	xfs_fsblock_t		*new_fsbno)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_perag	*pag = cur->bc_ag.pag;

	/* The remaining range must lie entirely within this AG. */
	if (XFS_IS_CORRUPT(mp, !xfs_verify_agbext(pag, new_agbno, new_len)))
		return -EFSCORRUPTED;

	*new_fsbno = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno);

	ASSERT(xfs_verify_fsbext(mp, *new_fsbno, new_len));
	ASSERT(pag->pag_agno == XFS_FSB_TO_AGNO(mp, *new_fsbno));

	return 0;
}

/*
 * Process one of the deferred refcount operations.  We pass back the
 * btree cursor to maintain our lock on the btree between calls.
 * This saves time and eliminates a buffer deadlock between the
 * superblock and the AGF because we'll always grab them in the same
 * order.
 */
int
xfs_refcount_finish_one(
	struct xfs_trans		*tp,
	enum xfs_refcount_intent_type	type,
	xfs_fsblock_t			startblock,
	xfs_extlen_t			blockcount,
	xfs_fsblock_t			*new_fsb,
	xfs_extlen_t			*new_len,
	struct xfs_btree_cur		**pcur)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_btree_cur		*rcur;
	struct xfs_buf			*agbp = NULL;
	int				error = 0;
	xfs_agblock_t			bno;
	xfs_agblock_t			new_agbno;
	unsigned long			nr_ops = 0;
	int				shape_changes = 0;
	struct xfs_perag		*pag;

	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, startblock));
	bno = XFS_FSB_TO_AGBNO(mp, startblock);

	trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, startblock),
			type, XFS_FSB_TO_AGBNO(mp, startblock),
			blockcount);

	/* Error injection point for exercising intent-failure paths. */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE)) {
		error = -EIO;
		goto out_drop;
	}

	/*
	 * If we haven't gotten a cursor or the cursor AG doesn't match
	 * the startblock, get one now.
	 */
	rcur = *pcur;
	if (rcur != NULL && rcur->bc_ag.pag != pag) {
		/* Carry the op statistics over to the replacement cursor. */
		nr_ops = rcur->bc_ag.refc.nr_ops;
		shape_changes = rcur->bc_ag.refc.shape_changes;
		xfs_refcount_finish_one_cleanup(tp, rcur, 0);
		rcur = NULL;
		*pcur = NULL;
	}
	if (rcur == NULL) {
		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_FREEING,
				&agbp);
		if (error)
			goto out_drop;

		rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
		rcur->bc_ag.refc.nr_ops = nr_ops;
		rcur->bc_ag.refc.shape_changes = shape_changes;
	}
	*pcur = rcur;

	switch (type) {
	case XFS_REFCOUNT_INCREASE:
		error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
				new_len, XFS_REFCOUNT_ADJUST_INCREASE);
		if (error)
			goto out_drop;
		/* A nonzero leftover means we must continue in a new tx. */
		if (*new_len > 0)
			error = xfs_refcount_continue_op(rcur, startblock,
					new_agbno, *new_len, new_fsb);
		break;
	case XFS_REFCOUNT_DECREASE:
		error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
				new_len, XFS_REFCOUNT_ADJUST_DECREASE);
		if (error)
			goto out_drop;
		if (*new_len > 0)
			error = xfs_refcount_continue_op(rcur, startblock,
					new_agbno, *new_len, new_fsb);
		break;
	case XFS_REFCOUNT_ALLOC_COW:
		/* CoW staging ops never leave leftover work behind. */
		*new_fsb = startblock + blockcount;
		*new_len = 0;
		error = __xfs_refcount_cow_alloc(rcur, bno, blockcount);
		break;
	case XFS_REFCOUNT_FREE_COW:
		*new_fsb = startblock + blockcount;
		*new_len = 0;
		error = __xfs_refcount_cow_free(rcur, bno, blockcount);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}
	if (!error && *new_len > 0)
		trace_xfs_refcount_finish_one_leftover(mp, pag->pag_agno, type,
				bno, blockcount, new_agbno, *new_len);
out_drop:
	xfs_perag_put(pag);
	return error;
}

/*
 * Record a refcount intent for later processing.
1269 */ 1270 static void 1271 __xfs_refcount_add( 1272 struct xfs_trans *tp, 1273 enum xfs_refcount_intent_type type, 1274 xfs_fsblock_t startblock, 1275 xfs_extlen_t blockcount) 1276 { 1277 struct xfs_refcount_intent *ri; 1278 1279 trace_xfs_refcount_defer(tp->t_mountp, 1280 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), 1281 type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock), 1282 blockcount); 1283 1284 ri = kmem_cache_alloc(xfs_refcount_intent_cache, 1285 GFP_NOFS | __GFP_NOFAIL); 1286 INIT_LIST_HEAD(&ri->ri_list); 1287 ri->ri_type = type; 1288 ri->ri_startblock = startblock; 1289 ri->ri_blockcount = blockcount; 1290 1291 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list); 1292 } 1293 1294 /* 1295 * Increase the reference count of the blocks backing a file's extent. 1296 */ 1297 void 1298 xfs_refcount_increase_extent( 1299 struct xfs_trans *tp, 1300 struct xfs_bmbt_irec *PREV) 1301 { 1302 if (!xfs_has_reflink(tp->t_mountp)) 1303 return; 1304 1305 __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, PREV->br_startblock, 1306 PREV->br_blockcount); 1307 } 1308 1309 /* 1310 * Decrease the reference count of the blocks backing a file's extent. 1311 */ 1312 void 1313 xfs_refcount_decrease_extent( 1314 struct xfs_trans *tp, 1315 struct xfs_bmbt_irec *PREV) 1316 { 1317 if (!xfs_has_reflink(tp->t_mountp)) 1318 return; 1319 1320 __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, PREV->br_startblock, 1321 PREV->br_blockcount); 1322 } 1323 1324 /* 1325 * Given an AG extent, find the lowest-numbered run of shared blocks 1326 * within that range and return the range in fbno/flen. If 1327 * find_end_of_shared is set, return the longest contiguous extent of 1328 * shared blocks; if not, just return the first extent we find. If no 1329 * shared blocks are found, fbno and flen will be set to NULLAGBLOCK 1330 * and 0, respectively. 
 */
int
xfs_refcount_find_shared(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*fbno,
	xfs_extlen_t		*flen,
	bool			find_end_of_shared)
{
	struct xfs_refcount_irec	tmp;
	int			i;
	int			have;
	int			error;

	trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			agbno, aglen);

	/* By default, skip the whole range */
	*fbno = NULLAGBLOCK;
	*flen = 0;

	/* Try to find a refcount extent that crosses the start */
	error = xfs_refcount_lookup_le(cur, agbno, &have);
	if (error)
		goto out_error;
	if (!have) {
		/* No left extent, look at the next one */
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			goto done;
	}
	error = xfs_refcount_get_rec(cur, &tmp, &i);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}

	/* If the extent ends before the start, look at the next one */
	if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) {
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			goto done;
		error = xfs_refcount_get_rec(cur, &tmp, &i);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
	}

	/* If the extent starts after the range we want, bail out */
	if (tmp.rc_startblock >= agbno + aglen)
		goto done;

	/* We found the start of a shared extent! */
	if (tmp.rc_startblock < agbno) {
		/* Clip the record to the start of the search range. */
		tmp.rc_blockcount -= (agbno - tmp.rc_startblock);
		tmp.rc_startblock = agbno;
	}

	*fbno = tmp.rc_startblock;
	*flen = min(tmp.rc_blockcount, agbno + aglen - *fbno);
	if (!find_end_of_shared)
		goto done;

	/* Otherwise, find the end of this shared extent */
	while (*fbno + *flen < agbno + aglen) {
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			break;
		error = xfs_refcount_get_rec(cur, &tmp, &i);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
		/* Stop at the first gap or the end of the search range. */
		if (tmp.rc_startblock >= agbno + aglen ||
		    tmp.rc_startblock != *fbno + *flen)
			break;
		*flen = min(*flen + tmp.rc_blockcount, agbno + aglen - *fbno);
	}

done:
	trace_xfs_refcount_find_shared_result(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, *fbno, *flen);

out_error:
	if (error)
		trace_xfs_refcount_find_shared_error(cur->bc_mp,
				cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/*
 * Recovering CoW Blocks After a Crash
 *
 * Due to the way that the copy on write mechanism works, there's a window of
 * opportunity in which we can lose track of allocated blocks during a crash.
 * Because CoW uses delayed allocation in the in-core CoW fork, writeback
 * causes blocks to be allocated and stored in the CoW fork.  The blocks are
 * no longer in the free space btree but are not otherwise recorded anywhere
 * until the write completes and the blocks are mapped into the file.  A crash
 * in between allocation and remapping results in the replacement blocks being
 * lost.  This situation is exacerbated by the CoW extent size hint because
 * allocations can hang around for a long time.
1447 * 1448 * However, there is a place where we can record these allocations before they 1449 * become mappings -- the reference count btree. The btree does not record 1450 * extents with refcount == 1, so we can record allocations with a refcount of 1451 * 1. Blocks being used for CoW writeout cannot be shared, so there should be 1452 * no conflict with shared block records. These mappings should be created 1453 * when we allocate blocks to the CoW fork and deleted when they're removed 1454 * from the CoW fork. 1455 * 1456 * Minor nit: records for in-progress CoW allocations and records for shared 1457 * extents must never be merged, to preserve the property that (except for CoW 1458 * allocations) there are no refcount btree entries with refcount == 1. The 1459 * only time this could potentially happen is when unsharing a block that's 1460 * adjacent to CoW allocations, so we must be careful to avoid this. 1461 * 1462 * At mount time we recover lost CoW allocations by searching the refcount 1463 * btree for these refcount == 1 mappings. These represent CoW allocations 1464 * that were in progress at the time the filesystem went down, so we can free 1465 * them to get the space back. 1466 * 1467 * This mechanism is superior to creating EFIs for unmapped CoW extents for 1468 * several reasons -- first, EFIs pin the tail of the log and would have to be 1469 * periodically relogged to avoid filling up the log. Second, CoW completions 1470 * will have to file an EFD and create new EFIs for whatever remains in the 1471 * CoW fork; this partially takes care of (1) but extent-size reservations 1472 * will have to periodically relog even if there's no writeout in progress. 1473 * This can happen if the CoW extent size hint is set, which you really want. 1474 * Third, EFIs cannot currently be automatically relogged into newer 1475 * transactions to advance the log tail. 
Fourth, stuffing the log full of
 * EFIs places an upper bound on the number of CoW allocations that can be
 * held filesystem-wide at any given time.  Recording them in the refcount
 * btree doesn't require us to maintain any state in memory and doesn't pin
 * the log.
 */
/*
 * Adjust the refcounts of CoW allocations.  These allocations are "magic"
 * in that they're not referenced anywhere else in the filesystem, so we
 * stash them in the refcount btree with a refcount of 1 until either file
 * remapping (or CoW cancellation) happens.
 */
STATIC int
xfs_refcount_adjust_cow_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	enum xfs_refc_adjust_op	adj)
{
	struct xfs_refcount_irec	ext, tmp;
	int				error;
	int				found_rec, found_tmp;

	if (aglen == 0)
		return 0;

	/* Find any overlapping refcount records */
	error = xfs_refcount_lookup_ge(cur, agbno, &found_rec);
	if (error)
		goto out_error;
	error = xfs_refcount_get_rec(cur, &ext, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec) {
		/*
		 * No record at or beyond agbno; synthesize one starting
		 * past the CoW-staging block range so the overlap checks
		 * below see no conflicting record.
		 */
		ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks +
				XFS_REFC_COW_START;
		ext.rc_blockcount = 0;
		ext.rc_refcount = 0;
	}

	switch (adj) {
	case XFS_REFCOUNT_ADJUST_COW_ALLOC:
		/* Adding a CoW reservation, there should be nothing here. */
		if (XFS_IS_CORRUPT(cur->bc_mp,
				   agbno + aglen > ext.rc_startblock)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}

		/* Insert a fresh record with refcount 1 for the staging extent. */
		tmp.rc_startblock = agbno;
		tmp.rc_blockcount = aglen;
		tmp.rc_refcount = 1;
		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_ag.pag->pag_agno, &tmp);

		error = xfs_refcount_insert(cur, &tmp,
				&found_tmp);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_tmp != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
		break;
	case XFS_REFCOUNT_ADJUST_COW_FREE:
		/* Removing a CoW reservation, there should be one extent. */
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_startblock != agbno)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount != aglen)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_refcount != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}

		ext.rc_refcount = 0;
		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_ag.pag->pag_agno, &ext);
		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
		break;
	default:
		ASSERT(0);
	}

	return error;
out_error:
	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/*
 * Add or remove refcount btree entries for CoW reservations.
 */
STATIC int
xfs_refcount_adjust_cow(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	enum xfs_refc_adjust_op	adj)
{
	bool			shape_changed;
	int			error;

	/* CoW staging extents are stored biased by XFS_REFC_COW_START. */
	agbno += XFS_REFC_COW_START;

	/*
	 * Ensure that no rcextents cross the boundary of the adjustment range.
	 */
	error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
	if (error)
		goto out_error;

	error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
	if (error)
		goto out_error;

	/*
	 * Try to merge with the left or right extents of the range.
	 */
	error = xfs_refcount_merge_extents(cur, &agbno, &aglen, adj,
			XFS_FIND_RCEXT_COW, &shape_changed);
	if (error)
		goto out_error;

	/* Now that we've taken care of the ends, adjust the middle extents */
	error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj);
	if (error)
		goto out_error;

	return 0;

out_error:
	trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			error, _RET_IP_);
	return error;
}

/*
 * Record a CoW allocation in the refcount btree.
 */
STATIC int
__xfs_refcount_cow_alloc(
	struct xfs_btree_cur	*rcur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen)
{
	trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
			agbno, aglen);

	/* Add refcount btree reservation */
	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
			XFS_REFCOUNT_ADJUST_COW_ALLOC);
}

/*
 * Remove a CoW allocation from the refcount btree.
 */
STATIC int
__xfs_refcount_cow_free(
	struct xfs_btree_cur	*rcur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen)
{
	trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
			agbno, aglen);

	/* Remove refcount btree reservation */
	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
			XFS_REFCOUNT_ADJUST_COW_FREE);
}

/* Record a CoW staging extent in the refcount btree.
*/ 1658 void 1659 xfs_refcount_alloc_cow_extent( 1660 struct xfs_trans *tp, 1661 xfs_fsblock_t fsb, 1662 xfs_extlen_t len) 1663 { 1664 struct xfs_mount *mp = tp->t_mountp; 1665 1666 if (!xfs_has_reflink(mp)) 1667 return; 1668 1669 __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len); 1670 1671 /* Add rmap entry */ 1672 xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb), 1673 XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW); 1674 } 1675 1676 /* Forget a CoW staging event in the refcount btree. */ 1677 void 1678 xfs_refcount_free_cow_extent( 1679 struct xfs_trans *tp, 1680 xfs_fsblock_t fsb, 1681 xfs_extlen_t len) 1682 { 1683 struct xfs_mount *mp = tp->t_mountp; 1684 1685 if (!xfs_has_reflink(mp)) 1686 return; 1687 1688 /* Remove rmap entry */ 1689 xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb), 1690 XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW); 1691 __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len); 1692 } 1693 1694 struct xfs_refcount_recovery { 1695 struct list_head rr_list; 1696 struct xfs_refcount_irec rr_rrec; 1697 }; 1698 1699 /* Stuff an extent on the recovery list. */ 1700 STATIC int 1701 xfs_refcount_recover_extent( 1702 struct xfs_btree_cur *cur, 1703 const union xfs_btree_rec *rec, 1704 void *priv) 1705 { 1706 struct list_head *debris = priv; 1707 struct xfs_refcount_recovery *rr; 1708 1709 if (XFS_IS_CORRUPT(cur->bc_mp, 1710 be32_to_cpu(rec->refc.rc_refcount) != 1)) 1711 return -EFSCORRUPTED; 1712 1713 rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0); 1714 xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec); 1715 list_add_tail(&rr->rr_list, debris); 1716 1717 return 0; 1718 } 1719 1720 /* Find and remove leftover CoW reservations. 
 */
int
xfs_refcount_recover_cow_leftovers(
	struct xfs_mount		*mp,
	struct xfs_perag		*pag)
{
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agbp;
	struct xfs_refcount_recovery	*rr, *n;
	struct list_head		debris;
	union xfs_btree_irec		low;
	union xfs_btree_irec		high;
	xfs_fsblock_t			fsb;
	xfs_agblock_t			agbno;
	int				error;

	/*
	 * CoW staging extents are stored with XFS_REFC_COW_START added to
	 * the block number; refuse to run if real AG block numbers could
	 * collide with that bias.
	 */
	if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
		return -EOPNOTSUPP;

	INIT_LIST_HEAD(&debris);

	/*
	 * In this first part, we use an empty transaction to gather up
	 * all the leftover CoW extents so that we can subsequently
	 * delete them.  The empty transaction is used to avoid
	 * a buffer lock deadlock if there happens to be a loop in the
	 * refcountbt because we're allowed to re-grab a buffer that is
	 * already attached to our transaction.  When we're done
	 * recording the CoW debris we cancel the (empty) transaction
	 * and everything goes away cleanly.
	 */
	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		goto out_trans;
	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);

	/* Find all the leftover CoW staging extents. */
	memset(&low, 0, sizeof(low));
	memset(&high, 0, sizeof(high));
	low.rc.rc_startblock = XFS_REFC_COW_START;
	high.rc.rc_startblock = -1U;
	error = xfs_btree_query_range(cur, &low, &high,
			xfs_refcount_recover_extent, &debris);
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);
	xfs_trans_cancel(tp);
	if (error)
		goto out_free;

	/* Now iterate the list to free the leftovers */
	list_for_each_entry_safe(rr, n, &debris, rr_list) {
		/* Set up transaction. */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
		if (error)
			goto out_free;

		trace_xfs_refcount_recover_extent(mp, pag->pag_agno,
				&rr->rr_rrec);

		/* Free the orphan record; undo the staging-range bias. */
		agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
		fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, agbno);
		xfs_refcount_free_cow_extent(tp, fsb,
				rr->rr_rrec.rc_blockcount);

		/* Free the block. */
		xfs_free_extent_later(tp, fsb, rr->rr_rrec.rc_blockcount, NULL);

		error = xfs_trans_commit(tp);
		if (error)
			goto out_free;

		list_del(&rr->rr_list);
		kmem_free(rr);
	}

	return error;
out_trans:
	xfs_trans_cancel(tp);
out_free:
	/* Free the leftover list */
	list_for_each_entry_safe(rr, n, &debris, rr_list) {
		list_del(&rr->rr_list);
		kmem_free(rr);
	}
	return error;
}

/* Is there a record covering a given extent? */
int
xfs_refcount_has_record(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*exists)
{
	union xfs_btree_irec	low;
	union xfs_btree_irec	high;

	memset(&low, 0, sizeof(low));
	low.rc.rc_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.rc.rc_startblock = bno + len - 1;

	return xfs_btree_has_record(cur, &low, &high, exists);
}

/* Set up the slab cache for refcount update intent items. */
int __init
xfs_refcount_intent_init_cache(void)
{
	xfs_refcount_intent_cache = kmem_cache_create("xfs_refc_intent",
			sizeof(struct xfs_refcount_intent),
			0, 0, NULL);

	return xfs_refcount_intent_cache != NULL ? 0 : -ENOMEM;
}

/* Tear down the intent cache; kmem_cache_destroy(NULL) is a no-op. */
void
xfs_refcount_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_refcount_intent_cache);
	xfs_refcount_intent_cache = NULL;
}