/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"

/*
 * Reverse map btree.
 *
 * This is a per-ag tree used to track the owner(s) of a given extent. With
 * reflink it is possible for there to be multiple owners, which is a departure
 * from classic XFS. Owner records for data extents are inserted when the
 * extent is mapped and removed when an extent is unmapped.  Owner records for
 * all other block types (i.e. metadata) are inserted when an extent is
 * allocated and removed when an extent is freed. There can only be one owner
 * of a metadata extent, usually an inode or some other metadata structure like
 * an AG btree.
 *
 * The rmap btree is part of the free space management, so blocks for the tree
 * are sourced from the agfl. Hence we need transaction reservation support for
 * this tree so that the freelist is always large enough. This also impacts on
 * the minimum space we need to leave free in the AG.
 *
 * The tree is ordered by [ag block, owner, offset]. This is a large key size,
 * but it is the only way to enforce unique keys when a block can be owned by
 * multiple files at any offset. There's no need to order/search by extent
 * size for online updating/management of the tree. It is intended that most
 * reverse lookups will be to find the owner(s) of a particular block, or to
 * try to recover tree and file data from corrupt primary metadata.
 */
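
/*
 * For example (illustrative values only, not taken from a real filesystem):
 * two reflinked files that both map AG block 100 would be tracked by two
 * records such as
 *
 *	[startblock 100, owner <inode A>, offset  0, blockcount 8]
 *	[startblock 100, owner <inode B>, offset 16, blockcount 8]
 *
 * which sort adjacently on startblock and are kept unique by owner/offset.
 */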

static struct xfs_btree_cur *
xfs_rmapbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno);
}

STATIC void
xfs_rmapbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

STATIC int
xfs_rmapbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
			bno, 1);
	if (bno == NULLAGBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
			false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);
	be32_add_cpu(&agf->agf_rmap_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}

STATIC int
xfs_rmapbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
			bno, 1);
	be32_add_cpu(&agf->agf_rmap_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);

	return 0;
}

STATIC int
xfs_rmapbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mnr[level != 0];
}

STATIC int
xfs_rmapbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mxr[level != 0];
}

STATIC void
xfs_rmapbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
}

/*
 * The high key for a reverse mapping record can be computed by shifting
 * the startblock and offset to the highest value that would still map
 * to that record.  In practice this means that we add blockcount-1 to
 * the startblock for all records, and if the record is for a data/attr
 * fork mapping, we add blockcount-1 to the offset too.
 */
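/*
 * Worked example (hypothetical values): a data fork mapping with
 * rm_startblock = 100, rm_blockcount = 8 and rm_offset = 16 gets the high
 * key [startblock 107, owner, offset 23]; a bmbt block or non-inode owner
 * record with the same startblock/blockcount gets [107, owner, offset
 * unchanged].
 */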
STATIC void
xfs_rmapbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__uint64_t		off;
	int			adj;

	adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
	if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
	    XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
		return;
	off = be64_to_cpu(key->rmap.rm_offset);
	off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
	key->rmap.rm_offset = cpu_to_be64(off);
}

STATIC void
xfs_rmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}

STATIC void
xfs_rmapbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

STATIC __int64_t
xfs_rmapbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_rmap_irec	*rec = &cur->bc_rec.r;
	struct xfs_rmap_key	*kp = &key->rmap;
	__u64			x, y;
	__int64_t		d;

	d = (__int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
	if (d)
		return d;

	x = be64_to_cpu(kp->rm_owner);
	y = rec->rm_owner;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
	y = rec->rm_offset;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}

STATIC __int64_t
xfs_rmapbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	struct xfs_rmap_key	*kp1 = &k1->rmap;
	struct xfs_rmap_key	*kp2 = &k2->rmap;
	__int64_t		d;
	__u64			x, y;

	d = (__int64_t)be32_to_cpu(kp1->rm_startblock) -
		       be32_to_cpu(kp2->rm_startblock);
	if (d)
		return d;

	x = be64_to_cpu(kp1->rm_owner);
	y = be64_to_cpu(kp2->rm_owner);
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
	y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}
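
/*
 * Note: only the logical offset bits (XFS_RMAP_OFF) take part in the key
 * comparisons above; the flag bits packed into the upper bits of rm_offset
 * (attr fork, bmbt block, unwritten) are masked off, so they never influence
 * key ordering.
 */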

static bool
xfs_rmapbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner
	 * as the perag is not fully initialised and hence not attached to the
	 * buffer. In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum
	 * limits in this case.
	 */
	if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC))
		return false;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return false;
	if (!xfs_btree_sblock_v5hdr_verify(bp))
		return false;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
			return false;
	} else if (level >= mp->m_rmap_maxlevels)
		return false;

	return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}

static void
xfs_rmapbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_rmapbt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}

static void
xfs_rmapbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_rmapbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
	.name			= "xfs_rmapbt",
	.verify_read		= xfs_rmapbt_read_verify,
	.verify_write		= xfs_rmapbt_write_verify,
};

#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_rmapbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	__uint32_t		x;
	__uint32_t		y;
	__uint64_t		a;
	__uint64_t		b;

	x = be32_to_cpu(k1->rmap.rm_startblock);
	y = be32_to_cpu(k2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(k1->rmap.rm_owner);
	b = be64_to_cpu(k2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

STATIC int
xfs_rmapbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	__uint32_t		x;
	__uint32_t		y;
	__uint64_t		a;
	__uint64_t		b;

	x = be32_to_cpu(r1->rmap.rm_startblock);
	y = be32_to_cpu(r2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(r1->rmap.rm_owner);
	b = be64_to_cpu(r2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}
#endif	/* DEBUG */
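
/*
 * Note that key_len below is twice the key size: as an overlapping
 * (interval) btree, each node pointer carries both a low and a high key
 * (see XFS_BTREE_OVERLAPPING in xfs_rmapbt_init_cursor() below).
 */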
static const struct xfs_btree_ops xfs_rmapbt_ops = {
	.rec_len		= sizeof(struct xfs_rmap_rec),
	.key_len		= 2 * sizeof(struct xfs_rmap_key),

	.dup_cursor		= xfs_rmapbt_dup_cursor,
	.set_root		= xfs_rmapbt_set_root,
	.alloc_block		= xfs_rmapbt_alloc_block,
	.free_block		= xfs_rmapbt_free_block,
	.get_minrecs		= xfs_rmapbt_get_minrecs,
	.get_maxrecs		= xfs_rmapbt_get_maxrecs,
	.init_key_from_rec	= xfs_rmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rmapbt_init_ptr_from_cur,
	.key_diff		= xfs_rmapbt_key_diff,
	.buf_ops		= &xfs_rmapbt_buf_ops,
	.diff_two_keys		= xfs_rmapbt_diff_two_keys,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_rmapbt_keys_inorder,
	.recs_inorder		= xfs_rmapbt_recs_inorder,
#endif
};

/*
 * Allocate a new reverse mapping btree cursor.
 */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* Overlapping btree; 2 keys per pointer. */
	cur->bc_btnum = XFS_BTNUM_RMAP;
	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_rmapbt_ops;
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

/*
 * Calculate number of records in an rmap btree block.
 */
int
xfs_rmapbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_RMAP_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen /
		(2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
}

/* Compute the maximum height of an rmap btree. */
void
xfs_rmapbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
			mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
}
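
/*
 * Worked example for xfs_rmapbt_maxrecs() above (assuming 4096-byte blocks,
 * a 56-byte short-form CRC block header, 24-byte records, 20-byte keys and
 * 4-byte pointers): a leaf block holds (4096 - 56) / 24 = 168 records and a
 * node block holds (4096 - 56) / (2 * 20 + 4) = 91 key/pointer pairs.
 */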